]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/tg3.c
ce04c64a8a6e730a928d65ab9a8d15e92cd25f19
[net-next-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44 #include <net/ip.h>
45
46 #include <asm/system.h>
47 #include <asm/io.h>
48 #include <asm/byteorder.h>
49 #include <asm/uaccess.h>
50
51 #ifdef CONFIG_SPARC
52 #include <asm/idprom.h>
53 #include <asm/prom.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #define TG3_TSO_SUPPORT 1
63
64 #include "tg3.h"
65
66 #define DRV_MODULE_NAME         "tg3"
67 #define PFX DRV_MODULE_NAME     ": "
68 #define DRV_MODULE_VERSION      "3.92"
69 #define DRV_MODULE_RELDATE      "May 2, 2008"
70
71 #define TG3_DEF_MAC_MODE        0
72 #define TG3_DEF_RX_MODE         0
73 #define TG3_DEF_TX_MODE         0
74 #define TG3_DEF_MSG_ENABLE        \
75         (NETIF_MSG_DRV          | \
76          NETIF_MSG_PROBE        | \
77          NETIF_MSG_LINK         | \
78          NETIF_MSG_TIMER        | \
79          NETIF_MSG_IFDOWN       | \
80          NETIF_MSG_IFUP         | \
81          NETIF_MSG_RX_ERR       | \
82          NETIF_MSG_TX_ERR)
83
84 /* length of time before we decide the hardware is borked,
85  * and dev->tx_timeout() should be called to fix the problem
86  */
87 #define TG3_TX_TIMEOUT                  (5 * HZ)
88
89 /* hardware minimum and maximum for a single frame's data payload */
90 #define TG3_MIN_MTU                     60
91 #define TG3_MAX_MTU(tp) \
92         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
93
94 /* These numbers seem to be hard coded in the NIC firmware somehow.
95  * You can't change the ring sizes, but you can change where you place
96  * them in the NIC onboard memory.
97  */
98 #define TG3_RX_RING_SIZE                512
99 #define TG3_DEF_RX_RING_PENDING         200
100 #define TG3_RX_JUMBO_RING_SIZE          256
101 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
102
103 /* Do not place this n-ring entries value into the tp struct itself,
104  * we really want to expose these constants to GCC so that modulo et
105  * al.  operations are done with shifts and masks instead of with
106  * hw multiply/modulo instructions.  Another solution would be to
107  * replace things like '% foo' with '& (foo - 1)'.
108  */
109 #define TG3_RX_RCB_RING_SIZE(tp)        \
110         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
111
112 #define TG3_TX_RING_SIZE                512
113 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
114
115 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
116                                  TG3_RX_RING_SIZE)
117 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
118                                  TG3_RX_JUMBO_RING_SIZE)
119 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
120                                    TG3_RX_RCB_RING_SIZE(tp))
121 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
122                                  TG3_TX_RING_SIZE)
123 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
124
125 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
126 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
127
128 /* minimum number of free TX descriptors required to wake up TX process */
129 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
130
131 /* number of ETHTOOL_GSTATS u64's */
132 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
133
134 #define TG3_NUM_TEST            6
135
/* Banner logged once at module load; built from the name/version/date macros above. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* categories; module parameter overrides the default. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
/* PCI IDs this driver binds to: all Tigon3 variants plus SysKonnect,
 * Altima, and Apple boards built on the same silicon.  Terminated by
 * the empty sentinel entry required by the PCI core.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};
216
217 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
218
/* ethtool -S statistic names.  TG3_NUM_STATS is derived from
 * sizeof(struct tg3_ethtool_stats), so this table's entry order must
 * mirror that struct's u64 layout -- NOTE(review): verify against the
 * struct definition in tg3.h when adding entries.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
299
/* ethtool self-test names; there must be exactly TG3_NUM_TEST entries.
 * "(online)"/"(offline)" presumably indicates whether the test can run
 * without taking the link down -- verify against the self-test code.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
310
311 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
312 {
313         writel(val, tp->regs + off);
314 }
315
316 static u32 tg3_read32(struct tg3 *tp, u32 off)
317 {
318         return (readl(tp->regs + off));
319 }
320
321 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
322 {
323         writel(val, tp->aperegs + off);
324 }
325
326 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
327 {
328         return (readl(tp->aperegs + off));
329 }
330
/* Write a chip register through the PCI config-space indirect window:
 * the target offset goes into TG3PCI_REG_BASE_ADDR, the value into
 * TG3PCI_REG_DATA.  indirect_lock serializes the two config writes so
 * concurrent users cannot interleave address/data pairs.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
340
341 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
342 {
343         writel(val, tp->regs + off);
344         readl(tp->regs + off);
345 }
346
/* Read a chip register through the PCI config-space indirect window.
 * Counterpart of tg3_write_indirect_reg32(); the same lock keeps the
 * address write and data read atomic with respect to other users.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
358
/* Write a mailbox register indirectly via PCI config space.
 * Two hot mailboxes (RX return consumer and standard RX producer) have
 * dedicated config-space aliases and bypass the shared indirect window
 * entirely; everything else goes through REG_BASE_ADDR/REG_DATA under
 * indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        /* 0x5600 rebases the mailbox offset into the indirect register
         * space -- presumably the GRC mailbox base; verify against the
         * register map in tg3.h.
         */
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
388
/* Read a mailbox register indirectly via PCI config space, using the
 * same 0x5600 rebasing as tg3_write_indirect_mbox().
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
400
401 /* usec_wait specifies the wait time in usec when writing to certain registers
402  * where it is unsafe to read back the register without some delay.
403  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
404  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
405  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods: tp->write32 is already a
                 * config-space (indirect) accessor here, so no flush
                 * read is needed.
                 */
                tp->write32(tp, off, val);
        else {
                /* Posted method: read back to push the write to the
                 * chip, delaying first if the register is unsafe to
                 * read immediately (see comment above this function).
                 */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
425
/* Mailbox write with an optional flush read-back.  The read is skipped
 * on chips where mailbox reads are not safe/meaningful (write-reorder
 * workaround or ICH workaround flags set).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
433
/* TX mailbox write with two chip-bug workarounds: the TXD_MBOX_HWBUG
 * flag requires the value to be written twice, and the write-reorder
 * flag requires a read-back so the doorbell is not reordered behind
 * earlier descriptor writes.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
443
444 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
445 {
446         return (readl(tp->regs + off + GRCMBOX_BASE));
447 }
448
449 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
450 {
451         writel(val, tp->regs + off + GRCMBOX_BASE);
452 }
453
/* Shorthand wrappers around the per-chip accessor function pointers in
 * struct tg3.  They assume a local "tp" variable in the calling scope.
 * The *_f / *_wait_f variants flush (and optionally delay) the write.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
464
/* Write a word into NIC on-board SRAM through the memory window
 * (base-address + data register pair), via config space or MMIO
 * depending on TG3_FLAG_SRAM_USE_CONFIG.  The 5906 statistics-block
 * range is silently skipped.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
489
/* Read a word from NIC on-board SRAM; counterpart of tg3_write_mem().
 * Reads from the 5906 statistics-block range return 0 instead of
 * touching the hardware.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
516
517 static void tg3_ape_lock_init(struct tg3 *tp)
518 {
519         int i;
520
521         /* Make sure the driver hasn't any stale locks. */
522         for (i = 0; i < 8; i++)
523                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
524                                 APE_LOCK_GRANT_DRIVER);
525 }
526
527 static int tg3_ape_lock(struct tg3 *tp, int locknum)
528 {
529         int i, off;
530         int ret = 0;
531         u32 status;
532
533         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
534                 return 0;
535
536         switch (locknum) {
537                 case TG3_APE_LOCK_MEM:
538                         break;
539                 default:
540                         return -EINVAL;
541         }
542
543         off = 4 * locknum;
544
545         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
546
547         /* Wait for up to 1 millisecond to acquire lock. */
548         for (i = 0; i < 100; i++) {
549                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
550                 if (status == APE_LOCK_GRANT_DRIVER)
551                         break;
552                 udelay(10);
553         }
554
555         if (status != APE_LOCK_GRANT_DRIVER) {
556                 /* Revoke the lock request. */
557                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
558                                 APE_LOCK_GRANT_DRIVER);
559
560                 ret = -EBUSY;
561         }
562
563         return ret;
564 }
565
566 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
567 {
568         int off;
569
570         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
571                 return;
572
573         switch (locknum) {
574                 case TG3_APE_LOCK_MEM:
575                         break;
576                 default:
577                         return;
578         }
579
580         off = 4 * locknum;
581         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
582 }
583
/* Mask chip interrupts: set the PCI-interrupt mask bit in misc host
 * control, then write 1 to the interrupt mailbox (which also triggers
 * the GRC clear-int workaround in tg3_write_indirect_mbox()).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
590
/* Force an interrupt if the status block already has an unserviced
 * update (non-tagged mode only); otherwise kick the coalescing engine
 * so any pending work is surfaced "now".
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
600
/* Unmask chip interrupts and acknowledge up to the last seen tag. */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        /* irq_sync must be visible before interrupts are re-enabled. */
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        /* 1-shot MSI mode needs the mailbox written a second time --
         * NOTE(review): rationale not visible here; see the chip errata.
         */
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
615
616 static inline unsigned int tg3_has_work(struct tg3 *tp)
617 {
618         struct tg3_hw_status *sblk = tp->hw_status;
619         unsigned int work_exists = 0;
620
621         /* check for phy events */
622         if (!(tp->tg3_flags &
623               (TG3_FLAG_USE_LINKCHG_REG |
624                TG3_FLAG_POLL_SERDES))) {
625                 if (sblk->status & SD_STATUS_LINK_CHG)
626                         work_exists = 1;
627         }
628         /* check for RX/TX work to do */
629         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
630             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
631                 work_exists = 1;
632
633         return work_exists;
634 }
635
636 /* tg3_restart_ints
637  *  similar to tg3_enable_ints, but it accurately determines whether there
638  *  is new work pending and can return without flushing the PIO write
639  *  which reenables interrupts
640  */
static void tg3_restart_ints(struct tg3 *tp)
{
        /* Acknowledge work up to last_tag; this re-enables interrupts
         * without the flush read that tg3_enable_ints() would do.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        /* Order the mailbox write before any later MMIO. */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
656
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire while the queue is deliberately stopped, then stop NAPI polling
 * before disabling the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
663
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, and force a status-block service pass so nothing
 * that arrived while stopped is left pending.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        /* Mark the status block updated so tg3_cond_int() (via
         * tg3_enable_ints) forces an interrupt to service it.
         */
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
675
/* Step the core clock down through CLOCK_CTRL, preserving the CLKRUN
 * bits and the low 5 bits of the current setting.  Skipped on CPMU and
 * 5780-class chips, which manage clocks differently.  Each write uses
 * tw32_wait_f with a 40us wait because reading CLOCK_CTRL right after
 * changing frequencies is unsafe (see the _tw32_flush comment).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Drop from 44MHz core in two steps via ALTCLK --
                 * NOTE(review): the intermediate step appears to be a
                 * required hardware sequencing, not redundant.
                 */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
707
708 #define PHY_BUSY_LOOPS  5000
709
/* Read a PHY register over the MI (MDIO) interface.
 * Temporarily disables hardware auto-polling (restored on exit), issues
 * an MI read frame, and polls MI_COM_BUSY for up to PHY_BUSY_LOOPS.
 * Returns 0 with *val filled in, or -EBUSY on timeout (*val left 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a settle delay to get the data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
758
/* Write a PHY register over the MI (MDIO) interface; mirror image of
 * tg3_readphy().  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        /* 5906: these two registers must not be written; pretend success. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI frame: PHY address, register, data, write command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
807
808 static int tg3_bmcr_reset(struct tg3 *tp)
809 {
810         u32 phy_control;
811         int limit, err;
812
813         /* OK, reset it, and poll the BMCR_RESET bit until it
814          * clears or we time out.
815          */
816         phy_control = BMCR_RESET;
817         err = tg3_writephy(tp, MII_BMCR, phy_control);
818         if (err != 0)
819                 return -EBUSY;
820
821         limit = 5000;
822         while (limit--) {
823                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
824                 if (err != 0)
825                         return -EBUSY;
826
827                 if ((phy_control & BMCR_RESET) == 0) {
828                         udelay(40);
829                         break;
830                 }
831                 udelay(10);
832         }
833         if (limit <= 0)
834                 return -EBUSY;
835
836         return 0;
837 }
838
839 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
840 {
841         struct tg3 *tp = (struct tg3 *)bp->priv;
842         u32 val;
843
844         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
845                 return -EAGAIN;
846
847         if (tg3_readphy(tp, reg, &val))
848                 return -EIO;
849
850         return val;
851 }
852
853 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
854 {
855         struct tg3 *tp = (struct tg3 *)bp->priv;
856
857         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
858                 return -EAGAIN;
859
860         if (tg3_writephy(tp, reg, val))
861                 return -EIO;
862
863         return 0;
864 }
865
/* phylib reset callback; the tg3 MDIO bus needs no bus-level reset. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
870
/* Resume driver MDIO access: clear the bus-paused flag (if the mdio
 * bus has been registered) and disable hardware autopolling of the MI
 * interface so the driver owns the port.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus.mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus.mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);
}
883
/* Pause driver MDIO access: mark the registered bus paused so the
 * phylib read/write callbacks return -EAGAIN until tg3_mdio_start().
 */
static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus.mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus.mdio_lock);
	}
}
892
/* Set up and register a phylib MDIO bus for the device's PHY.
 *
 * Always restarts MDIO access via tg3_mdio_start().  The bus itself is
 * registered only when phylib is in use (TG3_FLG3_USE_PHYLIB) and has
 * not been registered already.  Returns 0 on success or the error code
 * from mdiobus_register().
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct mii_bus *mdio_bus = &tp->mdio_bus;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	memset(mdio_bus, 0, sizeof(*mdio_bus));

	mdio_bus->name     = "tg3 mdio bus";
	/* Derive a unique bus id from the PCI bus number and devfn. */
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	mdio_bus->priv     = tp;
	mdio_bus->dev      = &tp->pdev->dev;
	mdio_bus->read     = &tg3_mdio_read;
	mdio_bus->write    = &tg3_mdio_write;
	mdio_bus->reset    = &tg3_mdio_reset;
	/* Probe only the single PHY address this device uses. */
	mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(mdio_bus);
	if (!i)
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
	else
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);

	return i;
}
938
/* Unregister the phylib MDIO bus if it was registered, clearing both
 * the INITED and PAUSED state flags.
 */
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(&tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}
947
/* tp->lock is held. */
/* Poll GRC_RX_CPU_EVENT until the firmware acknowledges the previously
 * posted driver event by clearing GRC_RX_CPU_DRIVER_EVENT.
 *
 * NOTE(review): 250000 iterations of udelay(10) bounds the wait at
 * roughly 2.5 seconds, not the 2.5 milliseconds the original comment
 * claimed.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;

	for (i = 0; i < 250000; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(10);
	}
}
960
/* tp->lock is held. */
/* Report the current link/negotiation state to the management (ASF)
 * firmware through the NIC_SRAM_FW_CMD mailbox.
 *
 * Only applies to 5780-class devices with ASF enabled.  Posts a
 * 14-byte FWCMD_NICDRV_LINK_UPDATE payload — BMCR/BMSR, local and
 * partner advertisement, 1000BASE-T control/status (copper only), and
 * the PHY address register — then rings the RX CPU driver-event bit.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure firmware consumed any previous event first. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the upper half, BMSR in the lower half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement and link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: gigabit control/status; zero for MII-SERDES links. */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register in the upper half. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	/* Kick the RX CPU so the firmware notices the new event. */
	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);
}
1010
/* Log the current link state and forward it to management firmware.
 *
 * On link down the message is gated by netif_msg_link() but the
 * firmware report is sent unconditionally; on link up, both the
 * speed/duplex/flow-control messages and the firmware report happen
 * only when netif_msg_link() allows logging.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
1038
1039 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1040 {
1041         u16 miireg;
1042
1043         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1044                 miireg = ADVERTISE_PAUSE_CAP;
1045         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1046                 miireg = ADVERTISE_PAUSE_ASYM;
1047         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1048                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1049         else
1050                 miireg = 0;
1051
1052         return miireg;
1053 }
1054
1055 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1056 {
1057         u16 miireg;
1058
1059         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1060                 miireg = ADVERTISE_1000XPAUSE;
1061         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1062                 miireg = ADVERTISE_1000XPSE_ASYM;
1063         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1064                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1065         else
1066                 miireg = 0;
1067
1068         return miireg;
1069 }
1070
1071 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1072 {
1073         u8 cap = 0;
1074
1075         if (lcladv & ADVERTISE_PAUSE_CAP) {
1076                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1077                         if (rmtadv & LPA_PAUSE_CAP)
1078                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1079                         else if (rmtadv & LPA_PAUSE_ASYM)
1080                                 cap = TG3_FLOW_CTRL_RX;
1081                 } else {
1082                         if (rmtadv & LPA_PAUSE_CAP)
1083                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1084                 }
1085         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1086                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1087                         cap = TG3_FLOW_CTRL_TX;
1088         }
1089
1090         return cap;
1091 }
1092
1093 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1094 {
1095         u8 cap = 0;
1096
1097         if (lcladv & ADVERTISE_1000XPAUSE) {
1098                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1099                         if (rmtadv & LPA_1000XPAUSE)
1100                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1101                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1102                                 cap = TG3_FLOW_CTRL_RX;
1103                 } else {
1104                         if (rmtadv & LPA_1000XPAUSE)
1105                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1106                 }
1107         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1108                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1109                         cap = TG3_FLOW_CTRL_TX;
1110         }
1111
1112         return cap;
1113 }
1114
1115 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1116 {
1117         u8 flowctrl = 0;
1118         u32 old_rx_mode = tp->rx_mode;
1119         u32 old_tx_mode = tp->tx_mode;
1120
1121         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1122             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1123                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1124                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1125                 else
1126                         flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1127         } else
1128                 flowctrl = tp->link_config.flowctrl;
1129
1130         tp->link_config.active_flowctrl = flowctrl;
1131
1132         if (flowctrl & TG3_FLOW_CTRL_RX)
1133                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1134         else
1135                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1136
1137         if (old_rx_mode != tp->rx_mode)
1138                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1139
1140         if (flowctrl & TG3_FLOW_CTRL_TX)
1141                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1142         else
1143                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1144
1145         if (old_tx_mode != tp->tx_mode)
1146                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1147 }
1148
/* Write @val to PHY DSP register @reg: the address goes into the DSP
 * address register, then the data into the DSP read/write port.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1154
/* Enable or disable automatic MDI/MDI-X crossover on the PHY.
 *
 * Only applies to 5705-plus copper devices; SERDES and older chips
 * return immediately.  The 5906 EPHY controls MDIX through a shadowed
 * test register; other PHYs use the misc shadow register accessed via
 * MII_TG3_AUX_CTRL.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			/* Open the shadow register window. */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			/* Restore the original test register value. */
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Select the AUXCTL misc shadow register for readback. */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			/* WREN must be set for the shadow write to stick. */
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
1192
/* Enable the PHY "ethernet@wirespeed" feature unless the chip is
 * flagged TG3_FLG2_NO_ETH_WIRE_SPEED.
 *
 * 0x7007 appears to select the relevant AUXCTL shadow register, after
 * which bits 15 and 4 are set with a read-modify-write — magic values
 * kept as-is from the vendor sequence; TODO confirm their meaning.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
1205
/* Program PHY DSP coefficients from the chip's one-time-programmable
 * (OTP) word cached in tp->phy_otp.
 *
 * Each OTP field is shifted into place and written to its DSP register
 * with the SM_DSP clock enabled; the clock is switched off again when
 * the writes are done.  No-op when no OTP value was read.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	/* AGC target from the OTP, plus the fixed default bits. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable plus ADC clock adjust. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1248
1249 static int tg3_wait_macro_done(struct tg3 *tp)
1250 {
1251         int limit = 100;
1252
1253         while (limit--) {
1254                 u32 tmp32;
1255
1256                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1257                         if ((tmp32 & 0x1000) == 0)
1258                                 break;
1259                 }
1260         }
1261         if (limit <= 0)
1262                 return -EBUSY;
1263
1264         return 0;
1265 }
1266
/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back for verification.
 *
 * Register 0x16 appears to be the DSP macro control register; the
 * magic values (0x0002/0x0202/0x0082/0x0802) select the macro
 * operations and are kept as-is from the vendor sequence.  On a macro
 * timeout, *resetp is set so the caller retries after another PHY
 * reset; on a readback mismatch an error tap is written instead.
 * Returns 0 when all channels verify clean, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		/* Load the six pattern words. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the readback macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the pattern as low/high word pairs. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write the error tap values. */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1332
/* Clear the DSP test pattern on all four PHY channels by writing
 * zeros and running the write macro (register 0x16, same magic values
 * as tg3_phy_write_and_check_testpat()).  Returns -EBUSY if the macro
 * does not complete for any channel.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1352
/* PHY reset workaround for 5703/5704/5705: reset the PHY, force
 * 1000/full master mode, and write/verify the DSP channel test
 * patterns, retrying up to 10 times before restoring the original
 * register values.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): if every retry bails out via "continue" on a failed
 * register read, phy9_orig is used uninitialized below and a stale
 * err value carries forward — pre-existing behavior, left unchanged.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the saved master-mode control value. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1428
/* Reset the tigon3 PHY and re-apply every chip-specific post-reset
 * workaround: CPMU fixups, DSP patches for known PHY bugs, jumbo
 * frame support bits, auto-MDIX and ethernet@wirespeed.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the previous comment here referred to a FORCE
 * argument that this function does not take.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the 5906 EPHY out of IDDQ (low-power) mode. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice — presumably to flush latched status
	 * bits; TODO confirm.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The link drops across the reset; report it down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the full DSP test-pattern reset dance. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (post-AX revisions), temporarily lift the CPMU's
	 * GPHY 10MB-receive-only restriction around the reset.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Move the 1000MB MAC clock off the 12.5MHz setting. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* DSP patch for the PHY ADC bug (vendor magic values). */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* 5704 A0 workaround; the register is written twice in the
	 * original — presumably intentional, left as-is.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	/* DSP patch for the PHY bit-error-rate bug. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* DSP patch for the PHY jitter bug, with optional trim adjust. */
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1574
1575 static void tg3_frob_aux_power(struct tg3 *tp)
1576 {
1577         struct tg3 *tp_peer = tp;
1578
1579         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1580                 return;
1581
1582         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1583             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1584                 struct net_device *dev_peer;
1585
1586                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1587                 /* remove_one() may have been run on the peer. */
1588                 if (!dev_peer)
1589                         tp_peer = tp;
1590                 else
1591                         tp_peer = netdev_priv(dev_peer);
1592         }
1593
1594         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1595             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1596             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1597             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1598                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1599                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1600                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1601                                     (GRC_LCLCTRL_GPIO_OE0 |
1602                                      GRC_LCLCTRL_GPIO_OE1 |
1603                                      GRC_LCLCTRL_GPIO_OE2 |
1604                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1605                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1606                                     100);
1607                 } else {
1608                         u32 no_gpio2;
1609                         u32 grc_local_ctrl = 0;
1610
1611                         if (tp_peer != tp &&
1612                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1613                                 return;
1614
1615                         /* Workaround to prevent overdrawing Amps. */
1616                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1617                             ASIC_REV_5714) {
1618                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1619                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1620                                             grc_local_ctrl, 100);
1621                         }
1622
1623                         /* On 5753 and variants, GPIO2 cannot be used. */
1624                         no_gpio2 = tp->nic_sram_data_cfg &
1625                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1626
1627                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1628                                          GRC_LCLCTRL_GPIO_OE1 |
1629                                          GRC_LCLCTRL_GPIO_OE2 |
1630                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1631                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1632                         if (no_gpio2) {
1633                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1634                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1635                         }
1636                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1637                                                     grc_local_ctrl, 100);
1638
1639                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1640
1641                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1642                                                     grc_local_ctrl, 100);
1643
1644                         if (!no_gpio2) {
1645                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1646                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1647                                             grc_local_ctrl, 100);
1648                         }
1649                 }
1650         } else {
1651                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1652                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1653                         if (tp_peer != tp &&
1654                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1655                                 return;
1656
1657                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1658                                     (GRC_LCLCTRL_GPIO_OE1 |
1659                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1660
1661                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1662                                     GRC_LCLCTRL_GPIO_OE1, 100);
1663
1664                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1665                                     (GRC_LCLCTRL_GPIO_OE1 |
1666                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1667                 }
1668         }
1669 }
1670
1671 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1672 {
1673         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1674                 return 1;
1675         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1676                 if (speed != SPEED_10)
1677                         return 1;
1678         } else if (speed == SPEED_10)
1679                 return 1;
1680
1681         return 0;
1682 }
1683
1684 static int tg3_setup_phy(struct tg3 *, int);
1685
1686 #define RESET_KIND_SHUTDOWN     0
1687 #define RESET_KIND_INIT         1
1688 #define RESET_KIND_SUSPEND      2
1689
1690 static void tg3_write_sig_post_reset(struct tg3 *, int);
1691 static int tg3_halt_cpu(struct tg3 *, u32);
1692 static int tg3_nvram_lock(struct tg3 *);
1693 static void tg3_nvram_unlock(struct tg3 *);
1694
/* Power down the PHY before the chip enters a low-power state.
 *
 * Serdes devices: on 5704, park the SG-DIG block in HW-autoneg +
 * soft-reset and set bit 15 of MAC_SERDES_CFG; nothing else is done.
 * Copper devices: 5906 uses EPHY IDDQ mode instead of BMCR power-down;
 * others (when not driven by phylib) force the LED off and write an
 * AUX_CTRL value before the final BMCR_PDOWN.
 *
 * Some chips (5700, 5704, 5780 with MII serdes) must NOT have the PHY
 * powered down due to hardware bugs, so the function returns early for
 * them without touching BMCR.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			/* NOTE(review): bit 15 of MAC_SERDES_CFG has no named
			 * constant here; presumably a serdes power control bit.
			 */
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset BMCR, then put the EPHY into IDDQ (deep
		 * power-down) via GRC_MISC_CFG rather than BMCR_PDOWN.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		/* Force the link LED off before powering down. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* 5761/5784 AX workaround: drop the 1000Mb MAC clock to
		 * 12.5MHz before powering the PHY down.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1742
/* Transition the device to the requested PCI power state.
 *
 * For PCI_D0 the PM control register is written immediately, the chip is
 * switched out of Vaux if it is a NIC, and the function returns.  For
 * D1/D2/D3hot the routine first prepares the chip for low power:
 * masks PCI interrupts, saves and forces the link config to 10/half
 * (copper), arranges WoL (magic packet) in the MAC, gates the various
 * core/RX/TX clocks as appropriate per ASIC generation, powers down the
 * PHY when neither WoL, ASF nor APE needs it, hands over aux power, and
 * only then commits the new PM state via config space.
 *
 * Returns 0 on success or -EINVAL for an unknown power state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear PME status (write-1-to-clear) and the current state bits. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is being reconfigured. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		tp->link_config.phy_is_low_power = 1;
	} else {
		/* Save the current link parameters so resume can restore
		 * them, then drop a copper link to 10/half autoneg.
		 */
		if (tp->link_config.phy_is_low_power == 0) {
			tp->link_config.phy_is_low_power = 1;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906 firmware handles WoL via the VCPU. */
		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for the firmware mailbox handshake. */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Configure the MAC so it can still receive (and match
		 * magic packets) while the rest of the chip sleeps.
		 */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic packet matching only if the device can
		 * actually signal PME from D3cold.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: the exact recipe depends on the ASIC generation. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch; each write waits 40us. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down when nothing (WoL/ASF/APE) needs it. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only unlock
			 * if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1973
1974 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1975 {
1976         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1977         case MII_TG3_AUX_STAT_10HALF:
1978                 *speed = SPEED_10;
1979                 *duplex = DUPLEX_HALF;
1980                 break;
1981
1982         case MII_TG3_AUX_STAT_10FULL:
1983                 *speed = SPEED_10;
1984                 *duplex = DUPLEX_FULL;
1985                 break;
1986
1987         case MII_TG3_AUX_STAT_100HALF:
1988                 *speed = SPEED_100;
1989                 *duplex = DUPLEX_HALF;
1990                 break;
1991
1992         case MII_TG3_AUX_STAT_100FULL:
1993                 *speed = SPEED_100;
1994                 *duplex = DUPLEX_FULL;
1995                 break;
1996
1997         case MII_TG3_AUX_STAT_1000HALF:
1998                 *speed = SPEED_1000;
1999                 *duplex = DUPLEX_HALF;
2000                 break;
2001
2002         case MII_TG3_AUX_STAT_1000FULL:
2003                 *speed = SPEED_1000;
2004                 *duplex = DUPLEX_FULL;
2005                 break;
2006
2007         default:
2008                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2009                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2010                                  SPEED_10;
2011                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2012                                   DUPLEX_HALF;
2013                         break;
2014                 }
2015                 *speed = SPEED_INVALID;
2016                 *duplex = DUPLEX_INVALID;
2017                 break;
2018         };
2019 }
2020
/* Program the copper PHY's advertisement registers and (re)start link
 * negotiation according to tp->link_config.
 *
 * Three advertisement cases:
 *  - low-power mode: advertise only 10Mb (plus 100Mb when WoL at 100Mb
 *    is required) and clear the gigabit control register;
 *  - autoneg with no specific speed requested: advertise everything in
 *    link_config.advertising (gigabit via MII_TG3_CTRL);
 *  - a specific speed/duplex requested: advertise exactly that mode.
 *
 * Then either force the link (autoneg disabled: write BMCR directly,
 * bouncing through loopback until link drops) or restart autoneg.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything the
		 * configuration allows.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 must negotiate as link master. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* Non-gigabit: clear the gigabit control register. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and wait (up to
			 * 15ms) for link status to clear before writing the
			 * new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR latches; read twice for current state. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2158
2159 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2160 {
2161         int err;
2162
2163         /* Turn off tap power management. */
2164         /* Set Extended packet length bit */
2165         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2166
2167         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2168         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2169
2170         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2171         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2172
2173         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2174         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2175
2176         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2177         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2178
2179         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2180         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2181
2182         udelay(40);
2183
2184         return err;
2185 }
2186
2187 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2188 {
2189         u32 adv_reg, all_mask = 0;
2190
2191         if (mask & ADVERTISED_10baseT_Half)
2192                 all_mask |= ADVERTISE_10HALF;
2193         if (mask & ADVERTISED_10baseT_Full)
2194                 all_mask |= ADVERTISE_10FULL;
2195         if (mask & ADVERTISED_100baseT_Half)
2196                 all_mask |= ADVERTISE_100HALF;
2197         if (mask & ADVERTISED_100baseT_Full)
2198                 all_mask |= ADVERTISE_100FULL;
2199
2200         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2201                 return 0;
2202
2203         if ((adv_reg & all_mask) != all_mask)
2204                 return 0;
2205         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2206                 u32 tg3_ctrl;
2207
2208                 all_mask = 0;
2209                 if (mask & ADVERTISED_1000baseT_Half)
2210                         all_mask |= ADVERTISE_1000HALF;
2211                 if (mask & ADVERTISED_1000baseT_Full)
2212                         all_mask |= ADVERTISE_1000FULL;
2213
2214                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2215                         return 0;
2216
2217                 if ((tg3_ctrl & all_mask) != all_mask)
2218                         return 0;
2219         }
2220         return 1;
2221 }
2222
2223 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2224 {
2225         u32 curadv, reqadv;
2226
2227         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2228                 return 1;
2229
2230         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2231         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2232
2233         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2234                 if (curadv != reqadv)
2235                         return 0;
2236
2237                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2238                         tg3_readphy(tp, MII_LPA, rmtadv);
2239         } else {
2240                 /* Reprogram the advertisement register, even if it
2241                  * does not affect the current link.  If the link
2242                  * gets renegotiated in the future, we can save an
2243                  * additional renegotiation cycle by advertising
2244                  * it correctly in the first place.
2245                  */
2246                 if (curadv != reqadv) {
2247                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2248                                      ADVERTISE_PAUSE_ASYM);
2249                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2250                 }
2251         }
2252
2253         return 1;
2254 }
2255
2256 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2257 {
2258         int current_link_up;
2259         u32 bmsr, dummy;
2260         u32 lcl_adv, rmt_adv;
2261         u16 current_speed;
2262         u8 current_duplex;
2263         int i, err;
2264
2265         tw32(MAC_EVENT, 0);
2266
2267         tw32_f(MAC_STATUS,
2268              (MAC_STATUS_SYNC_CHANGED |
2269               MAC_STATUS_CFG_CHANGED |
2270               MAC_STATUS_MI_COMPLETION |
2271               MAC_STATUS_LNKSTATE_CHANGED));
2272         udelay(40);
2273
2274         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2275                 tw32_f(MAC_MI_MODE,
2276                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2277                 udelay(80);
2278         }
2279
2280         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2281
2282         /* Some third-party PHYs need to be reset on link going
2283          * down.
2284          */
2285         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2286              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2287              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2288             netif_carrier_ok(tp->dev)) {
2289                 tg3_readphy(tp, MII_BMSR, &bmsr);
2290                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2291                     !(bmsr & BMSR_LSTATUS))
2292                         force_reset = 1;
2293         }
2294         if (force_reset)
2295                 tg3_phy_reset(tp);
2296
2297         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2298                 tg3_readphy(tp, MII_BMSR, &bmsr);
2299                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2300                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2301                         bmsr = 0;
2302
2303                 if (!(bmsr & BMSR_LSTATUS)) {
2304                         err = tg3_init_5401phy_dsp(tp);
2305                         if (err)
2306                                 return err;
2307
2308                         tg3_readphy(tp, MII_BMSR, &bmsr);
2309                         for (i = 0; i < 1000; i++) {
2310                                 udelay(10);
2311                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2312                                     (bmsr & BMSR_LSTATUS)) {
2313                                         udelay(40);
2314                                         break;
2315                                 }
2316                         }
2317
2318                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2319                             !(bmsr & BMSR_LSTATUS) &&
2320                             tp->link_config.active_speed == SPEED_1000) {
2321                                 err = tg3_phy_reset(tp);
2322                                 if (!err)
2323                                         err = tg3_init_5401phy_dsp(tp);
2324                                 if (err)
2325                                         return err;
2326                         }
2327                 }
2328         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2329                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2330                 /* 5701 {A0,B0} CRC bug workaround */
2331                 tg3_writephy(tp, 0x15, 0x0a75);
2332                 tg3_writephy(tp, 0x1c, 0x8c68);
2333                 tg3_writephy(tp, 0x1c, 0x8d68);
2334                 tg3_writephy(tp, 0x1c, 0x8c68);
2335         }
2336
2337         /* Clear pending interrupts... */
2338         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2339         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2340
2341         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2342                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2343         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2344                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2345
2346         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2347             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2348                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2349                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2350                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2351                 else
2352                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2353         }
2354
2355         current_link_up = 0;
2356         current_speed = SPEED_INVALID;
2357         current_duplex = DUPLEX_INVALID;
2358
2359         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2360                 u32 val;
2361
2362                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2363                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2364                 if (!(val & (1 << 10))) {
2365                         val |= (1 << 10);
2366                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2367                         goto relink;
2368                 }
2369         }
2370
2371         bmsr = 0;
2372         for (i = 0; i < 100; i++) {
2373                 tg3_readphy(tp, MII_BMSR, &bmsr);
2374                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2375                     (bmsr & BMSR_LSTATUS))
2376                         break;
2377                 udelay(40);
2378         }
2379
2380         if (bmsr & BMSR_LSTATUS) {
2381                 u32 aux_stat, bmcr;
2382
2383                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2384                 for (i = 0; i < 2000; i++) {
2385                         udelay(10);
2386                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2387                             aux_stat)
2388                                 break;
2389                 }
2390
2391                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2392                                              &current_speed,
2393                                              &current_duplex);
2394
2395                 bmcr = 0;
2396                 for (i = 0; i < 200; i++) {
2397                         tg3_readphy(tp, MII_BMCR, &bmcr);
2398                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2399                                 continue;
2400                         if (bmcr && bmcr != 0x7fff)
2401                                 break;
2402                         udelay(10);
2403                 }
2404
2405                 lcl_adv = 0;
2406                 rmt_adv = 0;
2407
2408                 tp->link_config.active_speed = current_speed;
2409                 tp->link_config.active_duplex = current_duplex;
2410
2411                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2412                         if ((bmcr & BMCR_ANENABLE) &&
2413                             tg3_copper_is_advertising_all(tp,
2414                                                 tp->link_config.advertising)) {
2415                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2416                                                                   &rmt_adv))
2417                                         current_link_up = 1;
2418                         }
2419                 } else {
2420                         if (!(bmcr & BMCR_ANENABLE) &&
2421                             tp->link_config.speed == current_speed &&
2422                             tp->link_config.duplex == current_duplex &&
2423                             tp->link_config.flowctrl ==
2424                             tp->link_config.active_flowctrl) {
2425                                 current_link_up = 1;
2426                         }
2427                 }
2428
2429                 if (current_link_up == 1 &&
2430                     tp->link_config.active_duplex == DUPLEX_FULL)
2431                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2432         }
2433
2434 relink:
2435         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2436                 u32 tmp;
2437
2438                 tg3_phy_copper_begin(tp);
2439
2440                 tg3_readphy(tp, MII_BMSR, &tmp);
2441                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2442                     (tmp & BMSR_LSTATUS))
2443                         current_link_up = 1;
2444         }
2445
2446         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2447         if (current_link_up == 1) {
2448                 if (tp->link_config.active_speed == SPEED_100 ||
2449                     tp->link_config.active_speed == SPEED_10)
2450                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2451                 else
2452                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2453         } else
2454                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2455
2456         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2457         if (tp->link_config.active_duplex == DUPLEX_HALF)
2458                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2459
2460         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2461                 if (current_link_up == 1 &&
2462                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2463                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2464                 else
2465                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2466         }
2467
2468         /* ??? Without this setting Netgear GA302T PHY does not
2469          * ??? send/receive packets...
2470          */
2471         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2472             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2473                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2474                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2475                 udelay(80);
2476         }
2477
2478         tw32_f(MAC_MODE, tp->mac_mode);
2479         udelay(40);
2480
2481         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2482                 /* Polled via timer. */
2483                 tw32_f(MAC_EVENT, 0);
2484         } else {
2485                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2486         }
2487         udelay(40);
2488
2489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2490             current_link_up == 1 &&
2491             tp->link_config.active_speed == SPEED_1000 &&
2492             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2493              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2494                 udelay(120);
2495                 tw32_f(MAC_STATUS,
2496                      (MAC_STATUS_SYNC_CHANGED |
2497                       MAC_STATUS_CFG_CHANGED));
2498                 udelay(40);
2499                 tg3_write_mem(tp,
2500                               NIC_SRAM_FIRMWARE_MBOX,
2501                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2502         }
2503
2504         if (current_link_up != netif_carrier_ok(tp->dev)) {
2505                 if (current_link_up)
2506                         netif_carrier_on(tp->dev);
2507                 else
2508                         netif_carrier_off(tp->dev);
2509                 tg3_link_report(tp);
2510         }
2511
2512         return 0;
2513 }
2514
/* Software-driven 1000BASE-X autonegotiation state, carried across
 * successive calls to tg3_fiber_aneg_smachine() by fiber_autoneg().
 * Models an IEEE 802.3 Clause 37 style arbitration state machine.
 */
struct tg3_fiber_aneginfo {
	/* Current state of the arbitration machine (ANEG_STATE_*). */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Control inputs (MR_AN_ENABLE, MR_RESTART_AN) and result
	 * outputs (MR_AN_COMPLETE, MR_LINK_OK, MR_LP_ADV_*) packed
	 * into one bitmask.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters (in state-machine invocations, ~1us each);
	 * link_time records when the current wait interval started.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * has repeated; used to debounce ability_match.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Debounced match flags derived from the received config word. */
	char ability_match, idle_match, ack_match;

	/* Config words we transmit / last received (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle interval, in state-machine ticks. */
#define ANEG_STATE_SETTLE_TIME  10000
2578
2579 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2580                                    struct tg3_fiber_aneginfo *ap)
2581 {
2582         u16 flowctrl;
2583         unsigned long delta;
2584         u32 rx_cfg_reg;
2585         int ret;
2586
2587         if (ap->state == ANEG_STATE_UNKNOWN) {
2588                 ap->rxconfig = 0;
2589                 ap->link_time = 0;
2590                 ap->cur_time = 0;
2591                 ap->ability_match_cfg = 0;
2592                 ap->ability_match_count = 0;
2593                 ap->ability_match = 0;
2594                 ap->idle_match = 0;
2595                 ap->ack_match = 0;
2596         }
2597         ap->cur_time++;
2598
2599         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2600                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2601
2602                 if (rx_cfg_reg != ap->ability_match_cfg) {
2603                         ap->ability_match_cfg = rx_cfg_reg;
2604                         ap->ability_match = 0;
2605                         ap->ability_match_count = 0;
2606                 } else {
2607                         if (++ap->ability_match_count > 1) {
2608                                 ap->ability_match = 1;
2609                                 ap->ability_match_cfg = rx_cfg_reg;
2610                         }
2611                 }
2612                 if (rx_cfg_reg & ANEG_CFG_ACK)
2613                         ap->ack_match = 1;
2614                 else
2615                         ap->ack_match = 0;
2616
2617                 ap->idle_match = 0;
2618         } else {
2619                 ap->idle_match = 1;
2620                 ap->ability_match_cfg = 0;
2621                 ap->ability_match_count = 0;
2622                 ap->ability_match = 0;
2623                 ap->ack_match = 0;
2624
2625                 rx_cfg_reg = 0;
2626         }
2627
2628         ap->rxconfig = rx_cfg_reg;
2629         ret = ANEG_OK;
2630
2631         switch(ap->state) {
2632         case ANEG_STATE_UNKNOWN:
2633                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2634                         ap->state = ANEG_STATE_AN_ENABLE;
2635
2636                 /* fallthru */
2637         case ANEG_STATE_AN_ENABLE:
2638                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2639                 if (ap->flags & MR_AN_ENABLE) {
2640                         ap->link_time = 0;
2641                         ap->cur_time = 0;
2642                         ap->ability_match_cfg = 0;
2643                         ap->ability_match_count = 0;
2644                         ap->ability_match = 0;
2645                         ap->idle_match = 0;
2646                         ap->ack_match = 0;
2647
2648                         ap->state = ANEG_STATE_RESTART_INIT;
2649                 } else {
2650                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2651                 }
2652                 break;
2653
2654         case ANEG_STATE_RESTART_INIT:
2655                 ap->link_time = ap->cur_time;
2656                 ap->flags &= ~(MR_NP_LOADED);
2657                 ap->txconfig = 0;
2658                 tw32(MAC_TX_AUTO_NEG, 0);
2659                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2660                 tw32_f(MAC_MODE, tp->mac_mode);
2661                 udelay(40);
2662
2663                 ret = ANEG_TIMER_ENAB;
2664                 ap->state = ANEG_STATE_RESTART;
2665
2666                 /* fallthru */
2667         case ANEG_STATE_RESTART:
2668                 delta = ap->cur_time - ap->link_time;
2669                 if (delta > ANEG_STATE_SETTLE_TIME) {
2670                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2671                 } else {
2672                         ret = ANEG_TIMER_ENAB;
2673                 }
2674                 break;
2675
2676         case ANEG_STATE_DISABLE_LINK_OK:
2677                 ret = ANEG_DONE;
2678                 break;
2679
2680         case ANEG_STATE_ABILITY_DETECT_INIT:
2681                 ap->flags &= ~(MR_TOGGLE_TX);
2682                 ap->txconfig = ANEG_CFG_FD;
2683                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2684                 if (flowctrl & ADVERTISE_1000XPAUSE)
2685                         ap->txconfig |= ANEG_CFG_PS1;
2686                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2687                         ap->txconfig |= ANEG_CFG_PS2;
2688                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2689                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2690                 tw32_f(MAC_MODE, tp->mac_mode);
2691                 udelay(40);
2692
2693                 ap->state = ANEG_STATE_ABILITY_DETECT;
2694                 break;
2695
2696         case ANEG_STATE_ABILITY_DETECT:
2697                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2698                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2699                 }
2700                 break;
2701
2702         case ANEG_STATE_ACK_DETECT_INIT:
2703                 ap->txconfig |= ANEG_CFG_ACK;
2704                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2705                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2706                 tw32_f(MAC_MODE, tp->mac_mode);
2707                 udelay(40);
2708
2709                 ap->state = ANEG_STATE_ACK_DETECT;
2710
2711                 /* fallthru */
2712         case ANEG_STATE_ACK_DETECT:
2713                 if (ap->ack_match != 0) {
2714                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2715                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2716                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2717                         } else {
2718                                 ap->state = ANEG_STATE_AN_ENABLE;
2719                         }
2720                 } else if (ap->ability_match != 0 &&
2721                            ap->rxconfig == 0) {
2722                         ap->state = ANEG_STATE_AN_ENABLE;
2723                 }
2724                 break;
2725
2726         case ANEG_STATE_COMPLETE_ACK_INIT:
2727                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2728                         ret = ANEG_FAILED;
2729                         break;
2730                 }
2731                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2732                                MR_LP_ADV_HALF_DUPLEX |
2733                                MR_LP_ADV_SYM_PAUSE |
2734                                MR_LP_ADV_ASYM_PAUSE |
2735                                MR_LP_ADV_REMOTE_FAULT1 |
2736                                MR_LP_ADV_REMOTE_FAULT2 |
2737                                MR_LP_ADV_NEXT_PAGE |
2738                                MR_TOGGLE_RX |
2739                                MR_NP_RX);
2740                 if (ap->rxconfig & ANEG_CFG_FD)
2741                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2742                 if (ap->rxconfig & ANEG_CFG_HD)
2743                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2744                 if (ap->rxconfig & ANEG_CFG_PS1)
2745                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2746                 if (ap->rxconfig & ANEG_CFG_PS2)
2747                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2748                 if (ap->rxconfig & ANEG_CFG_RF1)
2749                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2750                 if (ap->rxconfig & ANEG_CFG_RF2)
2751                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2752                 if (ap->rxconfig & ANEG_CFG_NP)
2753                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2754
2755                 ap->link_time = ap->cur_time;
2756
2757                 ap->flags ^= (MR_TOGGLE_TX);
2758                 if (ap->rxconfig & 0x0008)
2759                         ap->flags |= MR_TOGGLE_RX;
2760                 if (ap->rxconfig & ANEG_CFG_NP)
2761                         ap->flags |= MR_NP_RX;
2762                 ap->flags |= MR_PAGE_RX;
2763
2764                 ap->state = ANEG_STATE_COMPLETE_ACK;
2765                 ret = ANEG_TIMER_ENAB;
2766                 break;
2767
2768         case ANEG_STATE_COMPLETE_ACK:
2769                 if (ap->ability_match != 0 &&
2770                     ap->rxconfig == 0) {
2771                         ap->state = ANEG_STATE_AN_ENABLE;
2772                         break;
2773                 }
2774                 delta = ap->cur_time - ap->link_time;
2775                 if (delta > ANEG_STATE_SETTLE_TIME) {
2776                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2777                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2778                         } else {
2779                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2780                                     !(ap->flags & MR_NP_RX)) {
2781                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2782                                 } else {
2783                                         ret = ANEG_FAILED;
2784                                 }
2785                         }
2786                 }
2787                 break;
2788
2789         case ANEG_STATE_IDLE_DETECT_INIT:
2790                 ap->link_time = ap->cur_time;
2791                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2792                 tw32_f(MAC_MODE, tp->mac_mode);
2793                 udelay(40);
2794
2795                 ap->state = ANEG_STATE_IDLE_DETECT;
2796                 ret = ANEG_TIMER_ENAB;
2797                 break;
2798
2799         case ANEG_STATE_IDLE_DETECT:
2800                 if (ap->ability_match != 0 &&
2801                     ap->rxconfig == 0) {
2802                         ap->state = ANEG_STATE_AN_ENABLE;
2803                         break;
2804                 }
2805                 delta = ap->cur_time - ap->link_time;
2806                 if (delta > ANEG_STATE_SETTLE_TIME) {
2807                         /* XXX another gem from the Broadcom driver :( */
2808                         ap->state = ANEG_STATE_LINK_OK;
2809                 }
2810                 break;
2811
2812         case ANEG_STATE_LINK_OK:
2813                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2814                 ret = ANEG_DONE;
2815                 break;
2816
2817         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2818                 /* ??? unimplemented */
2819                 break;
2820
2821         case ANEG_STATE_NEXT_PAGE_WAIT:
2822                 /* ??? unimplemented */
2823                 break;
2824
2825         default:
2826                 ret = ANEG_FAILED;
2827                 break;
2828         };
2829
2830         return ret;
2831 }
2832
2833 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2834 {
2835         int res = 0;
2836         struct tg3_fiber_aneginfo aninfo;
2837         int status = ANEG_FAILED;
2838         unsigned int tick;
2839         u32 tmp;
2840
2841         tw32_f(MAC_TX_AUTO_NEG, 0);
2842
2843         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2844         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2845         udelay(40);
2846
2847         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2848         udelay(40);
2849
2850         memset(&aninfo, 0, sizeof(aninfo));
2851         aninfo.flags |= MR_AN_ENABLE;
2852         aninfo.state = ANEG_STATE_UNKNOWN;
2853         aninfo.cur_time = 0;
2854         tick = 0;
2855         while (++tick < 195000) {
2856                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2857                 if (status == ANEG_DONE || status == ANEG_FAILED)
2858                         break;
2859
2860                 udelay(1);
2861         }
2862
2863         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2864         tw32_f(MAC_MODE, tp->mac_mode);
2865         udelay(40);
2866
2867         *txflags = aninfo.txconfig;
2868         *rxflags = aninfo.flags;
2869
2870         if (status == ANEG_DONE &&
2871             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2872                              MR_LP_ADV_FULL_DUPLEX)))
2873                 res = 1;
2874
2875         return res;
2876 }
2877
/* Initialize the BCM8002 SerDes PHY via MII writes.  The register
 * numbers and values below are undocumented vendor magic; the inline
 * comments carried over from the original describe each step's intent.
 * The exact write ordering and delays are part of the sequence.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2927
/* Configure fiber link using the hardware (SG_DIG) autonegotiation
 * engine.  @mac_status is a caller-supplied snapshot of MAC_STATUS.
 * Returns 1 when the link is up, 0 otherwise.
 *
 * NOTE(review): the serdes_cfg manipulation is a hardware workaround
 * whose magic constants (0xc010000 / 0x4010000 / 0xc011000) are not
 * documented here; they select per-port values — do not "clean up".
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* The workaround applies to every chip EXCEPT 5704 A0/A1. */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable HW autoneg if it was left on. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold our pause advertisement into the expected control word. */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If a parallel-detected link is still synced and quiet,
		 * ride it out instead of restarting autoneg immediately.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset into the autoneg engine. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: derive flow control from
			 * our advertisement and the partner's ability.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: fall back to
				 * parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: re-arm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
3069
/* Configure fiber link without the hardware autoneg engine: either run
 * the software autoneg state machine (fiber_autoneg) or force 1000FD.
 * @mac_status is a caller-supplied snapshot of MAC_STATUS.
 * Returns 1 when the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal; give up early. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			/* Translate the negotiated config words into
			 * 1000BASE-X pause advertisement bits.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-change events until they stop arriving
		 * (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we are synced and quiet: treat a
		 * parallel-detected link as up.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS to kick the peer, then restore. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
3131
/* Top-level link setup for SERDES/TBI fiber devices.  Puts the MAC in
 * TBI port mode, runs hardware or software autoneg, then updates link
 * state, LED control, and carrier, reporting any change.  Always
 * returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current state so a change can be reported at the end. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done,
	 * and MAC_STATUS shows a clean synced link with no pending
	 * config changes -- just ack the change bits and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC to TBI (fiber) port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the latched link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack pending sync/config/link change events, polling up to
	 * 100 times for them to settle.
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg is enabled but its countdown has expired and
		 * we still have no sync: briefly assert SEND_CONFIGS.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* TBI fiber link is always 1000 full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Update carrier state; report when carrier toggled, or when
	 * only speed/duplex/flow-control changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3239
/* Link setup for fiber serdes devices that expose an MII register
 * interface (e.g. 5714S-class parts).  Parallels the copper path but
 * uses the 1000BASE-X advertisement bits; speed is always 1000, only
 * duplex and pause are negotiable.  Returns the OR of tg3_readphy()
 * error results.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched MAC status-change events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched; read twice for current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC's TX status is the authoritative
		 * link indication; override the BMSR bit with it.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
	     tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured duplex and flow-control settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * write it, restart autoneg, and arm the serdes
			 * countdown so the timer can poll for completion.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: speed is fixed at 1000, only the duplex
		 * bit is configurable.
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear the advertisement and restart
				 * autoneg so the link drops before the
				 * new forced settings are applied.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched BMSR: read twice for fresh state. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the partner's abilities; no
			 * common ability means no usable link.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	/* Flow control is only meaningful on a full-duplex link. */
	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Update carrier state and report any link change; dropping
	 * carrier also cancels parallel-detect mode.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3410
/* Parallel-detection state machine for MII serdes links.  Called
 * repeatedly; tp->serdes_counter counts down one per call to give
 * autoneg time to finish.  After that: if there is no carrier but we
 * see signal with no incoming config words, force the link up by
 * parallel detection; conversely, if a parallel-detected link starts
 * receiving config words, hand control back to autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Double read -- presumably the first read
			 * clears latched state; NOTE(review): confirm
			 * against the PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3468
/* Top-level PHY/link configuration entry point.  Dispatches to the
 * fiber, fiber-MII, or copper setup routine based on the PHY type
 * flags, then applies chip/link-dependent fixups: GRC timer prescaler
 * (5784 A0/A1), TX slot time, statistics coalescing, and the ASPM
 * L1-entry threshold workaround.  Returns the setup routine's error.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Reprogram the GRC timer prescaler to track the
		 * current MAC clock rate reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000/half needs a larger slot time (0xff vs. 32); the IPG
	 * fields are identical in both cases.
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: coalesce statistics only while the link is
	 * up; a zero tick value disables it.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: apply the configured L1-entry threshold
	 * only while the link is down; set the full mask when up.
	 */
	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3531
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active, or tx mailbox
	 * writes already go through the indirect path, this situation
	 * should be impossible -- treat it as a fatal driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Only flag the pending recovery here; the actual chip reset
	 * happens later from the workqueue.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3552
3553 static inline u32 tg3_tx_avail(struct tg3 *tp)
3554 {
3555         smp_mb();
3556         return (tp->tx_pending -
3557                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3558 }
3559
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* Hardware consumer index from the status block vs. our
	 * software consumer index.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reclaim every skb whose descriptors the chip has consumed. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the chip and the
		 * driver disagree about ring state -- start recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor carries the skb's linear data. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Then one descriptor per page fragment. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Fragment slots must hold no skb, and we must
			 * not cross the hardware index mid-skb.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if it was stopped and enough space is free;
	 * recheck under the tx lock to close the race with the xmit
	 * path stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3627
3628 /* Returns size of skb allocated or < 0 on error.
3629  *
3630  * We only need to fill in the address because the other members
3631  * of the RX descriptor are invariant, see tg3_init_rings.
3632  *
3633  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3634  * posting buffers we only dirty the first cache line of the RX
3635  * descriptor (containing the address).  Whereas for the RX status
3636  * buffers the cpu only reads the last cacheline of the RX descriptor
3637  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3638  */
3639 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3640                             int src_idx, u32 dest_idx_unmasked)
3641 {
3642         struct tg3_rx_buffer_desc *desc;
3643         struct ring_info *map, *src_map;
3644         struct sk_buff *skb;
3645         dma_addr_t mapping;
3646         int skb_size, dest_idx;
3647
3648         src_map = NULL;
3649         switch (opaque_key) {
3650         case RXD_OPAQUE_RING_STD:
3651                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3652                 desc = &tp->rx_std[dest_idx];
3653                 map = &tp->rx_std_buffers[dest_idx];
3654                 if (src_idx >= 0)
3655                         src_map = &tp->rx_std_buffers[src_idx];
3656                 skb_size = tp->rx_pkt_buf_sz;
3657                 break;
3658
3659         case RXD_OPAQUE_RING_JUMBO:
3660                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3661                 desc = &tp->rx_jumbo[dest_idx];
3662                 map = &tp->rx_jumbo_buffers[dest_idx];
3663                 if (src_idx >= 0)
3664                         src_map = &tp->rx_jumbo_buffers[src_idx];
3665                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3666                 break;
3667
3668         default:
3669                 return -EINVAL;
3670         };
3671
3672         /* Do not overwrite any of the map or rp information
3673          * until we are sure we can commit to a new buffer.
3674          *
3675          * Callers depend upon this behavior and assume that
3676          * we leave everything unchanged if we fail.
3677          */
3678         skb = netdev_alloc_skb(tp->dev, skb_size);
3679         if (skb == NULL)
3680                 return -ENOMEM;
3681
3682         skb_reserve(skb, tp->rx_offset);
3683
3684         mapping = pci_map_single(tp->pdev, skb->data,
3685                                  skb_size - tp->rx_offset,
3686                                  PCI_DMA_FROMDEVICE);
3687
3688         map->skb = skb;
3689         pci_unmap_addr_set(map, mapping, mapping);
3690
3691         if (src_map != NULL)
3692                 src_map->skb = NULL;
3693
3694         desc->addr_hi = ((u64)mapping >> 32);
3695         desc->addr_lo = ((u64)mapping & 0xffffffff);
3696
3697         return skb_size;
3698 }
3699
3700 /* We only need to move over in the address because the other
3701  * members of the RX descriptor are invariant.  See notes above
3702  * tg3_alloc_rx_skb for full details.
3703  */
3704 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3705                            int src_idx, u32 dest_idx_unmasked)
3706 {
3707         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3708         struct ring_info *src_map, *dest_map;
3709         int dest_idx;
3710
3711         switch (opaque_key) {
3712         case RXD_OPAQUE_RING_STD:
3713                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3714                 dest_desc = &tp->rx_std[dest_idx];
3715                 dest_map = &tp->rx_std_buffers[dest_idx];
3716                 src_desc = &tp->rx_std[src_idx];
3717                 src_map = &tp->rx_std_buffers[src_idx];
3718                 break;
3719
3720         case RXD_OPAQUE_RING_JUMBO:
3721                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3722                 dest_desc = &tp->rx_jumbo[dest_idx];
3723                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3724                 src_desc = &tp->rx_jumbo[src_idx];
3725                 src_map = &tp->rx_jumbo_buffers[src_idx];
3726                 break;
3727
3728         default:
3729                 return;
3730         };
3731
3732         dest_map->skb = src_map->skb;
3733         pci_unmap_addr_set(dest_map, mapping,
3734                            pci_unmap_addr(src_map, mapping));
3735         dest_desc->addr_hi = src_desc->addr_hi;
3736         dest_desc->addr_lo = src_desc->addr_lo;
3737
3738         src_map->skb = NULL;
3739 }
3740
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged receive skb to the stack through the VLAN
 * acceleration path, using the group registered in tp->vlgrp.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3747
3748 /* The RX ring scheme is composed of multiple rings which post fresh
3749  * buffers to the chip, and one special ring the chip uses to report
3750  * status back to the host.
3751  *
3752  * The special ring reports the status of received packets to the
3753  * host.  The chip does not write into the original descriptor the
3754  * RX buffer was obtained from.  The chip simply takes the original
3755  * descriptor as provided by the host, updates the status and length
3756  * field, then writes this into the next status ring entry.
3757  *
3758  * Each ring the host uses to post buffers to the chip is described
3759  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3760  * it is first placed into the on-chip ram.  When the packet's length
3761  * is known, it walks down the TG3_BDINFO entries to select the ring.
3762  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3763  * which is within the range of the new packet's length is chosen.
3764  *
3765  * The "separate ring for rx status" scheme may sound queer, but it makes
3766  * sense from a cache coherency perspective.  If only the host writes
3767  * to the buffer post rings, and only the chip writes to the rx status
3768  * rings, then cache lines never move beyond shared-modified state.
3769  * If both the host and chip were to write into the same ring, cache line
3770  * eviction could occur since both entities want it in an exclusive state.
3771  */
3772 static int tg3_rx(struct tg3 *tp, int budget)
3773 {
3774         u32 work_mask, rx_std_posted = 0;
3775         u32 sw_idx = tp->rx_rcb_ptr;
3776         u16 hw_idx;
3777         int received;
3778
3779         hw_idx = tp->hw_status->idx[0].rx_producer;
3780         /*
3781          * We need to order the read of hw_idx and the read of
3782          * the opaque cookie.
3783          */
3784         rmb();
3785         work_mask = 0;
3786         received = 0;
3787         while (sw_idx != hw_idx && budget > 0) {
3788                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3789                 unsigned int len;
3790                 struct sk_buff *skb;
3791                 dma_addr_t dma_addr;
3792                 u32 opaque_key, desc_idx, *post_ptr;
3793
3794                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3795                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3796                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3797                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3798                                                   mapping);
3799                         skb = tp->rx_std_buffers[desc_idx].skb;
3800                         post_ptr = &tp->rx_std_ptr;
3801                         rx_std_posted++;
3802                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3803                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3804                                                   mapping);
3805                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3806                         post_ptr = &tp->rx_jumbo_ptr;
3807                 }
3808                 else {
3809                         goto next_pkt_nopost;
3810                 }
3811
3812                 work_mask |= opaque_key;
3813
3814                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3815                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3816                 drop_it:
3817                         tg3_recycle_rx(tp, opaque_key,
3818                                        desc_idx, *post_ptr);
3819                 drop_it_no_recycle:
3820                         /* Other statistics kept track of by card. */
3821                         tp->net_stats.rx_dropped++;
3822                         goto next_pkt;
3823                 }
3824
3825                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3826
3827                 if (len > RX_COPY_THRESHOLD
3828                         && tp->rx_offset == 2
3829                         /* rx_offset != 2 iff this is a 5701 card running
3830                          * in PCI-X mode [see tg3_get_invariants()] */
3831                 ) {
3832                         int skb_size;
3833
3834                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3835                                                     desc_idx, *post_ptr);
3836                         if (skb_size < 0)
3837                                 goto drop_it;
3838
3839                         pci_unmap_single(tp->pdev, dma_addr,
3840                                          skb_size - tp->rx_offset,
3841                                          PCI_DMA_FROMDEVICE);
3842
3843                         skb_put(skb, len);
3844                 } else {
3845                         struct sk_buff *copy_skb;
3846
3847                         tg3_recycle_rx(tp, opaque_key,
3848                                        desc_idx, *post_ptr);
3849
3850                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3851                         if (copy_skb == NULL)
3852                                 goto drop_it_no_recycle;
3853
3854                         skb_reserve(copy_skb, 2);
3855                         skb_put(copy_skb, len);
3856                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3857                         skb_copy_from_linear_data(skb, copy_skb->data, len);
3858                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3859
3860                         /* We'll reuse the original ring buffer. */
3861                         skb = copy_skb;
3862                 }
3863
3864                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3865                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3866                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3867                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3868                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3869                 else
3870                         skb->ip_summed = CHECKSUM_NONE;
3871
3872                 skb->protocol = eth_type_trans(skb, tp->dev);
3873 #if TG3_VLAN_TAG_USED
3874                 if (tp->vlgrp != NULL &&
3875                     desc->type_flags & RXD_FLAG_VLAN) {
3876                         tg3_vlan_rx(tp, skb,
3877                                     desc->err_vlan & RXD_VLAN_MASK);
3878                 } else
3879 #endif
3880                         netif_receive_skb(skb);
3881
3882                 tp->dev->last_rx = jiffies;
3883                 received++;
3884                 budget--;
3885
3886 next_pkt:
3887                 (*post_ptr)++;
3888
3889                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3890                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3891
3892                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3893                                      TG3_64BIT_REG_LOW, idx);
3894                         work_mask &= ~RXD_OPAQUE_RING_STD;
3895                         rx_std_posted = 0;
3896                 }
3897 next_pkt_nopost:
3898                 sw_idx++;
3899                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3900
3901                 /* Refresh hw_idx to see if there is new work */
3902                 if (sw_idx == hw_idx) {
3903                         hw_idx = tp->hw_status->idx[0].rx_producer;
3904                         rmb();
3905                 }
3906         }
3907
3908         /* ACK the status ring. */
3909         tp->rx_rcb_ptr = sw_idx;
3910         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3911
3912         /* Refill RX ring(s). */
3913         if (work_mask & RXD_OPAQUE_RING_STD) {
3914                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3915                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3916                              sw_idx);
3917         }
3918         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3919                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3920                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3921                              sw_idx);
3922         }
3923         mmiowb();
3924
3925         return received;
3926 }
3927
/* One NAPI poll iteration: handle link-change events, reap completed
 * TX descriptors, then run RX within the remaining NAPI budget.
 * Returns the updated work_done count; returns early (skipping RX)
 * when tg3_tx() has flagged a pending TX recovery.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
        struct tg3_hw_status *sblk = tp->hw_status;

        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear only the link-change bit; keep
                         * SD_STATUS_UPDATED set in the status block.
                         */
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                                /* phylib owns the link state machine;
                                 * just ack the MAC status bits here.
                                 */
                                tw32_f(MAC_STATUS,
                                     (MAC_STATUS_SYNC_CHANGED |
                                      MAC_STATUS_CFG_CHANGED |
                                      MAC_STATUS_MI_COMPLETION |
                                      MAC_STATUS_LNKSTATE_CHANGED));
                                udelay(40);
                        } else
                                tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }

        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                tg3_tx(tp);
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        return work_done;
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_done += tg3_rx(tp, budget - work_done);

        return work_done;
}
3969
/* NAPI poll callback.  Repeatedly calls tg3_poll_work() until the
 * budget is exhausted or no work remains; in the latter case it
 * completes NAPI and re-enables interrupts via tg3_restart_ints().
 * A pending TX recovery instead completes NAPI and schedules the
 * reset task.  Returns the amount of work done.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3 *tp = container_of(napi, struct tg3, napi);
        int work_done = 0;
        struct tg3_hw_status *sblk = tp->hw_status;

        while (1) {
                work_done = tg3_poll_work(tp, work_done, budget);

                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
                        /* tp->last_tag is used in tg3_restart_ints() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tp->last_tag = sblk->status_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tp))) {
                        netif_rx_complete(tp->dev, napi);
                        tg3_restart_ints(tp);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        netif_rx_complete(tp->dev, napi);
        schedule_work(&tp->reset_task);
        return work_done;
}
4010
/* Shut the IRQ handler out: set tp->irq_sync (which the handlers test
 * via tg3_irq_sync() before scheduling NAPI) and then wait for any
 * handler already executing on another CPU to finish.  The caller is
 * responsible for clearing tp->irq_sync later to re-arm.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make irq_sync visible before waiting on in-flight handlers. */
        smp_mb();

        synchronize_irq(tp->pdev->irq);
}
4020
/* Nonzero while tg3_irq_quiesce() has shut the IRQ handler out. */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
4025
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 * Pairs with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
4037
/* Release the lock taken by tg3_full_lock().  Note this does NOT
 * clear tp->irq_sync; callers that quiesced the IRQ handler must
 * re-arm it themselves.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
4042
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 * Just prefetch the status block / RX return ring entry the poll
 * routine will touch first, then schedule NAPI (unless the handler
 * has been quiesced by tg3_irq_quiesce()).
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev, &tp->napi);

        return IRQ_HANDLED;
}
4059
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev, &tp->napi);

        /* MSI is never shared, so we always handled it. */
        return IRQ_RETVAL(1);
}
4084
/* Legacy INTx interrupt handler (non-tagged status mode).  Confirms
 * the interrupt is ours, acks/disables chip interrupts via the
 * interrupt mailbox, and schedules NAPI if there is work.  Returns
 * IRQ_NONE (handled == 0) for shared-line interrupts that are not
 * ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tp))) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                netif_rx_schedule(dev, &tp->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
4133
/* Legacy INTx interrupt handler for chips using tagged status blocks.
 * Identical in structure to tg3_interrupt() except that "is there new
 * work" is detected by comparing the status tag against tp->last_tag,
 * and last_tag is only advanced when this CPU wins the race to
 * schedule NAPI (see comment below).
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tp->last_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        if (netif_rx_schedule_prep(dev, &tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
                 * racing with tg3_poll(), so only update last_tag
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
                __netif_rx_schedule(dev, &tp->napi);
        }
out:
        return IRQ_RETVAL(handled);
}
4181
/* ISR for interrupt test.  Claims the interrupt (and disables chip
 * interrupts) only if the status block was updated or the PCI state
 * register shows the interrupt line active; the test harness then
 * checks whether this handler fired.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;

        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                tg3_disable_ints(tp);
                return IRQ_RETVAL(1);
        }
        return IRQ_RETVAL(0);
}
4196
4197 static int tg3_init_hw(struct tg3 *, int);
4198 static int tg3_halt(struct tg3 *, int, int);
4199
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and closed; note that dev_close()
 * requires the lock to be dropped, hence the __releases/__acquires
 * annotations — the lock is temporarily released and re-taken on the
 * error path.  Returns 0 on success or the tg3_init_hw() error code.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
                       "aborting.\n", tp->dev->name);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                /* Re-arm the IRQ handler and NAPI before closing. */
                tp->irq_sync = 0;
                napi_enable(&tp->napi);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
4223
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: drive the device by invoking the INTx interrupt
 * handler directly (used by netconsole/kgdboe when normal interrupt
 * delivery cannot be relied upon).
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4232
/* Deferred full chip reset, run from process context via the
 * tp->reset_task workqueue (scheduled by tg3_tx_timeout() and the
 * TX-recovery path).  Stops the netif layer, halts and reinitializes
 * the hardware, then restarts everything.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        /* tg3_netif_stop() must be called without the lock held. */
        tg3_full_unlock(tp);

        tg3_netif_stop(tp);

        /* irq_sync == 1: also quiesce the IRQ handler for the reset. */
        tg3_full_lock(tp, 1);

        /* Consume the one-shot restart-timer request, if any. */
        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                /* Fall back to flushed mailbox writes after a TX hang. */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        if (tg3_init_hw(tp, 1))
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);
}
4273
/* Dump a minimal set of MAC/DMA status registers to the kernel log
 * for post-mortem analysis of a TX timeout.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
        printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
        printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
               tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4281
4282 static void tg3_tx_timeout(struct net_device *dev)
4283 {
4284         struct tg3 *tp = netdev_priv(dev);
4285
4286         if (netif_msg_tx_err(tp)) {
4287                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4288                        dev->name);
4289                 tg3_dump_short_state(tp);
4290         }
4291
4292         schedule_work(&tp->reset_task);
4293 }
4294
4295 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4296 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4297 {
4298         u32 base = (u32) mapping & 0xffffffff;
4299
4300         return ((base > 0xffffdcc0) &&
4301                 (base + len + 8 < base));
4302 }
4303
/* Test for DMA addresses > 40-bit.  Returns nonzero when the buffer
 * end exceeds the 40-bit DMA limit on chips with the 40-bit DMA bug.
 * Addresses above 40 bits can only occur on 64-bit highmem configs,
 * so the check compiles away everywhere else.
 */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
                return (((u64) mapping + len) > DMA_40BIT_MASK);
        return 0;
#else
        return 0;
#endif
}
4316
4317 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4318
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Replaces the skb occupying TX ring entries [*start, last_plus_one)
 * with a freshly-allocated linear copy that is safe to DMA, re-fills
 * the first descriptor, and unmaps/clears the old software ring
 * entries.  On success *start is advanced past the new descriptor.
 * Returns 0 on success, -1 if allocation fails or the copy itself
 * still crosses a 4GB boundary (in which case the packet is dropped).
 * The original skb is always freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        struct sk_buff *new_skb;
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* 5701 needs the data 4-byte aligned; copy with enough
                 * extra headroom to realign.
                 */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
                int len;

                /* Entry 0 held the linear head; subsequent entries held
                 * the page fragments of the original skb.
                 */
                if (i == 0)
                        len = skb_headlen(skb);
                else
                        len = skb_shinfo(skb)->frags[i-1].size;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
                                 len, PCI_DMA_TODEVICE);
                if (i == 0) {
                        /* First entry now owns the replacement skb
                         * (NULL if the workaround failed above).
                         */
                        tp->tx_buffers[entry].skb = new_skb;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        dev_kfree_skb(skb);

        return ret;
}
4386
4387 static void tg3_set_txd(struct tg3 *tp, int entry,
4388                         dma_addr_t mapping, int len, u32 flags,
4389                         u32 mss_and_is_end)
4390 {
4391         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4392         int is_end = (mss_and_is_end & 0x1);
4393         u32 mss = (mss_and_is_end >> 1);
4394         u32 vlan_tag = 0;
4395
4396         if (is_end)
4397                 flags |= TXD_FLAG_END;
4398         if (flags & TXD_FLAG_VLAN) {
4399                 vlan_tag = flags >> 16;
4400                 flags &= 0xffff;
4401         }
4402         vlan_tag |= (mss << TXD_MSS_SHIFT);
4403
4404         txd->addr_hi = ((u64) mapping >> 32);
4405         txd->addr_lo = ((u64) mapping & 0xffffffff);
4406         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4407         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4408 }
4409
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb head and all page fragments, fills one TX descriptor
 * per piece via tg3_set_txd(), then kicks the hardware by writing the
 * new producer index to the send-host mailbox.  Returns NETDEV_TX_OK,
 * or NETDEV_TX_BUSY when the ring unexpectedly has no room.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* We must not modify a cloned header; unclone first. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                /* Encode the L3+L4 header length into the upper bits of
                 * mss as the hardware TSO interface expects.
                 */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Hardware recomputes these per segment. */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        /* Low bit marks the last descriptor of the frame. */
        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only the first ring entry owns the skb. */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping to close the race against a
                 * concurrent tg3_tx() completion freeing descriptors.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4528
4529 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4530
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments the skb in software via skb_gso_segment() and transmits
 * each resulting segment through tg3_start_xmit_dma_bug().  The
 * original skb is always freed.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY if the ring cannot hold the worst-case number of
 * descriptors for the segments.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
                netif_stop_queue(tp->dev);
                if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
                        return NETDEV_TX_BUSY;

                /* Space freed up between the checks; resume the queue. */
                netif_wake_queue(tp->dev);
        }

        /* Mask off TSO so the stack produces fully-segmented skbs. */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        /* Detach each segment from the list and transmit it. */
        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
4563
4564 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4565  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4566  */
4567 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4568 {
4569         struct tg3 *tp = netdev_priv(dev);
4570         dma_addr_t mapping;
4571         u32 len, entry, base_flags, mss;
4572         int would_hit_hwbug;
4573
4574         len = skb_headlen(skb);
4575
4576         /* We are running in BH disabled context with netif_tx_lock
4577          * and TX reclaim runs via tp->napi.poll inside of a software
4578          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4579          * no IRQ context deadlocks to worry about either.  Rejoice!
4580          */
4581         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4582                 if (!netif_queue_stopped(dev)) {
4583                         netif_stop_queue(dev);
4584
4585                         /* This is a hard error, log it. */
4586                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4587                                "queue awake!\n", dev->name);
4588                 }
4589                 return NETDEV_TX_BUSY;
4590         }
4591
4592         entry = tp->tx_prod;
4593         base_flags = 0;
4594         if (skb->ip_summed == CHECKSUM_PARTIAL)
4595                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4596         mss = 0;
4597         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4598                 struct iphdr *iph;
4599                 int tcp_opt_len, ip_tcp_len, hdr_len;
4600
4601                 if (skb_header_cloned(skb) &&
4602                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4603                         dev_kfree_skb(skb);
4604                         goto out_unlock;
4605                 }
4606
4607                 tcp_opt_len = tcp_optlen(skb);
4608                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4609
4610                 hdr_len = ip_tcp_len + tcp_opt_len;
4611                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4612                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4613                         return (tg3_tso_bug(tp, skb));
4614
4615                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4616                                TXD_FLAG_CPU_POST_DMA);
4617
4618                 iph = ip_hdr(skb);
4619                 iph->check = 0;
4620                 iph->tot_len = htons(mss + hdr_len);
4621                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4622                         tcp_hdr(skb)->check = 0;
4623                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4624                 } else
4625                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4626                                                                  iph->daddr, 0,
4627                                                                  IPPROTO_TCP,
4628                                                                  0);
4629
4630                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4631                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4632                         if (tcp_opt_len || iph->ihl > 5) {
4633                                 int tsflags;
4634
4635                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4636                                 mss |= (tsflags << 11);
4637                         }
4638                 } else {
4639                         if (tcp_opt_len || iph->ihl > 5) {
4640                                 int tsflags;
4641
4642                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4643                                 base_flags |= tsflags << 12;
4644                         }
4645                 }
4646         }
4647 #if TG3_VLAN_TAG_USED
4648         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4649                 base_flags |= (TXD_FLAG_VLAN |
4650                                (vlan_tx_tag_get(skb) << 16));
4651 #endif
4652
4653         /* Queue skb data, a.k.a. the main skb fragment. */
4654         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4655
4656         tp->tx_buffers[entry].skb = skb;
4657         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4658
4659         would_hit_hwbug = 0;
4660
4661         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4662                 would_hit_hwbug = 1;
4663         else if (tg3_4g_overflow_test(mapping, len))
4664                 would_hit_hwbug = 1;
4665
4666         tg3_set_txd(tp, entry, mapping, len, base_flags,
4667                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4668
4669         entry = NEXT_TX(entry);
4670
4671         /* Now loop through additional data fragments, and queue them. */
4672         if (skb_shinfo(skb)->nr_frags > 0) {
4673                 unsigned int i, last;
4674
4675                 last = skb_shinfo(skb)->nr_frags - 1;
4676                 for (i = 0; i <= last; i++) {
4677                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4678
4679                         len = frag->size;
4680                         mapping = pci_map_page(tp->pdev,
4681                                                frag->page,
4682                                                frag->page_offset,
4683                                                len, PCI_DMA_TODEVICE);
4684
4685                         tp->tx_buffers[entry].skb = NULL;
4686                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4687
4688                         if (tg3_4g_overflow_test(mapping, len))
4689                                 would_hit_hwbug = 1;
4690
4691                         if (tg3_40bit_overflow_test(tp, mapping, len))
4692                                 would_hit_hwbug = 1;
4693
4694                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4695                                 tg3_set_txd(tp, entry, mapping, len,
4696                                             base_flags, (i == last)|(mss << 1));
4697                         else
4698                                 tg3_set_txd(tp, entry, mapping, len,
4699                                             base_flags, (i == last));
4700
4701                         entry = NEXT_TX(entry);
4702                 }
4703         }
4704
4705         if (would_hit_hwbug) {
4706                 u32 last_plus_one = entry;
4707                 u32 start;
4708
4709                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4710                 start &= (TG3_TX_RING_SIZE - 1);
4711
4712                 /* If the workaround fails due to memory/mapping
4713                  * failure, silently drop this packet.
4714                  */
4715                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4716                                                 &start, base_flags, mss))
4717                         goto out_unlock;
4718
4719                 entry = start;
4720         }
4721
4722         /* Packets are ready, update Tx producer idx local and on card. */
4723         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4724
4725         tp->tx_prod = entry;
4726         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4727                 netif_stop_queue(dev);
4728                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4729                         netif_wake_queue(tp->dev);
4730         }
4731
4732 out_unlock:
4733         mmiowb();
4734
4735         dev->trans_start = jiffies;
4736
4737         return NETDEV_TX_OK;
4738 }
4739
4740 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4741                                int new_mtu)
4742 {
4743         dev->mtu = new_mtu;
4744
4745         if (new_mtu > ETH_DATA_LEN) {
4746                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4747                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4748                         ethtool_op_set_tso(dev, 0);
4749                 }
4750                 else
4751                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4752         } else {
4753                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4754                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4755                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4756         }
4757 }
4758
4759 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4760 {
4761         struct tg3 *tp = netdev_priv(dev);
4762         int err;
4763
4764         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4765                 return -EINVAL;
4766
4767         if (!netif_running(dev)) {
4768                 /* We'll just catch it later when the
4769                  * device is up'd.
4770                  */
4771                 tg3_set_mtu(dev, tp, new_mtu);
4772                 return 0;
4773         }
4774
4775         tg3_netif_stop(tp);
4776
4777         tg3_full_lock(tp, 1);
4778
4779         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4780
4781         tg3_set_mtu(dev, tp, new_mtu);
4782
4783         err = tg3_restart_hw(tp, 0);
4784
4785         if (!err)
4786                 tg3_netif_start(tp);
4787
4788         tg3_full_unlock(tp);
4789
4790         return err;
4791 }
4792
4793 /* Free up pending packets in all rx/tx rings.
4794  *
4795  * The chip has been shut down and the driver detached from
4796  * the networking, so no interrupts or new tx packets will
4797  * end up in the driver.  tp->{tx,}lock is not held and we are not
4798  * in an interrupt context and thus may sleep.
4799  */
4800 static void tg3_free_rings(struct tg3 *tp)
4801 {
4802         struct ring_info *rxp;
4803         int i;
4804
4805         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4806                 rxp = &tp->rx_std_buffers[i];
4807
4808                 if (rxp->skb == NULL)
4809                         continue;
4810                 pci_unmap_single(tp->pdev,
4811                                  pci_unmap_addr(rxp, mapping),
4812                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4813                                  PCI_DMA_FROMDEVICE);
4814                 dev_kfree_skb_any(rxp->skb);
4815                 rxp->skb = NULL;
4816         }
4817
4818         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4819                 rxp = &tp->rx_jumbo_buffers[i];
4820
4821                 if (rxp->skb == NULL)
4822                         continue;
4823                 pci_unmap_single(tp->pdev,
4824                                  pci_unmap_addr(rxp, mapping),
4825                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4826                                  PCI_DMA_FROMDEVICE);
4827                 dev_kfree_skb_any(rxp->skb);
4828                 rxp->skb = NULL;
4829         }
4830
4831         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4832                 struct tx_ring_info *txp;
4833                 struct sk_buff *skb;
4834                 int j;
4835
4836                 txp = &tp->tx_buffers[i];
4837                 skb = txp->skb;
4838
4839                 if (skb == NULL) {
4840                         i++;
4841                         continue;
4842                 }
4843
4844                 pci_unmap_single(tp->pdev,
4845                                  pci_unmap_addr(txp, mapping),
4846                                  skb_headlen(skb),
4847                                  PCI_DMA_TODEVICE);
4848                 txp->skb = NULL;
4849
4850                 i++;
4851
4852                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4853                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4854                         pci_unmap_page(tp->pdev,
4855                                        pci_unmap_addr(txp, mapping),
4856                                        skb_shinfo(skb)->frags[j].size,
4857                                        PCI_DMA_TODEVICE);
4858                         i++;
4859                 }
4860
4861                 dev_kfree_skb_any(skb);
4862         }
4863 }
4864
4865 /* Initialize tx/rx rings for packet processing.
4866  *
4867  * The chip has been shut down and the driver detached from
4868  * the networking, so no interrupts or new tx packets will
4869  * end up in the driver.  tp->{tx,}lock are held and thus
4870  * we may not sleep.
4871  */
4872 static int tg3_init_rings(struct tg3 *tp)
4873 {
4874         u32 i;
4875
4876         /* Free up all the SKBs. */
4877         tg3_free_rings(tp);
4878
4879         /* Zero out all descriptors. */
4880         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4881         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4882         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4883         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4884
4885         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4886         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4887             (tp->dev->mtu > ETH_DATA_LEN))
4888                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4889
4890         /* Initialize invariants of the rings, we only set this
4891          * stuff once.  This works because the card does not
4892          * write into the rx buffer posting rings.
4893          */
4894         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4895                 struct tg3_rx_buffer_desc *rxd;
4896
4897                 rxd = &tp->rx_std[i];
4898                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4899                         << RXD_LEN_SHIFT;
4900                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4901                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4902                                (i << RXD_OPAQUE_INDEX_SHIFT));
4903         }
4904
4905         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4906                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4907                         struct tg3_rx_buffer_desc *rxd;
4908
4909                         rxd = &tp->rx_jumbo[i];
4910                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4911                                 << RXD_LEN_SHIFT;
4912                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4913                                 RXD_FLAG_JUMBO;
4914                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4915                                (i << RXD_OPAQUE_INDEX_SHIFT));
4916                 }
4917         }
4918
4919         /* Now allocate fresh SKBs for each rx ring. */
4920         for (i = 0; i < tp->rx_pending; i++) {
4921                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4922                         printk(KERN_WARNING PFX
4923                                "%s: Using a smaller RX standard ring, "
4924                                "only %d out of %d buffers were allocated "
4925                                "successfully.\n",
4926                                tp->dev->name, i, tp->rx_pending);
4927                         if (i == 0)
4928                                 return -ENOMEM;
4929                         tp->rx_pending = i;
4930                         break;
4931                 }
4932         }
4933
4934         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4935                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4936                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4937                                              -1, i) < 0) {
4938                                 printk(KERN_WARNING PFX
4939                                        "%s: Using a smaller RX jumbo ring, "
4940                                        "only %d out of %d buffers were "
4941                                        "allocated successfully.\n",
4942                                        tp->dev->name, i, tp->rx_jumbo_pending);
4943                                 if (i == 0) {
4944                                         tg3_free_rings(tp);
4945                                         return -ENOMEM;
4946                                 }
4947                                 tp->rx_jumbo_pending = i;
4948                                 break;
4949                         }
4950                 }
4951         }
4952         return 0;
4953 }
4954
4955 /*
4956  * Must not be invoked with interrupt sources disabled and
4957  * the hardware shut down.
4958  */
4959 static void tg3_free_consistent(struct tg3 *tp)
4960 {
4961         kfree(tp->rx_std_buffers);
4962         tp->rx_std_buffers = NULL;
4963         if (tp->rx_std) {
4964                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4965                                     tp->rx_std, tp->rx_std_mapping);
4966                 tp->rx_std = NULL;
4967         }
4968         if (tp->rx_jumbo) {
4969                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4970                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4971                 tp->rx_jumbo = NULL;
4972         }
4973         if (tp->rx_rcb) {
4974                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4975                                     tp->rx_rcb, tp->rx_rcb_mapping);
4976                 tp->rx_rcb = NULL;
4977         }
4978         if (tp->tx_ring) {
4979                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4980                         tp->tx_ring, tp->tx_desc_mapping);
4981                 tp->tx_ring = NULL;
4982         }
4983         if (tp->hw_status) {
4984                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4985                                     tp->hw_status, tp->status_mapping);
4986                 tp->hw_status = NULL;
4987         }
4988         if (tp->hw_stats) {
4989                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4990                                     tp->hw_stats, tp->stats_mapping);
4991                 tp->hw_stats = NULL;
4992         }
4993 }
4994
4995 /*
4996  * Must not be invoked with interrupt sources disabled and
4997  * the hardware shut down.  Can sleep.
4998  */
4999 static int tg3_alloc_consistent(struct tg3 *tp)
5000 {
5001         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5002                                       (TG3_RX_RING_SIZE +
5003                                        TG3_RX_JUMBO_RING_SIZE)) +
5004                                      (sizeof(struct tx_ring_info) *
5005                                       TG3_TX_RING_SIZE),
5006                                      GFP_KERNEL);
5007         if (!tp->rx_std_buffers)
5008                 return -ENOMEM;
5009
5010         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5011         tp->tx_buffers = (struct tx_ring_info *)
5012                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5013
5014         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5015                                           &tp->rx_std_mapping);
5016         if (!tp->rx_std)
5017                 goto err_out;
5018
5019         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5020                                             &tp->rx_jumbo_mapping);
5021
5022         if (!tp->rx_jumbo)
5023                 goto err_out;
5024
5025         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5026                                           &tp->rx_rcb_mapping);
5027         if (!tp->rx_rcb)
5028                 goto err_out;
5029
5030         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5031                                            &tp->tx_desc_mapping);
5032         if (!tp->tx_ring)
5033                 goto err_out;
5034
5035         tp->hw_status = pci_alloc_consistent(tp->pdev,
5036                                              TG3_HW_STATUS_SIZE,
5037                                              &tp->status_mapping);
5038         if (!tp->hw_status)
5039                 goto err_out;
5040
5041         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5042                                             sizeof(struct tg3_hw_stats),
5043                                             &tp->stats_mapping);
5044         if (!tp->hw_stats)
5045                 goto err_out;
5046
5047         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5048         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5049
5050         return 0;
5051
5052 err_out:
5053         tg3_free_consistent(tp);
5054         return -ENOMEM;
5055 }
5056
5057 #define MAX_WAIT_CNT 1000
5058
5059 /* To stop a block, clear the enable bit and poll till it
5060  * clears.  tp->lock is held.
5061  */
5062 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5063 {
5064         unsigned int i;
5065         u32 val;
5066
5067         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5068                 switch (ofs) {
5069                 case RCVLSC_MODE:
5070                 case DMAC_MODE:
5071                 case MBFREE_MODE:
5072                 case BUFMGR_MODE:
5073                 case MEMARB_MODE:
5074                         /* We can't enable/disable these bits of the
5075                          * 5705/5750, just say success.
5076                          */
5077                         return 0;
5078
5079                 default:
5080                         break;
5081                 };
5082         }
5083
5084         val = tr32(ofs);
5085         val &= ~enable_bit;
5086         tw32_f(ofs, val);
5087
5088         for (i = 0; i < MAX_WAIT_CNT; i++) {
5089                 udelay(100);
5090                 val = tr32(ofs);
5091                 if ((val & enable_bit) == 0)
5092                         break;
5093         }
5094
5095         if (i == MAX_WAIT_CNT && !silent) {
5096                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5097                        "ofs=%lx enable_bit=%x\n",
5098                        ofs, enable_bit);
5099                 return -ENODEV;
5100         }
5101
5102         return 0;
5103 }
5104
5105 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop the receive MAC first so no new frames enter the chip. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Shut down the rx-side blocks.  Errors are OR-ed together so
         * every block is attempted and any failure is reported at the end.
         */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Then the tx-side blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* Disable the transmit MAC and poll (MAX_WAIT_CNT * 100us)
         * for its enable bit to clear.
         */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        /* Host coalescing, write DMA and the memory blocks go last. */
        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset to flush the chip's internal queues. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear the status block and statistics so stale data is not
         * consumed after a restart.
         */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
5167
5168 /* tp->lock is held. */
5169 static int tg3_nvram_lock(struct tg3 *tp)
5170 {
5171         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5172                 int i;
5173
5174                 if (tp->nvram_lock_cnt == 0) {
5175                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5176                         for (i = 0; i < 8000; i++) {
5177                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5178                                         break;
5179                                 udelay(20);
5180                         }
5181                         if (i == 8000) {
5182                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5183                                 return -ENODEV;
5184                         }
5185                 }
5186                 tp->nvram_lock_cnt++;
5187         }
5188         return 0;
5189 }
5190
5191 /* tp->lock is held. */
5192 static void tg3_nvram_unlock(struct tg3 *tp)
5193 {
5194         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5195                 if (tp->nvram_lock_cnt > 0)
5196                         tp->nvram_lock_cnt--;
5197                 if (tp->nvram_lock_cnt == 0)
5198                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5199         }
5200 }
5201
5202 /* tp->lock is held. */
5203 static void tg3_enable_nvram_access(struct tg3 *tp)
5204 {
5205         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5206             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5207                 u32 nvaccess = tr32(NVRAM_ACCESS);
5208
5209                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5210         }
5211 }
5212
5213 /* tp->lock is held. */
5214 static void tg3_disable_nvram_access(struct tg3 *tp)
5215 {
5216         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5217             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5218                 u32 nvaccess = tr32(NVRAM_ACCESS);
5219
5220                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5221         }
5222 }
5223
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* Bail out unless the APE firmware segment is present and ready. */
        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (apedata != APE_FW_STATUS_READY)
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Previous event consumed: post ours while still
                 * holding the memory lock.
                 */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                /* apedata still holds the pre-write status; if no event
                 * was pending, our event was written above and polling
                 * can stop.
                 */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* Ring the APE doorbell only if the event was actually posted. */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5259
5260 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5261 {
5262         u32 event;
5263         u32 apedata;
5264
5265         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5266                 return;
5267
5268         switch (kind) {
5269                 case RESET_KIND_INIT:
5270                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5271                                         APE_HOST_SEG_SIG_MAGIC);
5272                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5273                                         APE_HOST_SEG_LEN_MAGIC);
5274                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5275                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5276                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5277                                         APE_HOST_DRIVER_ID_MAGIC);
5278                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5279                                         APE_HOST_BEHAV_NO_PHYLOCK);
5280
5281                         event = APE_EVENT_STATUS_STATE_START;
5282                         break;
5283                 case RESET_KIND_SHUTDOWN:
5284                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5285                         break;
5286                 case RESET_KIND_SUSPEND:
5287                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5288                         break;
5289                 default:
5290                         return;
5291         }
5292
5293         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5294
5295         tg3_ape_send_event(tp, event);
5296 }
5297
5298 /* tp->lock is held. */
5299 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5300 {
5301         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5302                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5303
5304         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5305                 switch (kind) {
5306                 case RESET_KIND_INIT:
5307                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5308                                       DRV_STATE_START);
5309                         break;
5310
5311                 case RESET_KIND_SHUTDOWN:
5312                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5313                                       DRV_STATE_UNLOAD);
5314                         break;
5315
5316                 case RESET_KIND_SUSPEND:
5317                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5318                                       DRV_STATE_SUSPEND);
5319                         break;
5320
5321                 default:
5322                         break;
5323                 };
5324         }
5325
5326         if (kind == RESET_KIND_INIT ||
5327             kind == RESET_KIND_SUSPEND)
5328                 tg3_ape_driver_state_change(tp, kind);
5329 }
5330
5331 /* tp->lock is held. */
5332 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5333 {
5334         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5335                 switch (kind) {
5336                 case RESET_KIND_INIT:
5337                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5338                                       DRV_STATE_START_DONE);
5339                         break;
5340
5341                 case RESET_KIND_SHUTDOWN:
5342                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5343                                       DRV_STATE_UNLOAD_DONE);
5344                         break;
5345
5346                 default:
5347                         break;
5348                 };
5349         }
5350
5351         if (kind == RESET_KIND_SHUTDOWN)
5352                 tg3_ape_driver_state_change(tp, kind);
5353 }
5354
5355 /* tp->lock is held. */
5356 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5357 {
5358         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5359                 switch (kind) {
5360                 case RESET_KIND_INIT:
5361                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5362                                       DRV_STATE_START);
5363                         break;
5364
5365                 case RESET_KIND_SHUTDOWN:
5366                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5367                                       DRV_STATE_UNLOAD);
5368                         break;
5369
5370                 case RESET_KIND_SUSPEND:
5371                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5372                                       DRV_STATE_SUSPEND);
5373                         break;
5374
5375                 default:
5376                         break;
5377                 };
5378         }
5379 }
5380
5381 static int tg3_poll_fw(struct tg3 *tp)
5382 {
5383         int i;
5384         u32 val;
5385
5386         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5387                 /* Wait up to 20ms for init done. */
5388                 for (i = 0; i < 200; i++) {
5389                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5390                                 return 0;
5391                         udelay(100);
5392                 }
5393                 return -ENODEV;
5394         }
5395
5396         /* Wait for firmware initialization to complete. */
5397         for (i = 0; i < 100000; i++) {
5398                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5399                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5400                         break;
5401                 udelay(10);
5402         }
5403
5404         /* Chip might not be fitted with firmware.  Some Sun onboard
5405          * parts are configured like that.  So don't signal the timeout
5406          * of the above loop as an error, but do report the lack of
5407          * running firmware once.
5408          */
5409         if (i >= 100000 &&
5410             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5411                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5412
5413                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5414                        tp->dev->name);
5415         }
5416
5417         return 0;
5418 }
5419
5420 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
        /* Only PCI_COMMAND needs an explicit snapshot; the other values
         * written back by tg3_restore_pci_state() come from fields
         * already cached in *tp.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5425
5426 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Restore the PCI_COMMAND word saved by tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
                pcie_set_readrq(tp->pdev, 4096);
        else {
                /* Conventional PCI: restore cacheline size and latency
                 * timer from the values cached at probe time.
                 */
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
5487
5488 static void tg3_stop_fw(struct tg3 *);
5489
/* tp->lock is held. */
/* Issue a GRC core-clock reset and bring the chip back to a usable
 * state: save/restore PCI config state, re-enable the memory arbiter,
 * restore the MAC port mode, wait for firmware, and reprobe the ASF
 * configuration.  Returns 0 or a negative errno from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	tg3_mdio_stop(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	/* Clear the GRC fastboot program counter on chips that have one. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIe-specific magic carried over from the vendor driver.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver-initiated reset to the VCPU and
		 * clear its halt bit.
		 */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Keep PHY power on across the core-clock reset on 5705+. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* Config space is valid again; the irq handler may resume access. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode to match the PHY flavor. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	tg3_mdio_start(tp);

	/* Wait for the on-chip firmware (if any) to come back up. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5682
/* tp->lock is held. */
/* Pause the ASF firmware: write FWCMD_NICDRV_PAUSE_FW into the firmware
 * command mailbox and raise the RX CPU driver-event bit, waiting for the
 * RX CPU to ACK both the previous event and this one.  Does nothing when
 * ASF is disabled or when the APE is enabled.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;

		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		val = tr32(GRC_RX_CPU_EVENT);
		val |= GRC_RX_CPU_DRIVER_EVENT;
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
5702
/* tp->lock is held. */
/* Fully halt the chip: pause firmware, announce the impending reset,
 * quiesce the hardware, then reset the chip.  Returns the result of
 * tg3_chip_reset(); the post-reset signatures are written regardless.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);

	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5723
5724 #define TG3_FW_RELEASE_MAJOR    0x0
5725 #define TG3_FW_RELASE_MINOR     0x0
5726 #define TG3_FW_RELEASE_FIX      0x0
5727 #define TG3_FW_START_ADDR       0x08000000
5728 #define TG3_FW_TEXT_ADDR        0x08000000
5729 #define TG3_FW_TEXT_LEN         0x9c0
5730 #define TG3_FW_RODATA_ADDR      0x080009c0
5731 #define TG3_FW_RODATA_LEN       0x60
5732 #define TG3_FW_DATA_ADDR        0x08000a40
5733 #define TG3_FW_DATA_LEN         0x20
5734 #define TG3_FW_SBSS_ADDR        0x08000a60
5735 #define TG3_FW_SBSS_LEN         0xc
5736 #define TG3_FW_BSS_ADDR         0x08000a70
5737 #define TG3_FW_BSS_LEN          0x10
5738
/* Instruction words (.text section) of the replacement on-chip CPU
 * firmware installed by tg3_load_5701_a0_firmware_fix().  Raw image
 * data; do not edit by hand.  See the firmware redistribution notice
 * at the top of this file.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5832
/* Read-only data (.rodata section) loaded alongside tg3FwText by
 * tg3_load_5701_a0_firmware_fix().  Raw image data; do not edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5840
5841 #if 0 /* All zeros, don't eat up space with it. */
5842 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5843         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5844         0x00000000, 0x00000000, 0x00000000, 0x00000000
5845 };
5846 #endif
5847
5848 #define RX_CPU_SCRATCH_BASE     0x30000
5849 #define RX_CPU_SCRATCH_SIZE     0x04000
5850 #define TX_CPU_SCRATCH_BASE     0x34000
5851 #define TX_CPU_SCRATCH_SIZE     0x04000
5852
5853 /* tp->lock is held. */
5854 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5855 {
5856         int i;
5857
5858         BUG_ON(offset == TX_CPU_BASE &&
5859             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5860
5861         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5862                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5863
5864                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5865                 return 0;
5866         }
5867         if (offset == RX_CPU_BASE) {
5868                 for (i = 0; i < 10000; i++) {
5869                         tw32(offset + CPU_STATE, 0xffffffff);
5870                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5871                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5872                                 break;
5873                 }
5874
5875                 tw32(offset + CPU_STATE, 0xffffffff);
5876                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5877                 udelay(10);
5878         } else {
5879                 for (i = 0; i < 10000; i++) {
5880                         tw32(offset + CPU_STATE, 0xffffffff);
5881                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5882                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5883                                 break;
5884                 }
5885         }
5886
5887         if (i >= 10000) {
5888                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5889                        "and %s CPU\n",
5890                        tp->dev->name,
5891                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5892                 return -ENODEV;
5893         }
5894
5895         /* Clear firmware's nvram arbitration. */
5896         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5897                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5898         return 0;
5899 }
5900
/* Describes a firmware image for tg3_load_firmware_cpu(): three sections
 * (.text, .rodata, .data), each with its link-time base address, its
 * length in bytes, and a pointer to the image words.  A NULL data
 * pointer means the section is zero-filled by the loader.
 */
struct fw_info {
	unsigned int text_base;		/* .text link address */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL */
	unsigned int rodata_base;	/* .rodata link address */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL */
	unsigned int data_base;		/* .data link address */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL */
};
5912
5913 /* tp->lock is held. */
5914 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5915                                  int cpu_scratch_size, struct fw_info *info)
5916 {
5917         int err, lock_err, i;
5918         void (*write_op)(struct tg3 *, u32, u32);
5919
5920         if (cpu_base == TX_CPU_BASE &&
5921             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5922                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5923                        "TX cpu firmware on %s which is 5705.\n",
5924                        tp->dev->name);
5925                 return -EINVAL;
5926         }
5927
5928         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5929                 write_op = tg3_write_mem;
5930         else
5931                 write_op = tg3_write_indirect_reg32;
5932
5933         /* It is possible that bootcode is still loading at this point.
5934          * Get the nvram lock first before halting the cpu.
5935          */
5936         lock_err = tg3_nvram_lock(tp);
5937         err = tg3_halt_cpu(tp, cpu_base);
5938         if (!lock_err)
5939                 tg3_nvram_unlock(tp);
5940         if (err)
5941                 goto out;
5942
5943         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5944                 write_op(tp, cpu_scratch_base + i, 0);
5945         tw32(cpu_base + CPU_STATE, 0xffffffff);
5946         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5947         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5948                 write_op(tp, (cpu_scratch_base +
5949                               (info->text_base & 0xffff) +
5950                               (i * sizeof(u32))),
5951                          (info->text_data ?
5952                           info->text_data[i] : 0));
5953         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5954                 write_op(tp, (cpu_scratch_base +
5955                               (info->rodata_base & 0xffff) +
5956                               (i * sizeof(u32))),
5957                          (info->rodata_data ?
5958                           info->rodata_data[i] : 0));
5959         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5960                 write_op(tp, (cpu_scratch_base +
5961                               (info->data_base & 0xffff) +
5962                               (i * sizeof(u32))),
5963                          (info->data_data ?
5964                           info->data_data[i] : 0));
5965
5966         err = 0;
5967
5968 out:
5969         return err;
5970 }
5971
5972 /* tp->lock is held. */
5973 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5974 {
5975         struct fw_info info;
5976         int err, i;
5977
5978         info.text_base = TG3_FW_TEXT_ADDR;
5979         info.text_len = TG3_FW_TEXT_LEN;
5980         info.text_data = &tg3FwText[0];
5981         info.rodata_base = TG3_FW_RODATA_ADDR;
5982         info.rodata_len = TG3_FW_RODATA_LEN;
5983         info.rodata_data = &tg3FwRodata[0];
5984         info.data_base = TG3_FW_DATA_ADDR;
5985         info.data_len = TG3_FW_DATA_LEN;
5986         info.data_data = NULL;
5987
5988         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5989                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5990                                     &info);
5991         if (err)
5992                 return err;
5993
5994         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5995                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5996                                     &info);
5997         if (err)
5998                 return err;
5999
6000         /* Now startup only the RX cpu. */
6001         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6002         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6003
6004         for (i = 0; i < 5; i++) {
6005                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6006                         break;
6007                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6008                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6009                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6010                 udelay(1000);
6011         }
6012         if (i >= 5) {
6013                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6014                        "to set RX CPU PC, is %08x should be %08x\n",
6015                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6016                        TG3_FW_TEXT_ADDR);
6017                 return -ENODEV;
6018         }
6019         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6020         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6021
6022         return 0;
6023 }
6024
6025
6026 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
6027 #define TG3_TSO_FW_RELASE_MINOR         0x6
6028 #define TG3_TSO_FW_RELEASE_FIX          0x0
6029 #define TG3_TSO_FW_START_ADDR           0x08000000
6030 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
6031 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
6032 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
6033 #define TG3_TSO_FW_RODATA_LEN           0x60
6034 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
6035 #define TG3_TSO_FW_DATA_LEN             0x30
6036 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
6037 #define TG3_TSO_FW_SBSS_LEN             0x2c
6038 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
6039 #define TG3_TSO_FW_BSS_LEN              0x894
6040
6041 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6042         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6043         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6044         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6045         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6046         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6047         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6048         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6049         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6050         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6051         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6052         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6053         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6054         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6055         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6056         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6057         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6058         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6059         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6060         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6061         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6062         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6063         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6064         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6065         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6066         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6067         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6068         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6069         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6070         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6071         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6072         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6073         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6074         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6075         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6076         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6077         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6078         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6079         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6080         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6081         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6082         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6083         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6084         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6085         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6086         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6087         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6088         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6089         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6090         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6091         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6092         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6093         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6094         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6095         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6096         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6097         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6098         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6099         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6100         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6101         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6102         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6103         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6104         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6105         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6106         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6107         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6108         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6109         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6110         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6111         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6112         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6113         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6114         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6115         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6116         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6117         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6118         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6119         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6120         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6121         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6122         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6123         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6124         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6125         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6126         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6127         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6128         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6129         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6130         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6131         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6132         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6133         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6134         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6135         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6136         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6137         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6138         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6139         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6140         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6141         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6142         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6143         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6144         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6145         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6146         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6147         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6148         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6149         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6150         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6151         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6152         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6153         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6154         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6155         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6156         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6157         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6158         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6159         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6160         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6161         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6162         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6163         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6164         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6165         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6166         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6167         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6168         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6169         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6170         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6171         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6172         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6173         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6174         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6175         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6176         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6177         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6178         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6179         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6180         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6181         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6182         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6183         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6184         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6185         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6186         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6187         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6188         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6189         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6190         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6191         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6192         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6193         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6194         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6195         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6196         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6197         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6198         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6199         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6200         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6201         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6202         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6203         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6204         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6205         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6206         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6207         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6208         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6209         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6210         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6211         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6212         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6213         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6214         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6215         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6216         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6217         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6218         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6219         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6220         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6221         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6222         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6223         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6224         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6225         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6226         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6227         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6228         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6229         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6230         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6231         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6232         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6233         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6234         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6235         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6236         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6237         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6238         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6239         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6240         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6241         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6242         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6243         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6244         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6245         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6246         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6247         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6248         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6249         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6250         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6251         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6252         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6253         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6254         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6255         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6256         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6257         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6258         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6259         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6260         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6261         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6262         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6263         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6264         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6265         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6266         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6267         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6268         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6269         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6270         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6271         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6272         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6273         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6274         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6275         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6276         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6277         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6278         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6279         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6280         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6281         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6282         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6283         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6284         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6285         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6286         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6287         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6288         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6289         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6290         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6291         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6292         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6293         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6294         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6295         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6296         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6297         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6298         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6299         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6300         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6301         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6302         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6303         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6304         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6305         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6306         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6307         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6308         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6309         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6310         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6311         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6312         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6313         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6314         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6315         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6316         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6317         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6318         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6319         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6320         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6321         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6322         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6323         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6324         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6325         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6326 };
6327
/* Read-only data section of the standard TSO firmware image, stored as
 * big-endian u32 words.  Decoded as ASCII these words spell the tags the
 * firmware carries internally: "MainCpuB", "MainCpuA", "stkoffldIn",
 * "stkoff**", "SwEvent0", "fatalErr".  Do not edit: the bytes must match
 * the firmware build exactly. */
6328 static const u32 tg3TsoFwRodata[] = {
6329         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6330         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6331         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6332         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6333         0x00000000,
6334 };
6335
/* Initialized data section of the standard TSO firmware image.  After a
 * leading zero word, the next four words decode to the ASCII version tag
 * "stkoffld_v1.6.0"; the remainder is zero-initialized.  Do not edit:
 * the bytes must match the firmware build exactly. */
6336 static const u32 tg3TsoFwData[] = {
6337         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6338         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6339         0x00000000,
6340 };
6341
6342 /* 5705 needs a special version of the TSO firmware.  */
/* Version of the 5705 TSO firmware (major.minor.fix = 1.2.0). */
6343 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
/* NOTE(review): "RELASE" (sic) -- historical typo in the macro name,
 * preserved because code elsewhere in this file references it by this
 * exact spelling. */
6344 #define TG3_TSO5_FW_RELASE_MINOR        0x2
6345 #define TG3_TSO5_FW_RELEASE_FIX         0x0
/* Section load addresses and lengths (in bytes) of the 5705 TSO
 * firmware image -- presumably addresses in the device's internal
 * memory map; the sections are laid out contiguously starting at
 * START_ADDR (text at 0x10000, rodata at 0x10e90, data at 0x10f00,
 * sbss at 0x10f20, bss at 0x10f50). */
6346 #define TG3_TSO5_FW_START_ADDR          0x00010000
6347 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6348 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6349 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6350 #define TG3_TSO5_FW_RODATA_LEN          0x50
6351 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6352 #define TG3_TSO5_FW_DATA_LEN            0x20
6353 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6354 #define TG3_TSO5_FW_SBSS_LEN            0x28
6355 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6356 #define TG3_TSO5_FW_BSS_LEN             0x88
6357
6358 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6359         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6360         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6361         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6362         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6363         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6364         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6365         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6366         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6367         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6368         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6369         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6370         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6371         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6372         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6373         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6374         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6375         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6376         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6377         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6378         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6379         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6380         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6381         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6382         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6383         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6384         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6385         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6386         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6387         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6388         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6389         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6390         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6391         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6392         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6393         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6394         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6395         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6396         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6397         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6398         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6399         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6400         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6401         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6402         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6403         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6404         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6405         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6406         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6407         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6408         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6409         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6410         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6411         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6412         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6413         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6414         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6415         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6416         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6417         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6418         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6419         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6420         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6421         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6422         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6423         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6424         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6425         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6426         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6427         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6428         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6429         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6430         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6431         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6432         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6433         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6434         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6435         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6436         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6437         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6438         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6439         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6440         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6441         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6442         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6443         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6444         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6445         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6446         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6447         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6448         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6449         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6450         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6451         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6452         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6453         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6454         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6455         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6456         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6457         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6458         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6459         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6460         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6461         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6462         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6463         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6464         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6465         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6466         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6467         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6468         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6469         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6470         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6471         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6472         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6473         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6474         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6475         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6476         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6477         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6478         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6479         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6480         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6481         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6482         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6483         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6484         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6485         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6486         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6487         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6488         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6489         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6490         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6491         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6492         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6493         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6494         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6495         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6496         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6497         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6498         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6499         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6500         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6501         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6502         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6503         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6504         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6505         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6506         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6507         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6508         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6509         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6510         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6511         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6512         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6513         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6514         0x00000000, 0x00000000, 0x00000000,
6515 };
6516
/* Read-only data section of the 5705 TSO offload firmware image; handed
 * to tg3_load_firmware_cpu() via fw_info.rodata_data in
 * tg3_load_tso_firmware().  The words are ASCII tags used by the
 * firmware ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr").
 */
6517 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6518         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6519         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6520         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6521         0x00000000, 0x00000000, 0x00000000,
6522 };
6523
/* Initialized-data section of the 5705 TSO offload firmware image; handed
 * to tg3_load_firmware_cpu() via fw_info.data_data in
 * tg3_load_tso_firmware().  The payload bytes spell the firmware version
 * string "stkoffld_v1.2.0".
 */
6524 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6525         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6526         0x00000000, 0x00000000, 0x00000000,
6527 };
6528
6529 /* tp->lock is held. */
6530 static int tg3_load_tso_firmware(struct tg3 *tp)
6531 {
6532         struct fw_info info;
6533         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6534         int err, i;
6535
6536         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6537                 return 0;
6538
6539         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6540                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6541                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6542                 info.text_data = &tg3Tso5FwText[0];
6543                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6544                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6545                 info.rodata_data = &tg3Tso5FwRodata[0];
6546                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6547                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6548                 info.data_data = &tg3Tso5FwData[0];
6549                 cpu_base = RX_CPU_BASE;
6550                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6551                 cpu_scratch_size = (info.text_len +
6552                                     info.rodata_len +
6553                                     info.data_len +
6554                                     TG3_TSO5_FW_SBSS_LEN +
6555                                     TG3_TSO5_FW_BSS_LEN);
6556         } else {
6557                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6558                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6559                 info.text_data = &tg3TsoFwText[0];
6560                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6561                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6562                 info.rodata_data = &tg3TsoFwRodata[0];
6563                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6564                 info.data_len = TG3_TSO_FW_DATA_LEN;
6565                 info.data_data = &tg3TsoFwData[0];
6566                 cpu_base = TX_CPU_BASE;
6567                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6568                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6569         }
6570
6571         err = tg3_load_firmware_cpu(tp, cpu_base,
6572                                     cpu_scratch_base, cpu_scratch_size,
6573                                     &info);
6574         if (err)
6575                 return err;
6576
6577         /* Now startup the cpu. */
6578         tw32(cpu_base + CPU_STATE, 0xffffffff);
6579         tw32_f(cpu_base + CPU_PC,    info.text_base);
6580
6581         for (i = 0; i < 5; i++) {
6582                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6583                         break;
6584                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6585                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6586                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6587                 udelay(1000);
6588         }
6589         if (i >= 5) {
6590                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6591                        "to set CPU PC, is %08x should be %08x\n",
6592                        tp->dev->name, tr32(cpu_base + CPU_PC),
6593                        info.text_base);
6594                 return -ENODEV;
6595         }
6596         tw32(cpu_base + CPU_STATE, 0xffffffff);
6597         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6598         return 0;
6599 }
6600
6601
6602 /* tp->lock is held. */
6603 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6604 {
6605         u32 addr_high, addr_low;
6606         int i;
6607
6608         addr_high = ((tp->dev->dev_addr[0] << 8) |
6609                      tp->dev->dev_addr[1]);
6610         addr_low = ((tp->dev->dev_addr[2] << 24) |
6611                     (tp->dev->dev_addr[3] << 16) |
6612                     (tp->dev->dev_addr[4] <<  8) |
6613                     (tp->dev->dev_addr[5] <<  0));
6614         for (i = 0; i < 4; i++) {
6615                 if (i == 1 && skip_mac_1)
6616                         continue;
6617                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6618                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6619         }
6620
6621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6622             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6623                 for (i = 0; i < 12; i++) {
6624                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6625                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6626                 }
6627         }
6628
6629         addr_high = (tp->dev->dev_addr[0] +
6630                      tp->dev->dev_addr[1] +
6631                      tp->dev->dev_addr[2] +
6632                      tp->dev->dev_addr[3] +
6633                      tp->dev->dev_addr[4] +
6634                      tp->dev->dev_addr[5]) &
6635                 TX_BACKOFF_SEED_MASK;
6636         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6637 }
6638
6639 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6640 {
6641         struct tg3 *tp = netdev_priv(dev);
6642         struct sockaddr *addr = p;
6643         int err = 0, skip_mac_1 = 0;
6644
6645         if (!is_valid_ether_addr(addr->sa_data))
6646                 return -EINVAL;
6647
6648         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6649
6650         if (!netif_running(dev))
6651                 return 0;
6652
6653         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6654                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6655
6656                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6657                 addr0_low = tr32(MAC_ADDR_0_LOW);
6658                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6659                 addr1_low = tr32(MAC_ADDR_1_LOW);
6660
6661                 /* Skip MAC addr 1 if ASF is using it. */
6662                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6663                     !(addr1_high == 0 && addr1_low == 0))
6664                         skip_mac_1 = 1;
6665         }
6666         spin_lock_bh(&tp->lock);
6667         __tg3_set_mac_addr(tp, skip_mac_1);
6668         spin_unlock_bh(&tp->lock);
6669
6670         return err;
6671 }
6672
6673 /* tp->lock is held. */
6674 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6675                            dma_addr_t mapping, u32 maxlen_flags,
6676                            u32 nic_addr)
6677 {
6678         tg3_write_mem(tp,
6679                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6680                       ((u64) mapping >> 32));
6681         tg3_write_mem(tp,
6682                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6683                       ((u64) mapping & 0xffffffff));
6684         tg3_write_mem(tp,
6685                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6686                        maxlen_flags);
6687
6688         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6689                 tg3_write_mem(tp,
6690                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6691                               nic_addr);
6692 }
6693
6694 static void __tg3_set_rx_mode(struct net_device *);
6695 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6696 {
6697         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6698         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6699         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6700         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6701         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6702                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6703                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6704         }
6705         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6706         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6707         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6708                 u32 val = ec->stats_block_coalesce_usecs;
6709
6710                 if (!netif_carrier_ok(tp->dev))
6711                         val = 0;
6712
6713                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6714         }
6715 }
6716
6717 /* tp->lock is held. */
6718 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6719 {
6720         u32 val, rdmac_mode;
6721         int i, err, limit;
6722
6723         tg3_disable_ints(tp);
6724
6725         tg3_stop_fw(tp);
6726
6727         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6728
6729         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6730                 tg3_abort_hw(tp, 1);
6731         }
6732
6733         if (reset_phy &&
6734             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6735                 tg3_phy_reset(tp);
6736
6737         err = tg3_chip_reset(tp);
6738         if (err)
6739                 return err;
6740
6741         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6742
6743         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6744             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6745                 val = tr32(TG3_CPMU_CTRL);
6746                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6747                 tw32(TG3_CPMU_CTRL, val);
6748
6749                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6750                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6751                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6752                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6753
6754                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6755                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6756                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6757                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6758
6759                 val = tr32(TG3_CPMU_HST_ACC);
6760                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6761                 val |= CPMU_HST_ACC_MACCLK_6_25;
6762                 tw32(TG3_CPMU_HST_ACC, val);
6763         }
6764
6765         /* This works around an issue with Athlon chipsets on
6766          * B3 tigon3 silicon.  This bit has no effect on any
6767          * other revision.  But do not set this on PCI Express
6768          * chips and don't even touch the clocks if the CPMU is present.
6769          */
6770         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6771                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6772                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6773                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6774         }
6775
6776         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6777             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6778                 val = tr32(TG3PCI_PCISTATE);
6779                 val |= PCISTATE_RETRY_SAME_DMA;
6780                 tw32(TG3PCI_PCISTATE, val);
6781         }
6782
6783         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6784                 /* Allow reads and writes to the
6785                  * APE register and memory space.
6786                  */
6787                 val = tr32(TG3PCI_PCISTATE);
6788                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6789                        PCISTATE_ALLOW_APE_SHMEM_WR;
6790                 tw32(TG3PCI_PCISTATE, val);
6791         }
6792
6793         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6794                 /* Enable some hw fixes.  */
6795                 val = tr32(TG3PCI_MSI_DATA);
6796                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6797                 tw32(TG3PCI_MSI_DATA, val);
6798         }
6799
6800         /* Descriptor ring init may make accesses to the
6801          * NIC SRAM area to setup the TX descriptors, so we
6802          * can only do this after the hardware has been
6803          * successfully reset.
6804          */
6805         err = tg3_init_rings(tp);
6806         if (err)
6807                 return err;
6808
6809         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6810             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6811                 /* This value is determined during the probe time DMA
6812                  * engine test, tg3_test_dma.
6813                  */
6814                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6815         }
6816
6817         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6818                           GRC_MODE_4X_NIC_SEND_RINGS |
6819                           GRC_MODE_NO_TX_PHDR_CSUM |
6820                           GRC_MODE_NO_RX_PHDR_CSUM);
6821         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6822
6823         /* Pseudo-header checksum is done by hardware logic and not
6824          * the offload processers, so make the chip do the pseudo-
6825          * header checksums on receive.  For transmit it is more
6826          * convenient to do the pseudo-header checksum in software
6827          * as Linux does that on transmit for us in all cases.
6828          */
6829         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6830
6831         tw32(GRC_MODE,
6832              tp->grc_mode |
6833              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6834
6835         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6836         val = tr32(GRC_MISC_CFG);
6837         val &= ~0xff;
6838         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6839         tw32(GRC_MISC_CFG, val);
6840
6841         /* Initialize MBUF/DESC pool. */
6842         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6843                 /* Do nothing.  */
6844         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6845                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6846                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6847                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6848                 else
6849                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6850                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6851                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6852         }
6853         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6854                 int fw_len;
6855
6856                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6857                           TG3_TSO5_FW_RODATA_LEN +
6858                           TG3_TSO5_FW_DATA_LEN +
6859                           TG3_TSO5_FW_SBSS_LEN +
6860                           TG3_TSO5_FW_BSS_LEN);
6861                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6862                 tw32(BUFMGR_MB_POOL_ADDR,
6863                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6864                 tw32(BUFMGR_MB_POOL_SIZE,
6865                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6866         }
6867
6868         if (tp->dev->mtu <= ETH_DATA_LEN) {
6869                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6870                      tp->bufmgr_config.mbuf_read_dma_low_water);
6871                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6872                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6873                 tw32(BUFMGR_MB_HIGH_WATER,
6874                      tp->bufmgr_config.mbuf_high_water);
6875         } else {
6876                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6877                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6878                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6879                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6880                 tw32(BUFMGR_MB_HIGH_WATER,
6881                      tp->bufmgr_config.mbuf_high_water_jumbo);
6882         }
6883         tw32(BUFMGR_DMA_LOW_WATER,
6884              tp->bufmgr_config.dma_low_water);
6885         tw32(BUFMGR_DMA_HIGH_WATER,
6886              tp->bufmgr_config.dma_high_water);
6887
6888         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6889         for (i = 0; i < 2000; i++) {
6890                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6891                         break;
6892                 udelay(10);
6893         }
6894         if (i >= 2000) {
6895                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6896                        tp->dev->name);
6897                 return -ENODEV;
6898         }
6899
6900         /* Setup replenish threshold. */
6901         val = tp->rx_pending / 8;
6902         if (val == 0)
6903                 val = 1;
6904         else if (val > tp->rx_std_max_post)
6905                 val = tp->rx_std_max_post;
6906         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6907                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6908                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6909
6910                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6911                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6912         }
6913
6914         tw32(RCVBDI_STD_THRESH, val);
6915
6916         /* Initialize TG3_BDINFO's at:
6917          *  RCVDBDI_STD_BD:     standard eth size rx ring
6918          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6919          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6920          *
6921          * like so:
6922          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6923          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6924          *                              ring attribute flags
6925          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6926          *
6927          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6928          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6929          *
6930          * The size of each ring is fixed in the firmware, but the location is
6931          * configurable.
6932          */
6933         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6934              ((u64) tp->rx_std_mapping >> 32));
6935         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6936              ((u64) tp->rx_std_mapping & 0xffffffff));
6937         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6938              NIC_SRAM_RX_BUFFER_DESC);
6939
6940         /* Don't even try to program the JUMBO/MINI buffer descriptor
6941          * configs on 5705.
6942          */
6943         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6944                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6945                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6946         } else {
6947                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6948                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6949
6950                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6951                      BDINFO_FLAGS_DISABLED);
6952
6953                 /* Setup replenish threshold. */
6954                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6955
6956                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6957                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6958                              ((u64) tp->rx_jumbo_mapping >> 32));
6959                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6960                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6961                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6962                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6963                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6964                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6965                 } else {
6966                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6967                              BDINFO_FLAGS_DISABLED);
6968                 }
6969
6970         }
6971
6972         /* There is only one send ring on 5705/5750, no need to explicitly
6973          * disable the others.
6974          */
6975         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6976                 /* Clear out send RCB ring in SRAM. */
6977                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6978                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6979                                       BDINFO_FLAGS_DISABLED);
6980         }
6981
6982         tp->tx_prod = 0;
6983         tp->tx_cons = 0;
6984         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6985         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6986
6987         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6988                        tp->tx_desc_mapping,
6989                        (TG3_TX_RING_SIZE <<
6990                         BDINFO_FLAGS_MAXLEN_SHIFT),
6991                        NIC_SRAM_TX_BUFFER_DESC);
6992
6993         /* There is only one receive return ring on 5705/5750, no need
6994          * to explicitly disable the others.
6995          */
6996         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6997                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6998                      i += TG3_BDINFO_SIZE) {
6999                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7000                                       BDINFO_FLAGS_DISABLED);
7001                 }
7002         }
7003
7004         tp->rx_rcb_ptr = 0;
7005         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7006
7007         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7008                        tp->rx_rcb_mapping,
7009                        (TG3_RX_RCB_RING_SIZE(tp) <<
7010                         BDINFO_FLAGS_MAXLEN_SHIFT),
7011                        0);
7012
7013         tp->rx_std_ptr = tp->rx_pending;
7014         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7015                      tp->rx_std_ptr);
7016
7017         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7018                                                 tp->rx_jumbo_pending : 0;
7019         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7020                      tp->rx_jumbo_ptr);
7021
7022         /* Initialize MAC address and backoff seed. */
7023         __tg3_set_mac_addr(tp, 0);
7024
7025         /* MTU + ethernet header + FCS + optional VLAN tag */
7026         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7027
7028         /* The slot time is changed by tg3_setup_phy if we
7029          * run at gigabit with half duplex.
7030          */
7031         tw32(MAC_TX_LENGTHS,
7032              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7033              (6 << TX_LENGTHS_IPG_SHIFT) |
7034              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7035
7036         /* Receive rules. */
7037         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7038         tw32(RCVLPC_CONFIG, 0x0181);
7039
7040         /* Calculate RDMAC_MODE setting early, we need it to determine
7041          * the RCVLPC_STATE_ENABLE mask.
7042          */
7043         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7044                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7045                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7046                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7047                       RDMAC_MODE_LNGREAD_ENAB);
7048
7049         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
7050                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7051                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7052                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7053
7054         /* If statement applies to 5705 and 5750 PCI devices only */
7055         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7056              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7057             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7058                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7059                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7060                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7061                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7062                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7063                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7064                 }
7065         }
7066
7067         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7068                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7069
7070         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7071                 rdmac_mode |= (1 << 27);
7072
7073         /* Receive/send statistics. */
7074         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7075                 val = tr32(RCVLPC_STATS_ENABLE);
7076                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7077                 tw32(RCVLPC_STATS_ENABLE, val);
7078         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7079                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7080                 val = tr32(RCVLPC_STATS_ENABLE);
7081                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7082                 tw32(RCVLPC_STATS_ENABLE, val);
7083         } else {
7084                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7085         }
7086         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7087         tw32(SNDDATAI_STATSENAB, 0xffffff);
7088         tw32(SNDDATAI_STATSCTRL,
7089              (SNDDATAI_SCTRL_ENABLE |
7090               SNDDATAI_SCTRL_FASTUPD));
7091
7092         /* Setup host coalescing engine. */
7093         tw32(HOSTCC_MODE, 0);
7094         for (i = 0; i < 2000; i++) {
7095                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7096                         break;
7097                 udelay(10);
7098         }
7099
7100         __tg3_set_coalesce(tp, &tp->coal);
7101
7102         /* set status block DMA address */
7103         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7104              ((u64) tp->status_mapping >> 32));
7105         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7106              ((u64) tp->status_mapping & 0xffffffff));
7107
7108         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7109                 /* Status/statistics block address.  See tg3_timer,
7110                  * the tg3_periodic_fetch_stats call there, and
7111                  * tg3_get_stats to see how this works for 5705/5750 chips.
7112                  */
7113                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7114                      ((u64) tp->stats_mapping >> 32));
7115                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7116                      ((u64) tp->stats_mapping & 0xffffffff));
7117                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7118                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7119         }
7120
7121         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7122
7123         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7124         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7125         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7126                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7127
7128         /* Clear statistics/status block in chip, and status block in ram. */
7129         for (i = NIC_SRAM_STATS_BLK;
7130              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7131              i += sizeof(u32)) {
7132                 tg3_write_mem(tp, i, 0);
7133                 udelay(40);
7134         }
7135         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7136
7137         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7138                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7139                 /* reset to prevent losing 1st rx packet intermittently */
7140                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7141                 udelay(10);
7142         }
7143
7144         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7145                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7146         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7147             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7148             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7149                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7150         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7151         udelay(40);
7152
7153         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7154          * If TG3_FLG2_IS_NIC is zero, we should read the
7155          * register to preserve the GPIO settings for LOMs. The GPIOs,
7156          * whether used as inputs or outputs, are set by boot code after
7157          * reset.
7158          */
7159         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7160                 u32 gpio_mask;
7161
7162                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7163                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7164                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7165
7166                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7167                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7168                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7169
7170                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7171                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7172
7173                 tp->grc_local_ctrl &= ~gpio_mask;
7174                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7175
7176                 /* GPIO1 must be driven high for eeprom write protect */
7177                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7178                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7179                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7180         }
7181         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7182         udelay(100);
7183
7184         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7185         tp->last_tag = 0;
7186
7187         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7188                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7189                 udelay(40);
7190         }
7191
7192         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7193                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7194                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7195                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7196                WDMAC_MODE_LNGREAD_ENAB);
7197
7198         /* If statement applies to 5705 and 5750 PCI devices only */
7199         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7200              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7201             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7202                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7203                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7204                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7205                         /* nothing */
7206                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7207                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7208                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7209                         val |= WDMAC_MODE_RX_ACCEL;
7210                 }
7211         }
7212
7213         /* Enable host coalescing bug fix */
7214         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7215             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7216             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7217             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7218                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7219
7220         tw32_f(WDMAC_MODE, val);
7221         udelay(40);
7222
7223         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7224                 u16 pcix_cmd;
7225
7226                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7227                                      &pcix_cmd);
7228                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7229                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7230                         pcix_cmd |= PCI_X_CMD_READ_2K;
7231                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7232                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7233                         pcix_cmd |= PCI_X_CMD_READ_2K;
7234                 }
7235                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7236                                       pcix_cmd);
7237         }
7238
7239         tw32_f(RDMAC_MODE, rdmac_mode);
7240         udelay(40);
7241
7242         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7243         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7244                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7245
7246         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7247                 tw32(SNDDATAC_MODE,
7248                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7249         else
7250                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7251
7252         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7253         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7254         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7255         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7256         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7257                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7258         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7259         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7260
7261         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7262                 err = tg3_load_5701_a0_firmware_fix(tp);
7263                 if (err)
7264                         return err;
7265         }
7266
7267         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7268                 err = tg3_load_tso_firmware(tp);
7269                 if (err)
7270                         return err;
7271         }
7272
7273         tp->tx_mode = TX_MODE_ENABLE;
7274         tw32_f(MAC_TX_MODE, tp->tx_mode);
7275         udelay(100);
7276
7277         tp->rx_mode = RX_MODE_ENABLE;
7278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7279             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7280                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7281
7282         tw32_f(MAC_RX_MODE, tp->rx_mode);
7283         udelay(10);
7284
7285         tw32(MAC_LED_CTRL, tp->led_ctrl);
7286
7287         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7288         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7289                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7290                 udelay(10);
7291         }
7292         tw32_f(MAC_RX_MODE, tp->rx_mode);
7293         udelay(10);
7294
7295         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7296                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7297                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7298                         /* Set drive transmission level to 1.2V  */
7299                         /* only if the signal pre-emphasis bit is not set  */
7300                         val = tr32(MAC_SERDES_CFG);
7301                         val &= 0xfffff000;
7302                         val |= 0x880;
7303                         tw32(MAC_SERDES_CFG, val);
7304                 }
7305                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7306                         tw32(MAC_SERDES_CFG, 0x616000);
7307         }
7308
7309         /* Prevent chip from dropping frames when flow control
7310          * is enabled.
7311          */
7312         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7313
7314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7315             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7316                 /* Use hardware link auto-negotiation */
7317                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7318         }
7319
7320         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7321             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7322                 u32 tmp;
7323
7324                 tmp = tr32(SERDES_RX_CTRL);
7325                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7326                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7327                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7328                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7329         }
7330
7331         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7332                 if (tp->link_config.phy_is_low_power) {
7333                         tp->link_config.phy_is_low_power = 0;
7334                         tp->link_config.speed = tp->link_config.orig_speed;
7335                         tp->link_config.duplex = tp->link_config.orig_duplex;
7336                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7337                 }
7338
7339                 err = tg3_setup_phy(tp, 0);
7340                 if (err)
7341                         return err;
7342
7343                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7344                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7345                         u32 tmp;
7346
7347                         /* Clear CRC stats. */
7348                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7349                                 tg3_writephy(tp, MII_TG3_TEST1,
7350                                              tmp | MII_TG3_TEST1_CRC_EN);
7351                                 tg3_readphy(tp, 0x14, &tmp);
7352                         }
7353                 }
7354         }
7355
7356         __tg3_set_rx_mode(tp->dev);
7357
7358         /* Initialize receive rules. */
7359         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7360         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7361         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7362         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7363
7364         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7365             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7366                 limit = 8;
7367         else
7368                 limit = 16;
7369         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7370                 limit -= 4;
7371         switch (limit) {
7372         case 16:
7373                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7374         case 15:
7375                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7376         case 14:
7377                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7378         case 13:
7379                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7380         case 12:
7381                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7382         case 11:
7383                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7384         case 10:
7385                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7386         case 9:
7387                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7388         case 8:
7389                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7390         case 7:
7391                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7392         case 6:
7393                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7394         case 5:
7395                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7396         case 4:
7397                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7398         case 3:
7399                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7400         case 2:
7401         case 1:
7402
7403         default:
7404                 break;
7405         };
7406
7407         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7408                 /* Write our heartbeat update interval to APE. */
7409                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7410                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7411
7412         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7413
7414         return 0;
7415 }
7416
7417 /* Called at device open time to get the chip ready for
7418  * packet processing.  Invoked with tp->lock held.
7419  */
7420 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7421 {
7422         int err;
7423
7424         /* Force the chip into D0. */
7425         err = tg3_set_power_state(tp, PCI_D0);
7426         if (err)
7427                 goto out;
7428
7429         tg3_switch_clocks(tp);
7430
7431         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7432
7433         err = tg3_reset_hw(tp, reset_phy);
7434
7435 out:
7436         return err;
7437 }
7438
/* Fold a 32-bit hardware counter register REG into the 64-bit software
 * accumulator PSTAT (a tg3 high/low pair).  If adding the new value
 * wraps the low word, carry one into the high word.
 * NOTE(review): PSTAT is expanded twice, so it must be a side-effect
 * free expression (all call sites below pass plain &sp->field).
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7445
/* Accumulate the chip's 32-bit MAC TX/RX and receive-list-placement
 * statistics counters into the 64-bit totals in tp->hw_stats.
 * Called from tg3_timer() on 5705-plus chips (see the
 * TG3_FLG2_5705_PLUS check there) on the once-per-second path.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        /* Skip the register reads entirely while the link is down. */
        if (!netif_carrier_ok(tp->dev))
                return;

        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7486
/* Periodic service timer, re-armed at the end of every invocation.
 * Fires every tp->timer_offset jiffies and handles three jobs:
 *   1) for non-TAGGED-STATUS chips, work around the racy
 *      mailbox/status-block handshake and detect a hung write DMA
 *      engine (scheduling a full reset if so),
 *   2) once per second, fetch statistics and poll link state,
 *   3) every two seconds, send an ALIVE heartbeat to ASF firmware.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* irq_sync set means another context wants the hardware left
         * untouched (presumably an interrupt quiesce in progress) --
         * skip this tick but keep the timer alive.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block was updated: force an interrupt
                         * so the ISR processes it.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Otherwise ask the coalescing engine to DMA a
                         * fresh status block to the host now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                /* If the write DMA engine lost its enable bit the chip
                 * is wedged; hand off to the reset task.  The
                 * RESTART_TIMER flag presumably tells it to re-arm us
                 * afterwards -- confirm in tg3_reset_task().
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        /* Detect a link event either via the MI
                         * interrupt bit or the link-state-changed bit,
                         * depending on chip configuration.
                         */
                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link was up but the state changed ... */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        /* ... or link was down but the PCS now sees a
                         * sync or signal: renegotiate either way.
                         */
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Momentarily clear the port
                                         * mode bits before restoring
                                         * them to kick the SERDES.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;

                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        /* Ring the firmware's doorbell. */
                        val = tr32(GRC_RX_CPU_EVENT);
                        val |= GRC_RX_CPU_DRIVER_EVENT;
                        tw32_f(GRC_RX_CPU_EVENT, val);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
7608
7609 static int tg3_request_irq(struct tg3 *tp)
7610 {
7611         irq_handler_t fn;
7612         unsigned long flags;
7613         struct net_device *dev = tp->dev;
7614
7615         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7616                 fn = tg3_msi;
7617                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7618                         fn = tg3_msi_1shot;
7619                 flags = IRQF_SAMPLE_RANDOM;
7620         } else {
7621                 fn = tg3_interrupt;
7622                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7623                         fn = tg3_interrupt_tagged;
7624                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7625         }
7626         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7627 }
7628
7629 static int tg3_test_interrupt(struct tg3 *tp)
7630 {
7631         struct net_device *dev = tp->dev;
7632         int err, i, intr_ok = 0;
7633
7634         if (!netif_running(dev))
7635                 return -ENODEV;
7636
7637         tg3_disable_ints(tp);
7638
7639         free_irq(tp->pdev->irq, dev);
7640
7641         err = request_irq(tp->pdev->irq, tg3_test_isr,
7642                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7643         if (err)
7644                 return err;
7645
7646         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7647         tg3_enable_ints(tp);
7648
7649         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7650                HOSTCC_MODE_NOW);
7651
7652         for (i = 0; i < 5; i++) {
7653                 u32 int_mbox, misc_host_ctrl;
7654
7655                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7656                                         TG3_64BIT_REG_LOW);
7657                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7658
7659                 if ((int_mbox != 0) ||
7660                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7661                         intr_ok = 1;
7662                         break;
7663                 }
7664
7665                 msleep(10);
7666         }
7667
7668         tg3_disable_ints(tp);
7669
7670         free_irq(tp->pdev->irq, dev);
7671
7672         err = tg3_request_irq(tp);
7673
7674         if (err)
7675                 return err;
7676
7677         if (intr_ok)
7678                 return 0;
7679
7680         return -EIO;
7681 }
7682
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to verify unless MSI is actually in use. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word (SERR re-enabled if it
         * was on before).
         */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        /* Reinstall the handler; with USING_MSI cleared this picks the
         * INTx variant (see tg3_request_irq).
         */
        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        /* If reinit failed, release the irq here; the caller handles
         * the remaining teardown.
         */
        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
7743
/* net_device open() hook.  Powers the chip up to D0, allocates the
 * DMA-consistent rings, enables MSI when supported (with a functional
 * test and INTx fallback via tg3_test_msi), initializes the hardware
 * and starts the periodic service timer.  Every failure path unwinds
 * whatever was set up before it.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Report no link until tg3_setup_phy() says otherwise. */
        netif_carrier_off(tp->dev);

        tg3_full_lock(tp, 0);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err) {
                tg3_full_unlock(tp);
                return err;
        }

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* Undo MSI enable and the ring allocation. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Tagged status needs only a 1 Hz tick; non-tagged
                 * status also relies on tg3_timer() to work around the
                 * status-block race, so tick at 10 Hz (see tg3_timer).
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                /* timer_counter divides ticks down to 1 Hz work;
                 * asf_counter divides down to one heartbeat per 2 s.
                 */
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                /* Verify MSI actually delivers; tg3_test_msi() falls
                 * back to INTx (clearing USING_MSI) when it can.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_full_lock(tp, 0);

        /* Everything is in place: start the timer, mark init complete
         * and open the interrupt floodgates.
         */
        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
7879
#if 0
/* Dump the hardware state of the chip for debugging: PCI config status,
 * every major MAC/DMA/ring-control block's MODE and STATUS registers,
 * the host-coalescing and buffer-manager state, the NIC-side copies of
 * the RCBs/status block in SRAM, the SW status/statistics blocks, the
 * mailboxes and the first few NIC-side TX/RX descriptors.
 *
 * Compiled out by default (#if 0); callers re-enable it by hand.
 * Caller must hold tg3_full_lock() since this pokes chip registers.
 *
 * Fix: the descriptor-dump loops declared txd/rxd as "unsigned long"
 * while assigning them tp->regs (a void __iomem *) and handing them to
 * readl() -- a type error that would break the build the moment this
 * block was enabled.  They are now proper __iomem pointers; void
 * pointer arithmetic keeps the byte offsets unchanged.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* NIC-side SRAM copies of the send / receive-return RCBs and
	 * the status block, read through the memory window.
	 */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		/* was "unsigned long txd": tp->regs is void __iomem *,
		 * so the assignment and the readl() calls would not
		 * compile if this block were enabled.
		 */
		void __iomem *txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		void __iomem *rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		void __iomem *rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
8107
8108 static struct net_device_stats *tg3_get_stats(struct net_device *);
8109 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8110
/* net_device ndo close (ifconfig down) handler.
 *
 * Teardown order matters: NAPI and the deferred reset worker are
 * quiesced first so nothing re-arms the hardware while we shut it
 * down, the TX queue and periodic timer are stopped next, and only
 * then is the chip halted and its memory released.
 * Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Stop polling and wait out any in-flight reset_task before
	 * touching chip state.
	 */
	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	/* Wait for a concurrently-running timer handler to finish. */
	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* IRQ must be released after interrupts are disabled above. */
	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the accumulated counters: the hw_stats DMA block is
	 * about to be freed, and the *_prev copies let the next open
	 * continue counting from here instead of restarting at zero.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	/* Drop the chip into low power until the next open. */
	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8154
8155 static inline unsigned long get_stat64(tg3_stat64_t *val)
8156 {
8157         unsigned long ret;
8158
8159 #if (BITS_PER_LONG == 32)
8160         ret = val->low;
8161 #else
8162         ret = ((u64)val->high << 32) | ((u64)val->low);
8163 #endif
8164         return ret;
8165 }
8166
8167 static unsigned long calc_crc_errors(struct tg3 *tp)
8168 {
8169         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8170
8171         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8172             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8173              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8174                 u32 val;
8175
8176                 spin_lock_bh(&tp->lock);
8177                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8178                         tg3_writephy(tp, MII_TG3_TEST1,
8179                                      val | MII_TG3_TEST1_CRC_EN);
8180                         tg3_readphy(tp, 0x14, &val);
8181                 } else
8182                         val = 0;
8183                 spin_unlock_bh(&tp->lock);
8184
8185                 tp->phy_crc_errors += val;
8186
8187                 return tp->phy_crc_errors;
8188         }
8189
8190         return get_stat64(&hw_stats->rx_fcs_errors);
8191 }
8192
/* Accumulate one ethtool statistic: previous snapshot (taken at last
 * close) plus the live hardware counter.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Refresh and return the driver's ethtool statistics block.
 *
 * Each field is old_estats (counters saved across the last
 * close/open cycle) plus the current hardware statistics block.  If
 * the device is closed (hw_stats freed), the saved snapshot is
 * returned unchanged.  The member list mirrors the hardware
 * statistics block layout; do not reorder.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive path counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit path counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive-list-placement statistics. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send-data-initiator statistics. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host-coalescing statistics. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8284
/* net_device get_stats handler.
 *
 * Maps the Tigon3 hardware statistics block onto the generic
 * net_device_stats fields.  Like tg3_get_estats(), every value is the
 * snapshot preserved across the last close (net_stats_prev) plus the
 * live hardware counters; when the device is closed (hw_stats freed)
 * the snapshot alone is returned.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Totals are the sum of the unicast/multicast/broadcast
	 * counters; the hardware keeps no combined packet count.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	/* rxbds_empty: RX BD ring starvation, the closest match for
	 * "receiver overrun".
	 */
	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* May read the PHY counter on 5700/5701 -- see calc_crc_errors(). */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8344
8345 static inline u32 calc_crc(unsigned char *buf, int len)
8346 {
8347         u32 reg;
8348         u32 tmp;
8349         int j, k;
8350
8351         reg = 0xffffffff;
8352
8353         for (j = 0; j < len; j++) {
8354                 reg ^= buf[j];
8355
8356                 for (k = 0; k < 8; k++) {
8357                         tmp = reg & 0x01;
8358
8359                         reg >>= 1;
8360
8361                         if (tmp) {
8362                                 reg ^= 0xedb88320;
8363                         }
8364                 }
8365         }
8366
8367         return ~reg;
8368 }
8369
8370 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8371 {
8372         /* accept or reject all multicast frames */
8373         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8374         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8375         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8376         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8377 }
8378
/* Program the RX filtering state (promiscuous flag, VLAN tag
 * stripping, multicast hash filter) from dev->flags and the device's
 * multicast list.  Caller must hold tg3_full_lock().
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash = low 7 bits of the inverted address CRC:
			 * top 2 bits select a register, bottom 5 a bit.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch (and flush) the RX mode register if it changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8442
/* net_device set_rx_mode entry point: take the full lock and apply the
 * current RX filtering configuration.  A stopped device has no RX
 * state to program, so this is a no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8454
8455 #define TG3_REGDUMP_LEN         (32 * 1024)
8456
/* ethtool get_regs_len handler: size of the register dump produced by
 * tg3_get_regs() (fixed 32KB window).
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8461
/* ethtool get_regs handler: fill _p (TG3_REGDUMP_LEN bytes) with a
 * sparse image of the chip's register space.
 *
 * Each GET_REG32_LOOP/GET_REG32_1 invocation repositions the output
 * pointer to the register's own offset within the dump, so registers
 * land at their hardware addresses and unread gaps stay zero (from
 * the memset).  Dump is all-zero while the PHY is powered down, since
 * register reads would be unreliable.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump at the current output position. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Read len bytes of registers starting at base, placed at offset base
 * in the dump buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Read a single register, placed at its own offset in the dump. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM interface registers only exist when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8534
8535 static int tg3_get_eeprom_len(struct net_device *dev)
8536 {
8537         struct tg3 *tp = netdev_priv(dev);
8538
8539         return tp->nvram_size;
8540 }
8541
8542 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8543 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8544 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8545
/* ethtool get_eeprom handler: copy eeprom->len bytes of NVRAM starting
 * at eeprom->offset into data.
 *
 * The NVRAM interface reads only aligned 32-bit words, so the request
 * is split into an unaligned head, a run of whole words, and an
 * unaligned tail.  eeprom->len is rewritten to the number of bytes
 * actually delivered, so a partial result is reported correctly on
 * mid-transfer failure.  Returns 0 or a negative errno; -EAGAIN when
 * the PHY is powered down and NVRAM cannot be accessed.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the containing word and copy out just the
		 * requested bytes.
		 */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes copied so far before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8605
8606 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8607
/* ethtool set_eeprom handler: write eeprom->len bytes of data to NVRAM
 * at eeprom->offset.
 *
 * NVRAM writes are whole aligned 32-bit words, so an unaligned request
 * is widened: the partial first and/or last words are read back from
 * NVRAM (read-modify-write) and merged with the caller's data in a
 * temporary buffer before the block write.  Returns 0 or a negative
 * errno; -EAGAIN when the PHY is powered down, -EINVAL on a bad magic.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		/* Fetch the word containing the unaligned start so its
		 * leading bytes can be preserved.
		 */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		/* Fetch the final word so its trailing bytes survive. */
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge preserved edge words with the caller's payload. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8665
8666 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8667 {
8668         struct tg3 *tp = netdev_priv(dev);
8669
8670         cmd->supported = (SUPPORTED_Autoneg);
8671
8672         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8673                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8674                                    SUPPORTED_1000baseT_Full);
8675
8676         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8677                 cmd->supported |= (SUPPORTED_100baseT_Half |
8678                                   SUPPORTED_100baseT_Full |
8679                                   SUPPORTED_10baseT_Half |
8680                                   SUPPORTED_10baseT_Full |
8681                                   SUPPORTED_TP);
8682                 cmd->port = PORT_TP;
8683         } else {
8684                 cmd->supported |= SUPPORTED_FIBRE;
8685                 cmd->port = PORT_FIBRE;
8686         }
8687
8688         cmd->advertising = tp->link_config.advertising;
8689         if (netif_running(dev)) {
8690                 cmd->speed = tp->link_config.active_speed;
8691                 cmd->duplex = tp->link_config.active_duplex;
8692         }
8693         cmd->phy_address = PHY_ADDR;
8694         cmd->transceiver = 0;
8695         cmd->autoneg = tp->link_config.autoneg;
8696         cmd->maxtxpkt = 0;
8697         cmd->maxrxpkt = 0;
8698         return 0;
8699 }
8700
8701 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8702 {
8703         struct tg3 *tp = netdev_priv(dev);
8704
8705         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8706                 /* These are the only valid advertisement bits allowed.  */
8707                 if (cmd->autoneg == AUTONEG_ENABLE &&
8708                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8709                                           ADVERTISED_1000baseT_Full |
8710                                           ADVERTISED_Autoneg |
8711                                           ADVERTISED_FIBRE)))
8712                         return -EINVAL;
8713                 /* Fiber can only do SPEED_1000.  */
8714                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8715                          (cmd->speed != SPEED_1000))
8716                         return -EINVAL;
8717         /* Copper cannot force SPEED_1000.  */
8718         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8719                    (cmd->speed == SPEED_1000))
8720                 return -EINVAL;
8721         else if ((cmd->speed == SPEED_1000) &&
8722                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8723                 return -EINVAL;
8724
8725         tg3_full_lock(tp, 0);
8726
8727         tp->link_config.autoneg = cmd->autoneg;
8728         if (cmd->autoneg == AUTONEG_ENABLE) {
8729                 tp->link_config.advertising = (cmd->advertising |
8730                                               ADVERTISED_Autoneg);
8731                 tp->link_config.speed = SPEED_INVALID;
8732                 tp->link_config.duplex = DUPLEX_INVALID;
8733         } else {
8734                 tp->link_config.advertising = 0;
8735                 tp->link_config.speed = cmd->speed;
8736                 tp->link_config.duplex = cmd->duplex;
8737         }
8738
8739         tp->link_config.orig_speed = tp->link_config.speed;
8740         tp->link_config.orig_duplex = tp->link_config.duplex;
8741         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8742
8743         if (netif_running(dev))
8744                 tg3_setup_phy(tp, 1);
8745
8746         tg3_full_unlock(tp);
8747
8748         return 0;
8749 }
8750
8751 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8752 {
8753         struct tg3 *tp = netdev_priv(dev);
8754
8755         strcpy(info->driver, DRV_MODULE_NAME);
8756         strcpy(info->version, DRV_MODULE_VERSION);
8757         strcpy(info->fw_version, tp->fw_ver);
8758         strcpy(info->bus_info, pci_name(tp->pdev));
8759 }
8760
8761 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8762 {
8763         struct tg3 *tp = netdev_priv(dev);
8764
8765         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8766                 wol->supported = WAKE_MAGIC;
8767         else
8768                 wol->supported = 0;
8769         wol->wolopts = 0;
8770         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8771                 wol->wolopts = WAKE_MAGIC;
8772         memset(&wol->sopass, 0, sizeof(wol->sopass));
8773 }
8774
8775 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8776 {
8777         struct tg3 *tp = netdev_priv(dev);
8778
8779         if (wol->wolopts & ~WAKE_MAGIC)
8780                 return -EINVAL;
8781         if ((wol->wolopts & WAKE_MAGIC) &&
8782             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8783                 return -EINVAL;
8784
8785         spin_lock_bh(&tp->lock);
8786         if (wol->wolopts & WAKE_MAGIC)
8787                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8788         else
8789                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8790         spin_unlock_bh(&tp->lock);
8791
8792         return 0;
8793 }
8794
8795 static u32 tg3_get_msglevel(struct net_device *dev)
8796 {
8797         struct tg3 *tp = netdev_priv(dev);
8798         return tp->msg_enable;
8799 }
8800
8801 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8802 {
8803         struct tg3 *tp = netdev_priv(dev);
8804         tp->msg_enable = value;
8805 }
8806
8807 static int tg3_set_tso(struct net_device *dev, u32 value)
8808 {
8809         struct tg3 *tp = netdev_priv(dev);
8810
8811         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8812                 if (value)
8813                         return -EINVAL;
8814                 return 0;
8815         }
8816         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8817             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8818                 if (value) {
8819                         dev->features |= NETIF_F_TSO6;
8820                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8821                                 dev->features |= NETIF_F_TSO_ECN;
8822                 } else
8823                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8824         }
8825         return ethtool_op_set_tso(dev, value);
8826 }
8827
/* ethtool nway_reset: restart PHY autonegotiation.
 * Returns -EAGAIN if the interface is down, -EINVAL for SERDES links
 * (no MII PHY) or when autoneg is not currently enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice and only the second read is
	 * checked — presumably the first is a dummy read to settle the
	 * MII interface; confirm before removing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Restart (and force-enable) autonegotiation. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8854
8855 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8856 {
8857         struct tg3 *tp = netdev_priv(dev);
8858
8859         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8860         ering->rx_mini_max_pending = 0;
8861         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8862                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8863         else
8864                 ering->rx_jumbo_max_pending = 0;
8865
8866         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8867
8868         ering->rx_pending = tp->rx_pending;
8869         ering->rx_mini_pending = 0;
8870         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8871                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8872         else
8873                 ering->rx_jumbo_pending = 0;
8874
8875         ering->tx_pending = tp->tx_pending;
8876 }
8877
8878 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8879 {
8880         struct tg3 *tp = netdev_priv(dev);
8881         int irq_sync = 0, err = 0;
8882
8883         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8884             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8885             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8886             (ering->tx_pending <= MAX_SKB_FRAGS) ||
8887             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8888              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8889                 return -EINVAL;
8890
8891         if (netif_running(dev)) {
8892                 tg3_netif_stop(tp);
8893                 irq_sync = 1;
8894         }
8895
8896         tg3_full_lock(tp, irq_sync);
8897
8898         tp->rx_pending = ering->rx_pending;
8899
8900         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8901             tp->rx_pending > 63)
8902                 tp->rx_pending = 63;
8903         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8904         tp->tx_pending = ering->tx_pending;
8905
8906         if (netif_running(dev)) {
8907                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8908                 err = tg3_restart_hw(tp, 1);
8909                 if (!err)
8910                         tg3_netif_start(tp);
8911         }
8912
8913         tg3_full_unlock(tp);
8914
8915         return err;
8916 }
8917
8918 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8919 {
8920         struct tg3 *tp = netdev_priv(dev);
8921
8922         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8923
8924         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8925                 epause->rx_pause = 1;
8926         else
8927                 epause->rx_pause = 0;
8928
8929         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8930                 epause->tx_pause = 1;
8931         else
8932                 epause->tx_pause = 0;
8933 }
8934
8935 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8936 {
8937         struct tg3 *tp = netdev_priv(dev);
8938         int irq_sync = 0, err = 0;
8939
8940         if (netif_running(dev)) {
8941                 tg3_netif_stop(tp);
8942                 irq_sync = 1;
8943         }
8944
8945         tg3_full_lock(tp, irq_sync);
8946
8947         if (epause->autoneg)
8948                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8949         else
8950                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8951         if (epause->rx_pause)
8952                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8953         else
8954                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8955         if (epause->tx_pause)
8956                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8957         else
8958                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8959
8960         if (netif_running(dev)) {
8961                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8962                 err = tg3_restart_hw(tp, 1);
8963                 if (!err)
8964                         tg3_netif_start(tp);
8965         }
8966
8967         tg3_full_unlock(tp);
8968
8969         return err;
8970 }
8971
8972 static u32 tg3_get_rx_csum(struct net_device *dev)
8973 {
8974         struct tg3 *tp = netdev_priv(dev);
8975         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8976 }
8977
8978 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8979 {
8980         struct tg3 *tp = netdev_priv(dev);
8981
8982         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8983                 if (data != 0)
8984                         return -EINVAL;
8985                 return 0;
8986         }
8987
8988         spin_lock_bh(&tp->lock);
8989         if (data)
8990                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8991         else
8992                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8993         spin_unlock_bh(&tp->lock);
8994
8995         return 0;
8996 }
8997
8998 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8999 {
9000         struct tg3 *tp = netdev_priv(dev);
9001
9002         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9003                 if (data != 0)
9004                         return -EINVAL;
9005                 return 0;
9006         }
9007
9008         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9009             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9011             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9012                 ethtool_op_set_tx_ipv6_csum(dev, data);
9013         else
9014                 ethtool_op_set_tx_csum(dev, data);
9015
9016         return 0;
9017 }
9018
9019 static int tg3_get_sset_count (struct net_device *dev, int sset)
9020 {
9021         switch (sset) {
9022         case ETH_SS_TEST:
9023                 return TG3_NUM_TEST;
9024         case ETH_SS_STATS:
9025                 return TG3_NUM_STATS;
9026         default:
9027                 return -EOPNOTSUPP;
9028         }
9029 }
9030
9031 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9032 {
9033         switch (stringset) {
9034         case ETH_SS_STATS:
9035                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9036                 break;
9037         case ETH_SS_TEST:
9038                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9039                 break;
9040         default:
9041                 WARN_ON(1);     /* we need a WARN() */
9042                 break;
9043         }
9044 }
9045
9046 static int tg3_phys_id(struct net_device *dev, u32 data)
9047 {
9048         struct tg3 *tp = netdev_priv(dev);
9049         int i;
9050
9051         if (!netif_running(tp->dev))
9052                 return -EAGAIN;
9053
9054         if (data == 0)
9055                 data = UINT_MAX / 2;
9056
9057         for (i = 0; i < (data * 2); i++) {
9058                 if ((i % 2) == 0)
9059                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9060                                            LED_CTRL_1000MBPS_ON |
9061                                            LED_CTRL_100MBPS_ON |
9062                                            LED_CTRL_10MBPS_ON |
9063                                            LED_CTRL_TRAFFIC_OVERRIDE |
9064                                            LED_CTRL_TRAFFIC_BLINK |
9065                                            LED_CTRL_TRAFFIC_LED);
9066
9067                 else
9068                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9069                                            LED_CTRL_TRAFFIC_OVERRIDE);
9070
9071                 if (msleep_interruptible(500))
9072                         break;
9073         }
9074         tw32(MAC_LED_CTRL, tp->led_ctrl);
9075         return 0;
9076 }
9077
9078 static void tg3_get_ethtool_stats (struct net_device *dev,
9079                                    struct ethtool_stats *estats, u64 *tmp_stats)
9080 {
9081         struct tg3 *tp = netdev_priv(dev);
9082         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9083 }
9084
9085 #define NVRAM_TEST_SIZE 0x100
9086 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9087 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9088 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9089 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9090 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9091
/* Self-test: verify the NVRAM contents against the checksum scheme of
 * whichever image format the magic word identifies (legacy EEPROM,
 * selfboot firmware, or selfboot hardware format).
 * Returns 0 on success, -EIO on checksum/read failure, -ENOMEM on OOM.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	/* The first NVRAM word identifies the image layout. */
	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		/* Selfboot firmware image: size depends on format revision. */
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: nothing we can verify. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole region to be checksummed into buf. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* The 8-bit sum over the image must come to zero. */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		/* Bytes 0, 8, 16 and 17 hold packed parity bits for the
		 * data bytes that follow; note i is also advanced inside
		 * the loop body to step past those parity bytes.
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits per byte here. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* 6 bits in byte 16, then 8 more in byte 17. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		/* Each data byte plus its stored parity bit must have odd
		 * overall parity: odd popcount requires the bit clear,
		 * even popcount requires it set.
		 */
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Legacy EEPROM layout: two CRC-protected regions. */
	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9225
9226 #define TG3_SERDES_TIMEOUT_SEC  2
9227 #define TG3_COPPER_TIMEOUT_SEC  6
9228
9229 static int tg3_test_link(struct tg3 *tp)
9230 {
9231         int i, max;
9232
9233         if (!netif_running(tp->dev))
9234                 return -ENODEV;
9235
9236         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9237                 max = TG3_SERDES_TIMEOUT_SEC;
9238         else
9239                 max = TG3_COPPER_TIMEOUT_SEC;
9240
9241         for (i = 0; i < max; i++) {
9242                 if (netif_carrier_ok(tp->dev))
9243                         return 0;
9244
9245                 if (msleep_interruptible(1000))
9246                         break;
9247         }
9248
9249         return -EIO;
9250 }
9251
/* Only test the commonly used registers */
/* Self-test: for each register in reg_tbl (filtered by chip family),
 * write all-zeros and then all-ones through the write mask, checking
 * that read-only bits never change and read/write bits take the value
 * written.  The original register content is restored afterwards.
 * Returns 0 on success, -EIO naming the failing offset otherwise.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* chip-family applicability of this entry */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
		u32 read_mask;	/* bits that are read-only */
		u32 write_mask;	/* bits that are read/write */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	/* Restore the register we were poking before bailing out. */
	tw32(offset, save_val);
	return -EIO;
}
9472
9473 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9474 {
9475         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9476         int i;
9477         u32 j;
9478
9479         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9480                 for (j = 0; j < len; j += 4) {
9481                         u32 val;
9482
9483                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9484                         tg3_read_mem(tp, offset + j, &val);
9485                         if (val != test_pattern[i])
9486                                 return -EIO;
9487                 }
9488         }
9489         return 0;
9490 }
9491
9492 static int tg3_test_memory(struct tg3 *tp)
9493 {
9494         static struct mem_entry {
9495                 u32 offset;
9496                 u32 len;
9497         } mem_tbl_570x[] = {
9498                 { 0x00000000, 0x00b50},
9499                 { 0x00002000, 0x1c000},
9500                 { 0xffffffff, 0x00000}
9501         }, mem_tbl_5705[] = {
9502                 { 0x00000100, 0x0000c},
9503                 { 0x00000200, 0x00008},
9504                 { 0x00004000, 0x00800},
9505                 { 0x00006000, 0x01000},
9506                 { 0x00008000, 0x02000},
9507                 { 0x00010000, 0x0e000},
9508                 { 0xffffffff, 0x00000}
9509         }, mem_tbl_5755[] = {
9510                 { 0x00000200, 0x00008},
9511                 { 0x00004000, 0x00800},
9512                 { 0x00006000, 0x00800},
9513                 { 0x00008000, 0x02000},
9514                 { 0x00010000, 0x0c000},
9515                 { 0xffffffff, 0x00000}
9516         }, mem_tbl_5906[] = {
9517                 { 0x00000200, 0x00008},
9518                 { 0x00004000, 0x00400},
9519                 { 0x00006000, 0x00400},
9520                 { 0x00008000, 0x01000},
9521                 { 0x00010000, 0x01000},
9522                 { 0xffffffff, 0x00000}
9523         };
9524         struct mem_entry *mem_tbl;
9525         int err = 0;
9526         int i;
9527
9528         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9529                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9530                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9531                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9532                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9533                         mem_tbl = mem_tbl_5755;
9534                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9535                         mem_tbl = mem_tbl_5906;
9536                 else
9537                         mem_tbl = mem_tbl_5705;
9538         } else
9539                 mem_tbl = mem_tbl_570x;
9540
9541         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9542                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9543                     mem_tbl[i].len)) != 0)
9544                         break;
9545         }
9546
9547         return err;
9548 }
9549
9550 #define TG3_MAC_LOOPBACK        0
9551 #define TG3_PHY_LOOPBACK        1
9552
/* Send one self-addressed test frame through the device in loopback and
 * verify it comes back intact.
 *
 * @loopback_mode: TG3_MAC_LOOPBACK (frame loops inside the MAC) or
 *                 TG3_PHY_LOOPBACK (frame loops at the PHY).
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the test
 * skb cannot be allocated, or -EIO if the frame is not received correctly.
 *
 * NOTE(review): the register/PHY programming order below is
 * hardware-mandated; do not reorder.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Force internal MAC loopback; pick MII vs GMII port mode
		 * based on whether the device is 10/100-only.
		 */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: clear a shadow-register bit (reg 0x1b bit 5)
			 * via the EPHY test/shadow window before enabling
			 * loopback.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5700 link-polarity handling differs per PHY model. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a max-size (1514 byte) frame: our own MAC as destination,
	 * zeroed source/type bytes, then a deterministic byte ramp as
	 * payload starting at offset 14.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Snapshot rx producer so we can tell when our frame arrives. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the TX mailbox and read it back to flush the write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Frame must have been both transmitted and received. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX completion descriptor: must be from the standard
	 * ring, error-free (odd-nibble MII is tolerated), and full length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the received payload ramp byte-for-byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9720
9721 #define TG3_MAC_LOOPBACK_FAILED         1
9722 #define TG3_PHY_LOOPBACK_FAILED         2
9723 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9724                                          TG3_PHY_LOOPBACK_FAILED)
9725
/* Run MAC and (where applicable) PHY loopback tests.
 *
 * Returns a bitmask of TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED,
 * 0 on full success, or TG3_LOOPBACK_FAILED if the device is down or the
 * hardware cannot be reset/prepared for the test.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	/* 5784/5761: take the CPMU mutex and disable link-based power
	 * management for the duration of the test, restoring both after.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		/* Restore power management settings saved above. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback only applies when there is a copper PHY we drive
	 * ourselves (no serdes, not managed by phylib).
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9782
/* ethtool self-test entry point.
 *
 * Fills @data with one result slot per test (0 = pass, nonzero = fail):
 *   [0] NVRAM, [1] link, [2] registers, [3] memory, [4] loopback,
 *   [5] interrupt.  Sets ETH_TEST_FL_FAILED in @etest->flags on any
 * failure.  The offline tests halt and later restart the device, so
 * traffic is interrupted while they run.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Temporarily power up a sleeping device for the tests. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it, then stop the on-chip CPUs
		 * (TX CPU exists only on pre-5705 parts) while holding
		 * the NVRAM lock so firmware cannot interfere.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* Loopback result is a bitmask, reported directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test needs the lock dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the device back to its pre-test state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9855
9856 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9857 {
9858         struct mii_ioctl_data *data = if_mii(ifr);
9859         struct tg3 *tp = netdev_priv(dev);
9860         int err;
9861
9862         switch(cmd) {
9863         case SIOCGMIIPHY:
9864                 data->phy_id = PHY_ADDR;
9865
9866                 /* fallthru */
9867         case SIOCGMIIREG: {
9868                 u32 mii_regval;
9869
9870                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9871                         break;                  /* We have no PHY */
9872
9873                 if (tp->link_config.phy_is_low_power)
9874                         return -EAGAIN;
9875
9876                 spin_lock_bh(&tp->lock);
9877                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9878                 spin_unlock_bh(&tp->lock);
9879
9880                 data->val_out = mii_regval;
9881
9882                 return err;
9883         }
9884
9885         case SIOCSMIIREG:
9886                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9887                         break;                  /* We have no PHY */
9888
9889                 if (!capable(CAP_NET_ADMIN))
9890                         return -EPERM;
9891
9892                 if (tp->link_config.phy_is_low_power)
9893                         return -EAGAIN;
9894
9895                 spin_lock_bh(&tp->lock);
9896                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9897                 spin_unlock_bh(&tp->lock);
9898
9899                 return err;
9900
9901         default:
9902                 /* do nothing */
9903                 break;
9904         }
9905         return -EOPNOTSUPP;
9906 }
9907
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new VLAN group and reprogram the
 * RX mode, quiescing the device around the update.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
9929
9930 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9931 {
9932         struct tg3 *tp = netdev_priv(dev);
9933
9934         memcpy(ec, &tp->coal, sizeof(*ec));
9935         return 0;
9936 }
9937
9938 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9939 {
9940         struct tg3 *tp = netdev_priv(dev);
9941         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9942         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9943
9944         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9945                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9946                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9947                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9948                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9949         }
9950
9951         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9952             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9953             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9954             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9955             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9956             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9957             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9958             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9959             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9960             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9961                 return -EINVAL;
9962
9963         /* No rx interrupts will be generated if both are zero */
9964         if ((ec->rx_coalesce_usecs == 0) &&
9965             (ec->rx_max_coalesced_frames == 0))
9966                 return -EINVAL;
9967
9968         /* No tx interrupts will be generated if both are zero */
9969         if ((ec->tx_coalesce_usecs == 0) &&
9970             (ec->tx_max_coalesced_frames == 0))
9971                 return -EINVAL;
9972
9973         /* Only copy relevant parameters, ignore all others. */
9974         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9975         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9976         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9977         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9978         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9979         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9980         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9981         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9982         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9983
9984         if (netif_running(dev)) {
9985                 tg3_full_lock(tp, 0);
9986                 __tg3_set_coalesce(tp, &tp->coal);
9987                 tg3_full_unlock(tp);
9988         }
9989         return 0;
9990 }
9991
/* ethtool operations table wiring the tg3_* handlers defined above into
 * the ethtool framework.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
10024
10025 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10026 {
10027         u32 cursize, val, magic;
10028
10029         tp->nvram_size = EEPROM_CHIP_SIZE;
10030
10031         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10032                 return;
10033
10034         if ((magic != TG3_EEPROM_MAGIC) &&
10035             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10036             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10037                 return;
10038
10039         /*
10040          * Size the chip by reading offsets at increasing powers of two.
10041          * When we encounter our validation signature, we know the addressing
10042          * has wrapped around, and thus have our chip size.
10043          */
10044         cursize = 0x10;
10045
10046         while (cursize < tp->nvram_size) {
10047                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10048                         return;
10049
10050                 if (val == magic)
10051                         break;
10052
10053                 cursize <<= 1;
10054         }
10055
10056         tp->nvram_size = cursize;
10057 }
10058
10059 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10060 {
10061         u32 val;
10062
10063         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10064                 return;
10065
10066         /* Selfboot format */
10067         if (val != TG3_EEPROM_MAGIC) {
10068                 tg3_get_eeprom_size(tp);
10069                 return;
10070         }
10071
10072         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10073                 if (val != 0) {
10074                         tp->nvram_size = (val >> 16) * 1024;
10075                         return;
10076                 }
10077         }
10078         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10079 }
10080
10081 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10082 {
10083         u32 nvcfg1;
10084
10085         nvcfg1 = tr32(NVRAM_CFG1);
10086         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10087                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10088         }
10089         else {
10090                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10091                 tw32(NVRAM_CFG1, nvcfg1);
10092         }
10093
10094         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10095             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10096                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10097                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10098                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10099                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10100                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10101                                 break;
10102                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10103                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10104                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10105                                 break;
10106                         case FLASH_VENDOR_ATMEL_EEPROM:
10107                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10108                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10109                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10110                                 break;
10111                         case FLASH_VENDOR_ST:
10112                                 tp->nvram_jedecnum = JEDEC_ST;
10113                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10114                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10115                                 break;
10116                         case FLASH_VENDOR_SAIFUN:
10117                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10118                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10119                                 break;
10120                         case FLASH_VENDOR_SST_SMALL:
10121                         case FLASH_VENDOR_SST_LARGE:
10122                                 tp->nvram_jedecnum = JEDEC_SST;
10123                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10124                                 break;
10125                 }
10126         }
10127         else {
10128                 tp->nvram_jedecnum = JEDEC_ATMEL;
10129                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10130                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10131         }
10132 }
10133
10134 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10135 {
10136         u32 nvcfg1;
10137
10138         nvcfg1 = tr32(NVRAM_CFG1);
10139
10140         /* NVRAM protection for TPM */
10141         if (nvcfg1 & (1 << 27))
10142                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10143
10144         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10145                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10146                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10147                         tp->nvram_jedecnum = JEDEC_ATMEL;
10148                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10149                         break;
10150                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10151                         tp->nvram_jedecnum = JEDEC_ATMEL;
10152                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10153                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10154                         break;
10155                 case FLASH_5752VENDOR_ST_M45PE10:
10156                 case FLASH_5752VENDOR_ST_M45PE20:
10157                 case FLASH_5752VENDOR_ST_M45PE40:
10158                         tp->nvram_jedecnum = JEDEC_ST;
10159                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10160                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10161                         break;
10162         }
10163
10164         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10165                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10166                         case FLASH_5752PAGE_SIZE_256:
10167                                 tp->nvram_pagesize = 256;
10168                                 break;
10169                         case FLASH_5752PAGE_SIZE_512:
10170                                 tp->nvram_pagesize = 512;
10171                                 break;
10172                         case FLASH_5752PAGE_SIZE_1K:
10173                                 tp->nvram_pagesize = 1024;
10174                                 break;
10175                         case FLASH_5752PAGE_SIZE_2K:
10176                                 tp->nvram_pagesize = 2048;
10177                                 break;
10178                         case FLASH_5752PAGE_SIZE_4K:
10179                                 tp->nvram_pagesize = 4096;
10180                                 break;
10181                         case FLASH_5752PAGE_SIZE_264:
10182                                 tp->nvram_pagesize = 264;
10183                                 break;
10184                 }
10185         }
10186         else {
10187                 /* For eeprom, set pagesize to maximum eeprom size */
10188                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10189
10190                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10191                 tw32(NVRAM_CFG1, nvcfg1);
10192         }
10193 }
10194
10195 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10196 {
10197         u32 nvcfg1, protect = 0;
10198
10199         nvcfg1 = tr32(NVRAM_CFG1);
10200
10201         /* NVRAM protection for TPM */
10202         if (nvcfg1 & (1 << 27)) {
10203                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10204                 protect = 1;
10205         }
10206
10207         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10208         switch (nvcfg1) {
10209                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10210                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10211                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10212                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10213                         tp->nvram_jedecnum = JEDEC_ATMEL;
10214                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10215                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10216                         tp->nvram_pagesize = 264;
10217                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10218                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10219                                 tp->nvram_size = (protect ? 0x3e200 :
10220                                                   TG3_NVRAM_SIZE_512KB);
10221                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10222                                 tp->nvram_size = (protect ? 0x1f200 :
10223                                                   TG3_NVRAM_SIZE_256KB);
10224                         else
10225                                 tp->nvram_size = (protect ? 0x1f200 :
10226                                                   TG3_NVRAM_SIZE_128KB);
10227                         break;
10228                 case FLASH_5752VENDOR_ST_M45PE10:
10229                 case FLASH_5752VENDOR_ST_M45PE20:
10230                 case FLASH_5752VENDOR_ST_M45PE40:
10231                         tp->nvram_jedecnum = JEDEC_ST;
10232                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10233                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10234                         tp->nvram_pagesize = 256;
10235                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10236                                 tp->nvram_size = (protect ?
10237                                                   TG3_NVRAM_SIZE_64KB :
10238                                                   TG3_NVRAM_SIZE_128KB);
10239                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10240                                 tp->nvram_size = (protect ?
10241                                                   TG3_NVRAM_SIZE_64KB :
10242                                                   TG3_NVRAM_SIZE_256KB);
10243                         else
10244                                 tp->nvram_size = (protect ?
10245                                                   TG3_NVRAM_SIZE_128KB :
10246                                                   TG3_NVRAM_SIZE_512KB);
10247                         break;
10248         }
10249 }
10250
10251 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10252 {
10253         u32 nvcfg1;
10254
10255         nvcfg1 = tr32(NVRAM_CFG1);
10256
10257         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10258                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10259                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10260                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10261                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10262                         tp->nvram_jedecnum = JEDEC_ATMEL;
10263                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10264                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10265
10266                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10267                         tw32(NVRAM_CFG1, nvcfg1);
10268                         break;
10269                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10270                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10271                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10272                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10273                         tp->nvram_jedecnum = JEDEC_ATMEL;
10274                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10275                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10276                         tp->nvram_pagesize = 264;
10277                         break;
10278                 case FLASH_5752VENDOR_ST_M45PE10:
10279                 case FLASH_5752VENDOR_ST_M45PE20:
10280                 case FLASH_5752VENDOR_ST_M45PE40:
10281                         tp->nvram_jedecnum = JEDEC_ST;
10282                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10283                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10284                         tp->nvram_pagesize = 256;
10285                         break;
10286         }
10287 }
10288
10289 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10290 {
10291         u32 nvcfg1, protect = 0;
10292
10293         nvcfg1 = tr32(NVRAM_CFG1);
10294
10295         /* NVRAM protection for TPM */
10296         if (nvcfg1 & (1 << 27)) {
10297                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10298                 protect = 1;
10299         }
10300
10301         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10302         switch (nvcfg1) {
10303                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10304                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10305                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10306                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10307                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10308                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10309                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10310                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10311                         tp->nvram_jedecnum = JEDEC_ATMEL;
10312                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10313                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10314                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10315                         tp->nvram_pagesize = 256;
10316                         break;
10317                 case FLASH_5761VENDOR_ST_A_M45PE20:
10318                 case FLASH_5761VENDOR_ST_A_M45PE40:
10319                 case FLASH_5761VENDOR_ST_A_M45PE80:
10320                 case FLASH_5761VENDOR_ST_A_M45PE16:
10321                 case FLASH_5761VENDOR_ST_M_M45PE20:
10322                 case FLASH_5761VENDOR_ST_M_M45PE40:
10323                 case FLASH_5761VENDOR_ST_M_M45PE80:
10324                 case FLASH_5761VENDOR_ST_M_M45PE16:
10325                         tp->nvram_jedecnum = JEDEC_ST;
10326                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10327                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10328                         tp->nvram_pagesize = 256;
10329                         break;
10330         }
10331
10332         if (protect) {
10333                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10334         } else {
10335                 switch (nvcfg1) {
10336                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10337                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10338                         case FLASH_5761VENDOR_ST_A_M45PE16:
10339                         case FLASH_5761VENDOR_ST_M_M45PE16:
10340                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10341                                 break;
10342                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10343                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10344                         case FLASH_5761VENDOR_ST_A_M45PE80:
10345                         case FLASH_5761VENDOR_ST_M_M45PE80:
10346                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10347                                 break;
10348                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10349                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10350                         case FLASH_5761VENDOR_ST_A_M45PE40:
10351                         case FLASH_5761VENDOR_ST_M_M45PE40:
10352                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10353                                 break;
10354                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10355                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10356                         case FLASH_5761VENDOR_ST_A_M45PE20:
10357                         case FLASH_5761VENDOR_ST_M_M45PE20:
10358                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10359                                 break;
10360                 }
10361         }
10362 }
10363
10364 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10365 {
10366         tp->nvram_jedecnum = JEDEC_ATMEL;
10367         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10368         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10369 }
10370
10371 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10372 static void __devinit tg3_nvram_init(struct tg3 *tp)
10373 {
10374         tw32_f(GRC_EEPROM_ADDR,
10375              (EEPROM_ADDR_FSM_RESET |
10376               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10377                EEPROM_ADDR_CLKPERD_SHIFT)));
10378
10379         msleep(1);
10380
10381         /* Enable seeprom accesses. */
10382         tw32_f(GRC_LOCAL_CTRL,
10383              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10384         udelay(100);
10385
10386         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10387             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10388                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10389
10390                 if (tg3_nvram_lock(tp)) {
10391                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10392                                "tg3_nvram_init failed.\n", tp->dev->name);
10393                         return;
10394                 }
10395                 tg3_enable_nvram_access(tp);
10396
10397                 tp->nvram_size = 0;
10398
10399                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10400                         tg3_get_5752_nvram_info(tp);
10401                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10402                         tg3_get_5755_nvram_info(tp);
10403                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10404                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10405                         tg3_get_5787_nvram_info(tp);
10406                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10407                         tg3_get_5761_nvram_info(tp);
10408                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10409                         tg3_get_5906_nvram_info(tp);
10410                 else
10411                         tg3_get_nvram_info(tp);
10412
10413                 if (tp->nvram_size == 0)
10414                         tg3_get_nvram_size(tp);
10415
10416                 tg3_disable_nvram_access(tp);
10417                 tg3_nvram_unlock(tp);
10418
10419         } else {
10420                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10421
10422                 tg3_get_eeprom_size(tp);
10423         }
10424 }
10425
10426 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10427                                         u32 offset, u32 *val)
10428 {
10429         u32 tmp;
10430         int i;
10431
10432         if (offset > EEPROM_ADDR_ADDR_MASK ||
10433             (offset % 4) != 0)
10434                 return -EINVAL;
10435
10436         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10437                                         EEPROM_ADDR_DEVID_MASK |
10438                                         EEPROM_ADDR_READ);
10439         tw32(GRC_EEPROM_ADDR,
10440              tmp |
10441              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10442              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10443               EEPROM_ADDR_ADDR_MASK) |
10444              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10445
10446         for (i = 0; i < 1000; i++) {
10447                 tmp = tr32(GRC_EEPROM_ADDR);
10448
10449                 if (tmp & EEPROM_ADDR_COMPLETE)
10450                         break;
10451                 msleep(1);
10452         }
10453         if (!(tmp & EEPROM_ADDR_COMPLETE))
10454                 return -EBUSY;
10455
10456         *val = tr32(GRC_EEPROM_DATA);
10457         return 0;
10458 }
10459
10460 #define NVRAM_CMD_TIMEOUT 10000
10461
10462 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10463 {
10464         int i;
10465
10466         tw32(NVRAM_CMD, nvram_cmd);
10467         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10468                 udelay(10);
10469                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10470                         udelay(10);
10471                         break;
10472                 }
10473         }
10474         if (i == NVRAM_CMD_TIMEOUT) {
10475                 return -EBUSY;
10476         }
10477         return 0;
10478 }
10479
10480 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10481 {
10482         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10483             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10484             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10485            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10486             (tp->nvram_jedecnum == JEDEC_ATMEL))
10487
10488                 addr = ((addr / tp->nvram_pagesize) <<
10489                         ATMEL_AT45DB0X1B_PAGE_POS) +
10490                        (addr % tp->nvram_pagesize);
10491
10492         return addr;
10493 }
10494
10495 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10496 {
10497         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10498             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10499             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10500            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10501             (tp->nvram_jedecnum == JEDEC_ATMEL))
10502
10503                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10504                         tp->nvram_pagesize) +
10505                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10506
10507         return addr;
10508 }
10509
10510 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10511 {
10512         int ret;
10513
10514         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10515                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10516
10517         offset = tg3_nvram_phys_addr(tp, offset);
10518
10519         if (offset > NVRAM_ADDR_MSK)
10520                 return -EINVAL;
10521
10522         ret = tg3_nvram_lock(tp);
10523         if (ret)
10524                 return ret;
10525
10526         tg3_enable_nvram_access(tp);
10527
10528         tw32(NVRAM_ADDR, offset);
10529         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10530                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10531
10532         if (ret == 0)
10533                 *val = swab32(tr32(NVRAM_RDDATA));
10534
10535         tg3_disable_nvram_access(tp);
10536
10537         tg3_nvram_unlock(tp);
10538
10539         return ret;
10540 }
10541
10542 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10543 {
10544         u32 v;
10545         int res = tg3_nvram_read(tp, offset, &v);
10546         if (!res)
10547                 *val = cpu_to_le32(v);
10548         return res;
10549 }
10550
10551 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10552 {
10553         int err;
10554         u32 tmp;
10555
10556         err = tg3_nvram_read(tp, offset, &tmp);
10557         *val = swab32(tmp);
10558         return err;
10559 }
10560
10561 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10562                                     u32 offset, u32 len, u8 *buf)
10563 {
10564         int i, j, rc = 0;
10565         u32 val;
10566
10567         for (i = 0; i < len; i += 4) {
10568                 u32 addr;
10569                 __le32 data;
10570
10571                 addr = offset + i;
10572
10573                 memcpy(&data, buf + i, 4);
10574
10575                 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10576
10577                 val = tr32(GRC_EEPROM_ADDR);
10578                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10579
10580                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10581                         EEPROM_ADDR_READ);
10582                 tw32(GRC_EEPROM_ADDR, val |
10583                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
10584                         (addr & EEPROM_ADDR_ADDR_MASK) |
10585                         EEPROM_ADDR_START |
10586                         EEPROM_ADDR_WRITE);
10587
10588                 for (j = 0; j < 1000; j++) {
10589                         val = tr32(GRC_EEPROM_ADDR);
10590
10591                         if (val & EEPROM_ADDR_COMPLETE)
10592                                 break;
10593                         msleep(1);
10594                 }
10595                 if (!(val & EEPROM_ADDR_COMPLETE)) {
10596                         rc = -EBUSY;
10597                         break;
10598                 }
10599         }
10600
10601         return rc;
10602 }
10603
/* offset and length are dword aligned */
/* Write to unbuffered flash via a read-modify-erase-program cycle:
 * each affected page is read into a scratch buffer, merged with the
 * caller's data, erased, and reprogrammed word by word.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page image. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the flash page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Read back the whole page so words outside the write
		 * range survive the erase.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge caller data into the page image.
		 * NOTE(review): buf is not advanced between iterations,
		 * so a write spanning multiple pages appears to program
		 * every page from the same source bytes — verify callers
		 * never span pages, or confirm against upstream tg3.
		 */
		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Reprogram the merged page one 32-bit word at a time. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			/* First/last words of the page carry framing bits. */
			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Re-assert write disable whether or not the write succeeded. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10700
/* offset and length are dword aligned */
/* Write to buffered flash (or plain EEPROM-mode NVRAM): program one
 * 32-bit word per iteration, marking page boundaries with the
 * NVRAM_CMD_FIRST/LAST framing bits.  Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		/* Byte position of this word within its flash page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST on a page start or the very first word ... */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		/* ... LAST on a page end ... */
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ... and LAST on the final word of the request. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Older ASICs with ST flash need an explicit write-enable
		 * command before the first word of each page burst.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10754
10755 /* offset and length are dword aligned */
10756 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10757 {
10758         int ret;
10759
10760         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10761                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10762                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10763                 udelay(40);
10764         }
10765
10766         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10767                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10768         }
10769         else {
10770                 u32 grc_mode;
10771
10772                 ret = tg3_nvram_lock(tp);
10773                 if (ret)
10774                         return ret;
10775
10776                 tg3_enable_nvram_access(tp);
10777                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10778                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10779                         tw32(NVRAM_WRITE1, 0x406);
10780
10781                 grc_mode = tr32(GRC_MODE);
10782                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10783
10784                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10785                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10786
10787                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10788                                 buf);
10789                 }
10790                 else {
10791                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10792                                 buf);
10793                 }
10794
10795                 grc_mode = tr32(GRC_MODE);
10796                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10797
10798                 tg3_disable_nvram_access(tp);
10799                 tg3_nvram_unlock(tp);
10800         }
10801
10802         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10803                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10804                 udelay(40);
10805         }
10806
10807         return ret;
10808 }
10809
/* One entry of the board table below: a PCI subsystem (vendor, device)
 * pair and the PHY id associated with that board design.  Matched by
 * lookup_by_subsys() against tp->pdev's subsystem ids.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
10814
/* Known board designs, keyed by PCI subsystem vendor/device, mapping
 * each to its PHY id.  Searched linearly by lookup_by_subsys().
 * NOTE(review): entries with phy_id 0 appear to be fiber/SX boards
 * (e.g. 3C996SX) — presumably "no copper PHY"; confirm with callers.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10852
10853 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10854 {
10855         int i;
10856
10857         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10858                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10859                      tp->pdev->subsystem_vendor) &&
10860                     (subsys_id_to_phy_id[i].subsys_devid ==
10861                      tp->pdev->subsystem_device))
10862                         return &subsys_id_to_phy_id[i];
10863         }
10864         return NULL;
10865 }
10866
10867 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10868 {
10869         u32 val;
10870         u16 pmcsr;
10871
10872         /* On some early chips the SRAM cannot be accessed in D3hot state,
10873          * so need make sure we're in D0.
10874          */
10875         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10876         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10877         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10878         msleep(1);
10879
10880         /* Make sure register accesses (indirect or otherwise)
10881          * will function correctly.
10882          */
10883         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10884                                tp->misc_host_ctrl);
10885
10886         /* The memory arbiter has to be enabled in order for SRAM accesses
10887          * to succeed.  Normally on powerup the tg3 chip firmware will make
10888          * sure it is enabled, but other entities such as system netboot
10889          * code might disable it.
10890          */
10891         val = tr32(MEMARB_MODE);
10892         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10893
10894         tp->phy_id = PHY_ID_INVALID;
10895         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10896
10897         /* Assume an onboard device and WOL capable by default.  */
10898         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10899
10900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10901                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10902                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10903                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10904                 }
10905                 val = tr32(VCPU_CFGSHDW);
10906                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10907                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10908                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10909                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10910                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10911                 return;
10912         }
10913
10914         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10915         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10916                 u32 nic_cfg, led_cfg;
10917                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10918                 int eeprom_phy_serdes = 0;
10919
10920                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10921                 tp->nic_sram_data_cfg = nic_cfg;
10922
10923                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10924                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10925                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10926                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10927                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10928                     (ver > 0) && (ver < 0x100))
10929                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10930
10931                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10932                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10933                         eeprom_phy_serdes = 1;
10934
10935                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10936                 if (nic_phy_id != 0) {
10937                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10938                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10939
10940                         eeprom_phy_id  = (id1 >> 16) << 10;
10941                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10942                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10943                 } else
10944                         eeprom_phy_id = 0;
10945
10946                 tp->phy_id = eeprom_phy_id;
10947                 if (eeprom_phy_serdes) {
10948                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10949                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10950                         else
10951                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10952                 }
10953
10954                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10955                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10956                                     SHASTA_EXT_LED_MODE_MASK);
10957                 else
10958                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10959
10960                 switch (led_cfg) {
10961                 default:
10962                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10963                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10964                         break;
10965
10966                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10967                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10968                         break;
10969
10970                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10971                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10972
10973                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10974                          * read on some older 5700/5701 bootcode.
10975                          */
10976                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10977                             ASIC_REV_5700 ||
10978                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10979                             ASIC_REV_5701)
10980                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10981
10982                         break;
10983
10984                 case SHASTA_EXT_LED_SHARED:
10985                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10986                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10987                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10988                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10989                                                  LED_CTRL_MODE_PHY_2);
10990                         break;
10991
10992                 case SHASTA_EXT_LED_MAC:
10993                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10994                         break;
10995
10996                 case SHASTA_EXT_LED_COMBO:
10997                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10998                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10999                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11000                                                  LED_CTRL_MODE_PHY_2);
11001                         break;
11002
11003                 };
11004
11005                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11006                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11007                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11008                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11009
11010                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11011                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11012
11013                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11014                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11015                         if ((tp->pdev->subsystem_vendor ==
11016                              PCI_VENDOR_ID_ARIMA) &&
11017                             (tp->pdev->subsystem_device == 0x205a ||
11018                              tp->pdev->subsystem_device == 0x2063))
11019                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11020                 } else {
11021                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11022                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11023                 }
11024
11025                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11026                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11027                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11028                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11029                 }
11030                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
11031                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11032                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11033                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11034                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11035
11036                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
11037                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
11038                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11039
11040                 if (cfg2 & (1 << 17))
11041                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11042
11043                 /* serdes signal pre-emphasis in register 0x590 set by */
11044                 /* bootcode if bit 18 is set */
11045                 if (cfg2 & (1 << 18))
11046                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11047
11048                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11049                         u32 cfg3;
11050
11051                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11052                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11053                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11054                 }
11055         }
11056 }
11057
11058 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11059 {
11060         int i;
11061         u32 val;
11062
11063         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11064         tw32(OTP_CTRL, cmd);
11065
11066         /* Wait for up to 1 ms for command to execute. */
11067         for (i = 0; i < 100; i++) {
11068                 val = tr32(OTP_STATUS);
11069                 if (val & OTP_STATUS_CMD_DONE)
11070                         break;
11071                 udelay(10);
11072         }
11073
11074         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11075 }
11076
11077 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11078  * configuration is a 32-bit value that straddles the alignment boundary.
11079  * We do two 32-bit reads and then shift and merge the results.
11080  */
11081 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11082 {
11083         u32 bhalf_otp, thalf_otp;
11084
11085         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11086
11087         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11088                 return 0;
11089
11090         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11091
11092         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11093                 return 0;
11094
11095         thalf_otp = tr32(OTP_READ_DATA);
11096
11097         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11098
11099         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11100                 return 0;
11101
11102         bhalf_otp = tr32(OTP_READ_DATA);
11103
11104         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11105 }
11106
/* Determine which PHY is attached to the device and set up its initial
 * advertisement state.  When the hardware PHY ID cannot be trusted
 * (ASF/APE firmware owns the PHY, or the ID read fails), falls back to
 * the ID from EEPROM or the hard-coded subsystem-ID table.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal phy_id layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Only reset/reconfigure a copper PHY that management firmware
	 * is not using.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice -- presumably because the link
		 * status bit is latched and the first read flushes a
		 * stale value (TODO confirm).  If the link is already
		 * up, skip the PHY reset entirely.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 additionally force master-mode
			 * advertisement.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		/* Restart autonegotiation only when the PHY is not
		 * already advertising the full set we want.
		 */
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* Re-write the advertisement registers after the
		 * wirespeed setup.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this second call looks redundant -- the same
	 * condition was just handled above and err is 0 if we reached
	 * here.  Kept byte-for-byte to preserve existing behavior.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11234
/* Read the board part number from the VPD area into
 * tp->board_part_number.  The VPD bytes come either directly from
 * NVRAM (standard EEPROM layout) or through the PCI VPD capability.
 * Falls back to a fixed string when the data cannot be read or parsed.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Standard layout: copy 256 bytes of VPD starting at
		 * NVRAM offset 0x100, one 32-bit word at a time.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		/* No EEPROM signature: fetch the VPD through the PCI
		 * VPD capability registers instead.
		 */
		int vpd_cap;

		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll the completion flag (bit 15 of VPD_ADDR)
			 * for up to ~100 ms.
			 */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip identifier-string (0x82) and read-write (0x91)
		 * resources; a 16-bit little-endian length follows the
		 * tag byte.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything other than a read-only resource (0x90) at
		 * this point means the VPD is malformed.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		/* Bail if the read-only block claims to extend past the
		 * 256 bytes we actually read.
		 */
		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword entries inside the read-only block:
		 * each is a 2-byte keyword, a length byte, then data.
		 * We only care about "PN" (part number).
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* board_part_number holds at most 24
				 * bytes; also guard the source bounds.
				 */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
11335
11336 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11337 {
11338         u32 val;
11339
11340         if (tg3_nvram_read_swab(tp, offset, &val) ||
11341             (val & 0xfc000000) != 0x0c000000 ||
11342             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11343             val != 0)
11344                 return 0;
11345
11346         return 1;
11347 }
11348
/* Build the firmware version string in tp->fw_ver from NVRAM: the
 * bootcode version first, then -- when ASF is enabled without APE --
 * the ASF initialization firmware version appended after ", ".
 * Returns silently on any NVRAM read failure.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	/* Only the standard EEPROM layout is understood here. */
	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: image offset; word 0x4: image load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* ver_offset is relative to the load address; convert it to an
	 * NVRAM offset and copy out 16 bytes of version string.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* The ASF version suffix applies only when ASF firmware is
	 * enabled and the APE is not.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF INI entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed load address; newer chips store
	 * it in the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	bcnt = strlen(tp->fw_ver);

	/* Separate the bootcode and ASF versions with ", ". */
	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	/* Append up to 16 bytes of ASF version, truncating to the
	 * remaining room in fw_ver.
	 */
	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Less than a full word of space left: copy what fits
		 * and stop.
		 */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination regardless of how much was copied. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11432
11433 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11434
11435 static int __devinit tg3_get_invariants(struct tg3 *tp)
11436 {
11437         static struct pci_device_id write_reorder_chipsets[] = {
11438                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11439                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11440                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11441                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11442                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11443                              PCI_DEVICE_ID_VIA_8385_0) },
11444                 { },
11445         };
11446         u32 misc_ctrl_reg;
11447         u32 cacheline_sz_reg;
11448         u32 pci_state_reg, grc_misc_cfg;
11449         u32 val;
11450         u16 pci_cmd;
11451         int err, pcie_cap;
11452
11453         /* Force memory write invalidate off.  If we leave it on,
11454          * then on 5700_BX chips we have to enable a workaround.
11455          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11456          * to match the cacheline size.  The Broadcom driver have this
11457          * workaround but turns MWI off all the times so never uses
11458          * it.  This seems to suggest that the workaround is insufficient.
11459          */
11460         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11461         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11462         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11463
11464         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11465          * has the register indirect write enable bit set before
11466          * we try to access any of the MMIO registers.  It is also
11467          * critical that the PCI-X hw workaround situation is decided
11468          * before that as well.
11469          */
11470         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11471                               &misc_ctrl_reg);
11472
11473         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11474                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11475         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11476                 u32 prod_id_asic_rev;
11477
11478                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11479                                       &prod_id_asic_rev);
11480                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11481         }
11482
11483         /* Wrong chip ID in 5752 A0. This code can be removed later
11484          * as A0 is not in production.
11485          */
11486         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11487                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11488
11489         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11490          * we need to disable memory and use config. cycles
11491          * only to access all registers. The 5702/03 chips
11492          * can mistakenly decode the special cycles from the
11493          * ICH chipsets as memory write cycles, causing corruption
11494          * of register and memory space. Only certain ICH bridges
11495          * will drive special cycles with non-zero data during the
11496          * address phase which can fall within the 5703's address
11497          * range. This is not an ICH bug as the PCI spec allows
11498          * non-zero address during special cycles. However, only
11499          * these ICH bridges are known to drive non-zero addresses
11500          * during special cycles.
11501          *
11502          * Since special cycles do not cross PCI bridges, we only
11503          * enable this workaround if the 5703 is on the secondary
11504          * bus of these ICH bridges.
11505          */
11506         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11507             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11508                 static struct tg3_dev_id {
11509                         u32     vendor;
11510                         u32     device;
11511                         u32     rev;
11512                 } ich_chipsets[] = {
11513                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11514                           PCI_ANY_ID },
11515                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11516                           PCI_ANY_ID },
11517                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11518                           0xa },
11519                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11520                           PCI_ANY_ID },
11521                         { },
11522                 };
11523                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11524                 struct pci_dev *bridge = NULL;
11525
11526                 while (pci_id->vendor != 0) {
11527                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11528                                                 bridge);
11529                         if (!bridge) {
11530                                 pci_id++;
11531                                 continue;
11532                         }
11533                         if (pci_id->rev != PCI_ANY_ID) {
11534                                 if (bridge->revision > pci_id->rev)
11535                                         continue;
11536                         }
11537                         if (bridge->subordinate &&
11538                             (bridge->subordinate->number ==
11539                              tp->pdev->bus->number)) {
11540
11541                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11542                                 pci_dev_put(bridge);
11543                                 break;
11544                         }
11545                 }
11546         }
11547
11548         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11549                 static struct tg3_dev_id {
11550                         u32     vendor;
11551                         u32     device;
11552                 } bridge_chipsets[] = {
11553                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11554                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11555                         { },
11556                 };
11557                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11558                 struct pci_dev *bridge = NULL;
11559
11560                 while (pci_id->vendor != 0) {
11561                         bridge = pci_get_device(pci_id->vendor,
11562                                                 pci_id->device,
11563                                                 bridge);
11564                         if (!bridge) {
11565                                 pci_id++;
11566                                 continue;
11567                         }
11568                         if (bridge->subordinate &&
11569                             (bridge->subordinate->number <=
11570                              tp->pdev->bus->number) &&
11571                             (bridge->subordinate->subordinate >=
11572                              tp->pdev->bus->number)) {
11573                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11574                                 pci_dev_put(bridge);
11575                                 break;
11576                         }
11577                 }
11578         }
11579
11580         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11581          * DMA addresses > 40-bit. This bridge may have other additional
11582          * 57xx devices behind it in some 4-port NIC designs for example.
11583          * Any tg3 device found behind the bridge will also need the 40-bit
11584          * DMA workaround.
11585          */
11586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11587             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11588                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11589                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11590                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11591         }
11592         else {
11593                 struct pci_dev *bridge = NULL;
11594
11595                 do {
11596                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11597                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11598                                                 bridge);
11599                         if (bridge && bridge->subordinate &&
11600                             (bridge->subordinate->number <=
11601                              tp->pdev->bus->number) &&
11602                             (bridge->subordinate->subordinate >=
11603                              tp->pdev->bus->number)) {
11604                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11605                                 pci_dev_put(bridge);
11606                                 break;
11607                         }
11608                 } while (bridge);
11609         }
11610
11611         /* Initialize misc host control in PCI block. */
11612         tp->misc_host_ctrl |= (misc_ctrl_reg &
11613                                MISC_HOST_CTRL_CHIPREV);
11614         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11615                                tp->misc_host_ctrl);
11616
11617         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11618                               &cacheline_sz_reg);
11619
11620         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11621         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11622         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11623         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11624
11625         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11626             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11627                 tp->pdev_peer = tg3_find_peer(tp);
11628
11629         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11630             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11631             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11632             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11633             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11634             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11636             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11637                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11638
11639         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11640             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11641                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11642
11643         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11644                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11645                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11646                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11647                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11648                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11649                      tp->pdev_peer == tp->pdev))
11650                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11651
11652                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11653                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11654                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11655                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11656                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11657                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11658                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11659                 } else {
11660                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11661                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11662                                 ASIC_REV_5750 &&
11663                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11664                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11665                 }
11666         }
11667
11668         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11669              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11670                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11671
11672         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11673         if (pcie_cap != 0) {
11674                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11675
11676                 pcie_set_readrq(tp->pdev, 4096);
11677
11678                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11679                         u16 lnkctl;
11680
11681                         pci_read_config_word(tp->pdev,
11682                                              pcie_cap + PCI_EXP_LNKCTL,
11683                                              &lnkctl);
11684                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11685                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11686                 }
11687         }
11688
11689         /* If we have an AMD 762 or VIA K8T800 chipset, write
11690          * reordering to the mailbox registers done by the host
11691          * controller can cause major troubles.  We read back from
11692          * every mailbox register write to force the writes to be
11693          * posted to the chip in order.
11694          */
11695         if (pci_dev_present(write_reorder_chipsets) &&
11696             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11697                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11698
11699         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11700             tp->pci_lat_timer < 64) {
11701                 tp->pci_lat_timer = 64;
11702
11703                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11704                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11705                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11706                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11707
11708                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11709                                        cacheline_sz_reg);
11710         }
11711
11712         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11713             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11714                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11715                 if (!tp->pcix_cap) {
11716                         printk(KERN_ERR PFX "Cannot find PCI-X "
11717                                             "capability, aborting.\n");
11718                         return -EIO;
11719                 }
11720         }
11721
11722         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11723                               &pci_state_reg);
11724
11725         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11726                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11727
11728                 /* If this is a 5700 BX chipset, and we are in PCI-X
11729                  * mode, enable register write workaround.
11730                  *
11731                  * The workaround is to use indirect register accesses
11732                  * for all chip writes not to mailbox registers.
11733                  */
11734                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11735                         u32 pm_reg;
11736
11737                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11738
11739                         /* The chip can have it's power management PCI config
11740                          * space registers clobbered due to this bug.
11741                          * So explicitly force the chip into D0 here.
11742                          */
11743                         pci_read_config_dword(tp->pdev,
11744                                               tp->pm_cap + PCI_PM_CTRL,
11745                                               &pm_reg);
11746                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11747                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11748                         pci_write_config_dword(tp->pdev,
11749                                                tp->pm_cap + PCI_PM_CTRL,
11750                                                pm_reg);
11751
11752                         /* Also, force SERR#/PERR# in PCI command. */
11753                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11754                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11755                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11756                 }
11757         }
11758
11759         /* 5700 BX chips need to have their TX producer index mailboxes
11760          * written twice to workaround a bug.
11761          */
11762         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11763                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11764
11765         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11766                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11767         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11768                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11769
11770         /* Chip-specific fixup from Broadcom driver */
11771         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11772             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11773                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11774                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11775         }
11776
11777         /* Default fast path register access methods */
11778         tp->read32 = tg3_read32;
11779         tp->write32 = tg3_write32;
11780         tp->read32_mbox = tg3_read32;
11781         tp->write32_mbox = tg3_write32;
11782         tp->write32_tx_mbox = tg3_write32;
11783         tp->write32_rx_mbox = tg3_write32;
11784
11785         /* Various workaround register access methods */
11786         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11787                 tp->write32 = tg3_write_indirect_reg32;
11788         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11789                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11790                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11791                 /*
11792                  * Back to back register writes can cause problems on these
11793                  * chips, the workaround is to read back all reg writes
11794                  * except those to mailbox regs.
11795                  *
11796                  * See tg3_write_indirect_reg32().
11797                  */
11798                 tp->write32 = tg3_write_flush_reg32;
11799         }
11800
11801
11802         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11803             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11804                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11805                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11806                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11807         }
11808
11809         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11810                 tp->read32 = tg3_read_indirect_reg32;
11811                 tp->write32 = tg3_write_indirect_reg32;
11812                 tp->read32_mbox = tg3_read_indirect_mbox;
11813                 tp->write32_mbox = tg3_write_indirect_mbox;
11814                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11815                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11816
11817                 iounmap(tp->regs);
11818                 tp->regs = NULL;
11819
11820                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11821                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11822                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11823         }
11824         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11825                 tp->read32_mbox = tg3_read32_mbox_5906;
11826                 tp->write32_mbox = tg3_write32_mbox_5906;
11827                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11828                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11829         }
11830
11831         if (tp->write32 == tg3_write_indirect_reg32 ||
11832             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11833              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11834               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11835                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11836
11837         /* Get eeprom hw config before calling tg3_set_power_state().
11838          * In particular, the TG3_FLG2_IS_NIC flag must be
11839          * determined before calling tg3_set_power_state() so that
11840          * we know whether or not to switch out of Vaux power.
11841          * When the flag is set, it means that GPIO1 is used for eeprom
11842          * write protect and also implies that it is a LOM where GPIOs
11843          * are not used to switch power.
11844          */
11845         tg3_get_eeprom_hw_cfg(tp);
11846
11847         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11848                 /* Allow reads and writes to the
11849                  * APE register and memory space.
11850                  */
11851                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11852                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11853                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11854                                        pci_state_reg);
11855         }
11856
11857         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11858             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11859                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11860
11861                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11862                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11863                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11864                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11865                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11866         }
11867
11868         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11869          * GPIO1 driven high will bring 5700's external PHY out of reset.
11870          * It is also used as eeprom write protect on LOMs.
11871          */
11872         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11873         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11874             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11875                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11876                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11877         /* Unused GPIO3 must be driven as output on 5752 because there
11878          * are no pull-up resistors on unused GPIO pins.
11879          */
11880         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11881                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11882
11883         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11884                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11885
11886         /* Force the chip into D0. */
11887         err = tg3_set_power_state(tp, PCI_D0);
11888         if (err) {
11889                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11890                        pci_name(tp->pdev));
11891                 return err;
11892         }
11893
11894         /* 5700 B0 chips do not support checksumming correctly due
11895          * to hardware bugs.
11896          */
11897         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11898                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11899
11900         /* Derive initial jumbo mode from MTU assigned in
11901          * ether_setup() via the alloc_etherdev() call
11902          */
11903         if (tp->dev->mtu > ETH_DATA_LEN &&
11904             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11905                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11906
11907         /* Determine WakeOnLan speed to use. */
11908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11909             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11910             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11911             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11912                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11913         } else {
11914                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11915         }
11916
11917         /* A few boards don't want Ethernet@WireSpeed phy feature */
11918         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11919             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11920              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11921              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11922             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11923             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11924                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11925
11926         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11927             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11928                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11929         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11930                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11931
11932         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11933                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11934                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11935                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11936                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11937                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11938                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11939                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11940                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11941                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11942                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11943                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11944         }
11945
11946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11947             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11948                 tp->phy_otp = tg3_read_otp_phycfg(tp);
11949                 if (tp->phy_otp == 0)
11950                         tp->phy_otp = TG3_OTP_DEFAULT;
11951         }
11952
11953         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
11954                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11955         else
11956                 tp->mi_mode = MAC_MI_MODE_BASE;
11957
11958         tp->coalesce_mode = 0;
11959         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11960             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11961                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11962
11963         err = tg3_mdio_init(tp);
11964         if (err)
11965                 return err;
11966
11967         /* Initialize data/descriptor byte/word swapping. */
11968         val = tr32(GRC_MODE);
11969         val &= GRC_MODE_HOST_STACKUP;
11970         tw32(GRC_MODE, val | tp->grc_mode);
11971
11972         tg3_switch_clocks(tp);
11973
11974         /* Clear this out for sanity. */
11975         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11976
11977         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11978                               &pci_state_reg);
11979         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11980             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11981                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11982
11983                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11984                     chiprevid == CHIPREV_ID_5701_B0 ||
11985                     chiprevid == CHIPREV_ID_5701_B2 ||
11986                     chiprevid == CHIPREV_ID_5701_B5) {
11987                         void __iomem *sram_base;
11988
11989                         /* Write some dummy words into the SRAM status block
11990                          * area, see if it reads back correctly.  If the return
11991                          * value is bad, force enable the PCIX workaround.
11992                          */
11993                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11994
11995                         writel(0x00000000, sram_base);
11996                         writel(0x00000000, sram_base + 4);
11997                         writel(0xffffffff, sram_base + 4);
11998                         if (readl(sram_base) != 0x00000000)
11999                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12000                 }
12001         }
12002
12003         udelay(50);
12004         tg3_nvram_init(tp);
12005
12006         grc_misc_cfg = tr32(GRC_MISC_CFG);
12007         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12008
12009         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12010             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12011              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12012                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12013
12014         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12015             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12016                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12017         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12018                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12019                                       HOSTCC_MODE_CLRTICK_TXBD);
12020
12021                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12022                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12023                                        tp->misc_host_ctrl);
12024         }
12025
12026         /* these are limited to 10/100 only */
12027         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12028              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12029             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12030              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12031              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12032               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12033               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12034             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12035              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12036               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12037               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12038             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12039                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12040
12041         err = tg3_phy_probe(tp);
12042         if (err) {
12043                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12044                        pci_name(tp->pdev), err);
12045                 /* ... but do not return immediately ... */
12046         }
12047
12048         tg3_read_partno(tp);
12049         tg3_read_fw_ver(tp);
12050
12051         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12052                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12053         } else {
12054                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12055                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12056                 else
12057                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12058         }
12059
12060         /* 5700 {AX,BX} chips have a broken status block link
12061          * change bit implementation, so we must use the
12062          * status register in those cases.
12063          */
12064         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12065                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12066         else
12067                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12068
12069         /* The led_ctrl is set during tg3_phy_probe, here we might
12070          * have to force the link status polling mechanism based
12071          * upon subsystem IDs.
12072          */
12073         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12074             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12075             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12076                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12077                                   TG3_FLAG_USE_LINKCHG_REG);
12078         }
12079
12080         /* For all SERDES we poll the MAC status register. */
12081         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12082                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12083         else
12084                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12085
12086         /* All chips before 5787 can get confused if TX buffers
12087          * straddle the 4GB address boundary in some cases.
12088          */
12089         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12090             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12091             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12092             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12093             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12094                 tp->dev->hard_start_xmit = tg3_start_xmit;
12095         else
12096                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12097
12098         tp->rx_offset = 2;
12099         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12100             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12101                 tp->rx_offset = 0;
12102
12103         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12104
12105         /* Increment the rx prod index on the rx std ring by at most
12106          * 8 for these chips to workaround hw errata.
12107          */
12108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12109             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12111                 tp->rx_std_max_post = 8;
12112
12113         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12114                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12115                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12116
12117         return err;
12118 }
12119
12120 #ifdef CONFIG_SPARC
12121 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12122 {
12123         struct net_device *dev = tp->dev;
12124         struct pci_dev *pdev = tp->pdev;
12125         struct device_node *dp = pci_device_to_OF_node(pdev);
12126         const unsigned char *addr;
12127         int len;
12128
12129         addr = of_get_property(dp, "local-mac-address", &len);
12130         if (addr && len == 6) {
12131                 memcpy(dev->dev_addr, addr, 6);
12132                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12133                 return 0;
12134         }
12135         return -ENODEV;
12136 }
12137
12138 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12139 {
12140         struct net_device *dev = tp->dev;
12141
12142         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12143         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12144         return 0;
12145 }
12146 #endif
12147
/* Determine the device's MAC address, trying each source in order of
 * preference:
 *   1. (SPARC only) the OpenFirmware "local-mac-address" property
 *   2. the MAC address mailbox in NIC SRAM, populated by bootcode
 *   3. NVRAM, at a chip-specific offset
 *   4. the MAC_ADDR_0 hardware registers
 *   5. (SPARC only) the system IDPROM, as a last resort
 *
 * On success the address is stored in dev->dev_addr/perm_addr and 0 is
 * returned; -EINVAL if no valid (non-zero, unicast) address was found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC chips: the second function's address is at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): on lock failure the NVRAM command engine is
		 * reset rather than the error propagated -- presumably to
		 * recover a wedged arbiter before the reads below; confirm.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK", the bootcode's valid-address signature. */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte ordering differs from the
		 * SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* Last resort on SPARC: the system-wide IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12222
12223 #define BOUNDARY_SINGLE_CACHELINE       1
12224 #define BOUNDARY_MULTI_CACHELINE        2
12225
12226 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12227 {
12228         int cacheline_size;
12229         u8 byte;
12230         int goal;
12231
12232         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12233         if (byte == 0)
12234                 cacheline_size = 1024;
12235         else
12236                 cacheline_size = (int) byte * 4;
12237
12238         /* On 5703 and later chips, the boundary bits have no
12239          * effect.
12240          */
12241         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12242             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12243             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12244                 goto out;
12245
12246 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12247         goal = BOUNDARY_MULTI_CACHELINE;
12248 #else
12249 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12250         goal = BOUNDARY_SINGLE_CACHELINE;
12251 #else
12252         goal = 0;
12253 #endif
12254 #endif
12255
12256         if (!goal)
12257                 goto out;
12258
12259         /* PCI controllers on most RISC systems tend to disconnect
12260          * when a device tries to burst across a cache-line boundary.
12261          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12262          *
12263          * Unfortunately, for PCI-E there are only limited
12264          * write-side controls for this, and thus for reads
12265          * we will still get the disconnects.  We'll also waste
12266          * these PCI cycles for both read and write for chips
12267          * other than 5700 and 5701 which do not implement the
12268          * boundary bits.
12269          */
12270         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12271             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12272                 switch (cacheline_size) {
12273                 case 16:
12274                 case 32:
12275                 case 64:
12276                 case 128:
12277                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12278                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12279                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12280                         } else {
12281                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12282                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12283                         }
12284                         break;
12285
12286                 case 256:
12287                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12288                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12289                         break;
12290
12291                 default:
12292                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12293                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12294                         break;
12295                 };
12296         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12297                 switch (cacheline_size) {
12298                 case 16:
12299                 case 32:
12300                 case 64:
12301                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12302                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12303                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12304                                 break;
12305                         }
12306                         /* fallthrough */
12307                 case 128:
12308                 default:
12309                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12310                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12311                         break;
12312                 };
12313         } else {
12314                 switch (cacheline_size) {
12315                 case 16:
12316                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12317                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12318                                         DMA_RWCTRL_WRITE_BNDRY_16);
12319                                 break;
12320                         }
12321                         /* fallthrough */
12322                 case 32:
12323                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12324                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12325                                         DMA_RWCTRL_WRITE_BNDRY_32);
12326                                 break;
12327                         }
12328                         /* fallthrough */
12329                 case 64:
12330                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12331                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12332                                         DMA_RWCTRL_WRITE_BNDRY_64);
12333                                 break;
12334                         }
12335                         /* fallthrough */
12336                 case 128:
12337                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12338                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12339                                         DMA_RWCTRL_WRITE_BNDRY_128);
12340                                 break;
12341                         }
12342                         /* fallthrough */
12343                 case 256:
12344                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12345                                 DMA_RWCTRL_WRITE_BNDRY_256);
12346                         break;
12347                 case 512:
12348                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12349                                 DMA_RWCTRL_WRITE_BNDRY_512);
12350                         break;
12351                 case 1024:
12352                 default:
12353                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12354                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12355                         break;
12356                 };
12357         }
12358
12359 out:
12360         return val;
12361 }
12362
/* Run a single host<->NIC DMA transfer through the chip's internal test
 * descriptor pool and poll for completion.
 *
 * @tp:        driver/device state
 * @buf:       CPU address of the host-side test buffer
 * @buf_dma:   bus/DMA address of @buf
 * @size:      transfer length in bytes
 * @to_device: non-zero => read DMA (host memory -> NIC), zero => write
 *             DMA (NIC -> host memory)
 *
 * Returns 0 if the completion FIFO reports the descriptor within the
 * polling window, -ENODEV on timeout (40 polls x 100us = ~4ms).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear the completion FIFOs and DMA status before kicking off
         * the transfer so stale completions can't satisfy the poll below.
         */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the test descriptor: host address, NIC-internal mbuf
         * address (0x2100) and length.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* Completion queue 13 / send queue 2 for read DMA. */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* Completion queue 16 / send queue 7 for write DMA. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor, one 32-bit word at a time, into NIC SRAM
         * through the PCI config-space memory window.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Enqueue the descriptor address to start the DMA. */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the matching completion FIFO for our descriptor. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
12443
12444 #define TEST_BUFFER_SIZE        0x2000
12445
12446 static int __devinit tg3_test_dma(struct tg3 *tp)
12447 {
12448         dma_addr_t buf_dma;
12449         u32 *buf, saved_dma_rwctrl;
12450         int ret;
12451
12452         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12453         if (!buf) {
12454                 ret = -ENOMEM;
12455                 goto out_nofree;
12456         }
12457
12458         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12459                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12460
12461         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12462
12463         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12464                 /* DMA read watermark not used on PCIE */
12465                 tp->dma_rwctrl |= 0x00180000;
12466         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12467                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12468                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12469                         tp->dma_rwctrl |= 0x003f0000;
12470                 else
12471                         tp->dma_rwctrl |= 0x003f000f;
12472         } else {
12473                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12474                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12475                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12476                         u32 read_water = 0x7;
12477
12478                         /* If the 5704 is behind the EPB bridge, we can
12479                          * do the less restrictive ONE_DMA workaround for
12480                          * better performance.
12481                          */
12482                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12483                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12484                                 tp->dma_rwctrl |= 0x8000;
12485                         else if (ccval == 0x6 || ccval == 0x7)
12486                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12487
12488                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12489                                 read_water = 4;
12490                         /* Set bit 23 to enable PCIX hw bug fix */
12491                         tp->dma_rwctrl |=
12492                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12493                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12494                                 (1 << 23);
12495                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12496                         /* 5780 always in PCIX mode */
12497                         tp->dma_rwctrl |= 0x00144000;
12498                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12499                         /* 5714 always in PCIX mode */
12500                         tp->dma_rwctrl |= 0x00148000;
12501                 } else {
12502                         tp->dma_rwctrl |= 0x001b000f;
12503                 }
12504         }
12505
12506         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12507             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12508                 tp->dma_rwctrl &= 0xfffffff0;
12509
12510         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12512                 /* Remove this if it causes problems for some boards. */
12513                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12514
12515                 /* On 5700/5701 chips, we need to set this bit.
12516                  * Otherwise the chip will issue cacheline transactions
12517                  * to streamable DMA memory with not all the byte
12518                  * enables turned on.  This is an error on several
12519                  * RISC PCI controllers, in particular sparc64.
12520                  *
12521                  * On 5703/5704 chips, this bit has been reassigned
12522                  * a different meaning.  In particular, it is used
12523                  * on those chips to enable a PCI-X workaround.
12524                  */
12525                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12526         }
12527
12528         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12529
12530 #if 0
12531         /* Unneeded, already done by tg3_get_invariants.  */
12532         tg3_switch_clocks(tp);
12533 #endif
12534
12535         ret = 0;
12536         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12537             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12538                 goto out;
12539
12540         /* It is best to perform DMA test with maximum write burst size
12541          * to expose the 5700/5701 write DMA bug.
12542          */
12543         saved_dma_rwctrl = tp->dma_rwctrl;
12544         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12545         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12546
12547         while (1) {
12548                 u32 *p = buf, i;
12549
12550                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12551                         p[i] = i;
12552
12553                 /* Send the buffer to the chip. */
12554                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12555                 if (ret) {
12556                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12557                         break;
12558                 }
12559
12560 #if 0
12561                 /* validate data reached card RAM correctly. */
12562                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12563                         u32 val;
12564                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12565                         if (le32_to_cpu(val) != p[i]) {
12566                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
12567                                 /* ret = -ENODEV here? */
12568                         }
12569                         p[i] = 0;
12570                 }
12571 #endif
12572                 /* Now read it back. */
12573                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12574                 if (ret) {
12575                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12576
12577                         break;
12578                 }
12579
12580                 /* Verify it. */
12581                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12582                         if (p[i] == i)
12583                                 continue;
12584
12585                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12586                             DMA_RWCTRL_WRITE_BNDRY_16) {
12587                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12588                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12589                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12590                                 break;
12591                         } else {
12592                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12593                                 ret = -ENODEV;
12594                                 goto out;
12595                         }
12596                 }
12597
12598                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12599                         /* Success. */
12600                         ret = 0;
12601                         break;
12602                 }
12603         }
12604         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12605             DMA_RWCTRL_WRITE_BNDRY_16) {
12606                 static struct pci_device_id dma_wait_state_chipsets[] = {
12607                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12608                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12609                         { },
12610                 };
12611
12612                 /* DMA test passed without adjusting DMA boundary,
12613                  * now look for chipsets that are known to expose the
12614                  * DMA bug without failing the test.
12615                  */
12616                 if (pci_dev_present(dma_wait_state_chipsets)) {
12617                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12618                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12619                 }
12620                 else
12621                         /* Safe to use the calculated DMA boundary. */
12622                         tp->dma_rwctrl = saved_dma_rwctrl;
12623
12624                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12625         }
12626
12627 out:
12628         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12629 out_nofree:
12630         return ret;
12631 }
12632
12633 static void __devinit tg3_init_link_config(struct tg3 *tp)
12634 {
12635         tp->link_config.advertising =
12636                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12637                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12638                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12639                  ADVERTISED_Autoneg | ADVERTISED_MII);
12640         tp->link_config.speed = SPEED_INVALID;
12641         tp->link_config.duplex = DUPLEX_INVALID;
12642         tp->link_config.autoneg = AUTONEG_ENABLE;
12643         tp->link_config.active_speed = SPEED_INVALID;
12644         tp->link_config.active_duplex = DUPLEX_INVALID;
12645         tp->link_config.phy_is_low_power = 0;
12646         tp->link_config.orig_speed = SPEED_INVALID;
12647         tp->link_config.orig_duplex = DUPLEX_INVALID;
12648         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12649 }
12650
12651 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12652 {
12653         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12654                 tp->bufmgr_config.mbuf_read_dma_low_water =
12655                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12656                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12657                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12658                 tp->bufmgr_config.mbuf_high_water =
12659                         DEFAULT_MB_HIGH_WATER_5705;
12660                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12661                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12662                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12663                         tp->bufmgr_config.mbuf_high_water =
12664                                 DEFAULT_MB_HIGH_WATER_5906;
12665                 }
12666
12667                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12668                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12669                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12670                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12671                 tp->bufmgr_config.mbuf_high_water_jumbo =
12672                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12673         } else {
12674                 tp->bufmgr_config.mbuf_read_dma_low_water =
12675                         DEFAULT_MB_RDMA_LOW_WATER;
12676                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12677                         DEFAULT_MB_MACRX_LOW_WATER;
12678                 tp->bufmgr_config.mbuf_high_water =
12679                         DEFAULT_MB_HIGH_WATER;
12680
12681                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12682                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12683                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12684                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12685                 tp->bufmgr_config.mbuf_high_water_jumbo =
12686                         DEFAULT_MB_HIGH_WATER_JUMBO;
12687         }
12688
12689         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12690         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12691 }
12692
12693 static char * __devinit tg3_phy_string(struct tg3 *tp)
12694 {
12695         switch (tp->phy_id & PHY_ID_MASK) {
12696         case PHY_ID_BCM5400:    return "5400";
12697         case PHY_ID_BCM5401:    return "5401";
12698         case PHY_ID_BCM5411:    return "5411";
12699         case PHY_ID_BCM5701:    return "5701";
12700         case PHY_ID_BCM5703:    return "5703";
12701         case PHY_ID_BCM5704:    return "5704";
12702         case PHY_ID_BCM5705:    return "5705";
12703         case PHY_ID_BCM5750:    return "5750";
12704         case PHY_ID_BCM5752:    return "5752";
12705         case PHY_ID_BCM5714:    return "5714";
12706         case PHY_ID_BCM5780:    return "5780";
12707         case PHY_ID_BCM5755:    return "5755";
12708         case PHY_ID_BCM5787:    return "5787";
12709         case PHY_ID_BCM5784:    return "5784";
12710         case PHY_ID_BCM5756:    return "5722/5756";
12711         case PHY_ID_BCM5906:    return "5906";
12712         case PHY_ID_BCM5761:    return "5761";
12713         case PHY_ID_BCM8002:    return "8002/serdes";
12714         case 0:                 return "serdes";
12715         default:                return "unknown";
12716         };
12717 }
12718
12719 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12720 {
12721         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12722                 strcpy(str, "PCI Express");
12723                 return str;
12724         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12725                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12726
12727                 strcpy(str, "PCIX:");
12728
12729                 if ((clock_ctrl == 7) ||
12730                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12731                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12732                         strcat(str, "133MHz");
12733                 else if (clock_ctrl == 0)
12734                         strcat(str, "33MHz");
12735                 else if (clock_ctrl == 2)
12736                         strcat(str, "50MHz");
12737                 else if (clock_ctrl == 4)
12738                         strcat(str, "66MHz");
12739                 else if (clock_ctrl == 6)
12740                         strcat(str, "100MHz");
12741         } else {
12742                 strcpy(str, "PCI:");
12743                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12744                         strcat(str, "66MHz");
12745                 else
12746                         strcat(str, "33MHz");
12747         }
12748         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12749                 strcat(str, ":32-bit");
12750         else
12751                 strcat(str, ":64-bit");
12752         return str;
12753 }
12754
12755 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12756 {
12757         struct pci_dev *peer;
12758         unsigned int func, devnr = tp->pdev->devfn & ~7;
12759
12760         for (func = 0; func < 8; func++) {
12761                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12762                 if (peer && peer != tp->pdev)
12763                         break;
12764                 pci_dev_put(peer);
12765         }
12766         /* 5704 can be configured in single-port mode, set peer to
12767          * tp->pdev in that case.
12768          */
12769         if (!peer) {
12770                 peer = tp->pdev;
12771                 return peer;
12772         }
12773
12774         /*
12775          * We don't need to keep the refcount elevated; there's no way
12776          * to remove one half of this device without removing the other
12777          */
12778         pci_dev_put(peer);
12779
12780         return peer;
12781 }
12782
12783 static void __devinit tg3_init_coal(struct tg3 *tp)
12784 {
12785         struct ethtool_coalesce *ec = &tp->coal;
12786
12787         memset(ec, 0, sizeof(*ec));
12788         ec->cmd = ETHTOOL_GCOALESCE;
12789         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12790         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12791         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12792         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12793         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12794         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12795         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12796         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12797         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12798
12799         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12800                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12801                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12802                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12803                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12804                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12805         }
12806
12807         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12808                 ec->rx_coalesce_usecs_irq = 0;
12809                 ec->tx_coalesce_usecs_irq = 0;
12810                 ec->stats_block_coalesce_usecs = 0;
12811         }
12812 }
12813
12814 static int __devinit tg3_init_one(struct pci_dev *pdev,
12815                                   const struct pci_device_id *ent)
12816 {
12817         static int tg3_version_printed = 0;
12818         resource_size_t tg3reg_base;
12819         unsigned long tg3reg_len;
12820         struct net_device *dev;
12821         struct tg3 *tp;
12822         int err, pm_cap;
12823         char str[40];
12824         u64 dma_mask, persist_dma_mask;
12825         DECLARE_MAC_BUF(mac);
12826
12827         if (tg3_version_printed++ == 0)
12828                 printk(KERN_INFO "%s", version);
12829
12830         err = pci_enable_device(pdev);
12831         if (err) {
12832                 printk(KERN_ERR PFX "Cannot enable PCI device, "
12833                        "aborting.\n");
12834                 return err;
12835         }
12836
12837         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12838                 printk(KERN_ERR PFX "Cannot find proper PCI device "
12839                        "base address, aborting.\n");
12840                 err = -ENODEV;
12841                 goto err_out_disable_pdev;
12842         }
12843
12844         err = pci_request_regions(pdev, DRV_MODULE_NAME);
12845         if (err) {
12846                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12847                        "aborting.\n");
12848                 goto err_out_disable_pdev;
12849         }
12850
12851         pci_set_master(pdev);
12852
12853         /* Find power-management capability. */
12854         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12855         if (pm_cap == 0) {
12856                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12857                        "aborting.\n");
12858                 err = -EIO;
12859                 goto err_out_free_res;
12860         }
12861
12862         tg3reg_base = pci_resource_start(pdev, 0);
12863         tg3reg_len = pci_resource_len(pdev, 0);
12864
12865         dev = alloc_etherdev(sizeof(*tp));
12866         if (!dev) {
12867                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12868                 err = -ENOMEM;
12869                 goto err_out_free_res;
12870         }
12871
12872         SET_NETDEV_DEV(dev, &pdev->dev);
12873
12874 #if TG3_VLAN_TAG_USED
12875         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12876         dev->vlan_rx_register = tg3_vlan_rx_register;
12877 #endif
12878
12879         tp = netdev_priv(dev);
12880         tp->pdev = pdev;
12881         tp->dev = dev;
12882         tp->pm_cap = pm_cap;
12883         tp->mac_mode = TG3_DEF_MAC_MODE;
12884         tp->rx_mode = TG3_DEF_RX_MODE;
12885         tp->tx_mode = TG3_DEF_TX_MODE;
12886
12887         if (tg3_debug > 0)
12888                 tp->msg_enable = tg3_debug;
12889         else
12890                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12891
12892         /* The word/byte swap controls here control register access byte
12893          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
12894          * setting below.
12895          */
12896         tp->misc_host_ctrl =
12897                 MISC_HOST_CTRL_MASK_PCI_INT |
12898                 MISC_HOST_CTRL_WORD_SWAP |
12899                 MISC_HOST_CTRL_INDIR_ACCESS |
12900                 MISC_HOST_CTRL_PCISTATE_RW;
12901
12902         /* The NONFRM (non-frame) byte/word swap controls take effect
12903          * on descriptor entries, anything which isn't packet data.
12904          *
12905          * The StrongARM chips on the board (one for tx, one for rx)
12906          * are running in big-endian mode.
12907          */
12908         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12909                         GRC_MODE_WSWAP_NONFRM_DATA);
12910 #ifdef __BIG_ENDIAN
12911         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12912 #endif
12913         spin_lock_init(&tp->lock);
12914         spin_lock_init(&tp->indirect_lock);
12915         INIT_WORK(&tp->reset_task, tg3_reset_task);
12916
12917         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12918         if (!tp->regs) {
12919                 printk(KERN_ERR PFX "Cannot map device registers, "
12920                        "aborting.\n");
12921                 err = -ENOMEM;
12922                 goto err_out_free_dev;
12923         }
12924
12925         tg3_init_link_config(tp);
12926
12927         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12928         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12929         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12930
12931         dev->open = tg3_open;
12932         dev->stop = tg3_close;
12933         dev->get_stats = tg3_get_stats;
12934         dev->set_multicast_list = tg3_set_rx_mode;
12935         dev->set_mac_address = tg3_set_mac_addr;
12936         dev->do_ioctl = tg3_ioctl;
12937         dev->tx_timeout = tg3_tx_timeout;
12938         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12939         dev->ethtool_ops = &tg3_ethtool_ops;
12940         dev->watchdog_timeo = TG3_TX_TIMEOUT;
12941         dev->change_mtu = tg3_change_mtu;
12942         dev->irq = pdev->irq;
12943 #ifdef CONFIG_NET_POLL_CONTROLLER
12944         dev->poll_controller = tg3_poll_controller;
12945 #endif
12946
12947         err = tg3_get_invariants(tp);
12948         if (err) {
12949                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12950                        "aborting.\n");
12951                 goto err_out_iounmap;
12952         }
12953
12954         /* The EPB bridge inside 5714, 5715, and 5780 and any
12955          * device behind the EPB cannot support DMA addresses > 40-bit.
12956          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12957          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12958          * do DMA address check in tg3_start_xmit().
12959          */
12960         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12961                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12962         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12963                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12964 #ifdef CONFIG_HIGHMEM
12965                 dma_mask = DMA_64BIT_MASK;
12966 #endif
12967         } else
12968                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12969
12970         /* Configure DMA attributes. */
12971         if (dma_mask > DMA_32BIT_MASK) {
12972                 err = pci_set_dma_mask(pdev, dma_mask);
12973                 if (!err) {
12974                         dev->features |= NETIF_F_HIGHDMA;
12975                         err = pci_set_consistent_dma_mask(pdev,
12976                                                           persist_dma_mask);
12977                         if (err < 0) {
12978                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12979                                        "DMA for consistent allocations\n");
12980                                 goto err_out_iounmap;
12981                         }
12982                 }
12983         }
12984         if (err || dma_mask == DMA_32BIT_MASK) {
12985                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12986                 if (err) {
12987                         printk(KERN_ERR PFX "No usable DMA configuration, "
12988                                "aborting.\n");
12989                         goto err_out_iounmap;
12990                 }
12991         }
12992
12993         tg3_init_bufmgr_config(tp);
12994
12995         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12996                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12997         }
12998         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12999             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13000             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13002             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13003                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13004         } else {
13005                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13006         }
13007
13008         /* TSO is on by default on chips that support hardware TSO.
13009          * Firmware TSO on older chips gives lower performance, so it
13010          * is off by default, but can be enabled using ethtool.
13011          */
13012         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13013                 dev->features |= NETIF_F_TSO;
13014                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13015                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13016                         dev->features |= NETIF_F_TSO6;
13017                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13018                         dev->features |= NETIF_F_TSO_ECN;
13019         }
13020
13021
13022         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13023             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13024             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13025                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13026                 tp->rx_pending = 63;
13027         }
13028
13029         err = tg3_get_device_address(tp);
13030         if (err) {
13031                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13032                        "aborting.\n");
13033                 goto err_out_iounmap;
13034         }
13035
13036         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13037                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13038                         printk(KERN_ERR PFX "Cannot find proper PCI device "
13039                                "base address for APE, aborting.\n");
13040                         err = -ENODEV;
13041                         goto err_out_iounmap;
13042                 }
13043
13044                 tg3reg_base = pci_resource_start(pdev, 2);
13045                 tg3reg_len = pci_resource_len(pdev, 2);
13046
13047                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
13048                 if (!tp->aperegs) {
13049                         printk(KERN_ERR PFX "Cannot map APE registers, "
13050                                "aborting.\n");
13051                         err = -ENOMEM;
13052                         goto err_out_iounmap;
13053                 }
13054
13055                 tg3_ape_lock_init(tp);
13056         }
13057
13058         /*
13059          * Reset chip in case UNDI or EFI driver did not shutdown
13060          * DMA self test will enable WDMAC and we'll see (spurious)
13061          * pending DMA on the PCI bus at that point.
13062          */
13063         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13064             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13065                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13066                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13067         }
13068
13069         err = tg3_test_dma(tp);
13070         if (err) {
13071                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13072                 goto err_out_apeunmap;
13073         }
13074
13075         /* Tigon3 can do ipv4 only... and some chips have buggy
13076          * checksumming.
13077          */
13078         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13079                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13080                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13081                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13082                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13083                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13084                         dev->features |= NETIF_F_IPV6_CSUM;
13085
13086                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13087         } else
13088                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13089
13090         /* flow control autonegotiation is default behavior */
13091         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13092         tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13093
13094         tg3_init_coal(tp);
13095
13096         pci_set_drvdata(pdev, dev);
13097
13098         err = register_netdev(dev);
13099         if (err) {
13100                 printk(KERN_ERR PFX "Cannot register net device, "
13101                        "aborting.\n");
13102                 goto err_out_apeunmap;
13103         }
13104
13105         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
13106                "(%s) %s Ethernet %s\n",
13107                dev->name,
13108                tp->board_part_number,
13109                tp->pci_chip_rev_id,
13110                tg3_phy_string(tp),
13111                tg3_bus_string(tp, str),
13112                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13113                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13114                  "10/100/1000Base-T")),
13115                print_mac(mac, dev->dev_addr));
13116
13117         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
13118                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
13119                dev->name,
13120                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13121                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13122                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13123                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13124                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13125                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13126         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13127                dev->name, tp->dma_rwctrl,
13128                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13129                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13130
13131         return 0;
13132
13133 err_out_apeunmap:
13134         if (tp->aperegs) {
13135                 iounmap(tp->aperegs);
13136                 tp->aperegs = NULL;
13137         }
13138
13139 err_out_iounmap:
13140         if (tp->regs) {
13141                 iounmap(tp->regs);
13142                 tp->regs = NULL;
13143         }
13144
13145 err_out_free_dev:
13146         free_netdev(dev);
13147
13148 err_out_free_res:
13149         pci_release_regions(pdev);
13150
13151 err_out_disable_pdev:
13152         pci_disable_device(pdev);
13153         pci_set_drvdata(pdev, NULL);
13154         return err;
13155 }
13156
13157 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13158 {
13159         struct net_device *dev = pci_get_drvdata(pdev);
13160
13161         if (dev) {
13162                 struct tg3 *tp = netdev_priv(dev);
13163
13164                 flush_scheduled_work();
13165
13166                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
13167                         tg3_mdio_fini(tp);
13168
13169                 unregister_netdev(dev);
13170                 if (tp->aperegs) {
13171                         iounmap(tp->aperegs);
13172                         tp->aperegs = NULL;
13173                 }
13174                 if (tp->regs) {
13175                         iounmap(tp->regs);
13176                         tp->regs = NULL;
13177                 }
13178                 free_netdev(dev);
13179                 pci_release_regions(pdev);
13180                 pci_disable_device(pdev);
13181                 pci_set_drvdata(pdev, NULL);
13182         }
13183 }
13184
/*
 * Legacy PCI suspend hook: quiesce the device and enter the low-power
 * state chosen by the PCI core for @state.
 *
 * If the interface is not running, only PCI config space is saved.
 * Otherwise the data path is stopped, interrupts are disabled, the
 * chip is halted and tg3_set_power_state() is asked to power down.
 * On power-down failure the device is brought back up so the system
 * is left in a working state.
 *
 * Returns 0 on success or a negative errno from the power transition.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);

        if (!netif_running(dev))
                return 0;

        /* Let any queued reset/link work finish before we stop the NIC. */
        flush_scheduled_work();
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        /* Mark the hardware as uninitialized; resume will re-init it. */
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
        tg3_full_unlock(tp);

        err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
                /* Power-down failed: restart the hardware, timer and
                 * data path so the device keeps working.
                 */
                tg3_full_lock(tp, 0);

                tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                if (tg3_restart_hw(tp, 1))
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);
        }

        return err;
}
13236
13237 static int tg3_resume(struct pci_dev *pdev)
13238 {
13239         struct net_device *dev = pci_get_drvdata(pdev);
13240         struct tg3 *tp = netdev_priv(dev);
13241         int err;
13242
13243         pci_restore_state(tp->pdev);
13244
13245         if (!netif_running(dev))
13246                 return 0;
13247
13248         err = tg3_set_power_state(tp, PCI_D0);
13249         if (err)
13250                 return err;
13251
13252         netif_device_attach(dev);
13253
13254         tg3_full_lock(tp, 0);
13255
13256         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13257         err = tg3_restart_hw(tp, 1);
13258         if (err)
13259                 goto out;
13260
13261         tp->timer.expires = jiffies + tp->timer_offset;
13262         add_timer(&tp->timer);
13263
13264         tg3_netif_start(tp);
13265
13266 out:
13267         tg3_full_unlock(tp);
13268
13269         return err;
13270 }
13271
/* PCI driver glue: device ID table, probe/remove, and the legacy
 * PCI power-management suspend/resume callbacks.
 */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .suspend        = tg3_suspend,
        .resume         = tg3_resume
};
13280
13281 static int __init tg3_init(void)
13282 {
13283         return pci_register_driver(&tg3_driver);
13284 }
13285
/* Module unload entry point: unregister the driver; the PCI core
 * invokes tg3_remove_one() for every bound device before returning.
 */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}
13290
/* Wire module load/unload to the registration helpers above. */
module_init(tg3_init);
module_exit(tg3_cleanup);