]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Add 5761 APE support
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
65610fba 7 * Copyright (C) 2005-2007 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
14c85021 26#include <linux/in.h>
1da177e4
LT
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
f9a5f7d3 40#include <linux/dma-mapping.h>
1da177e4
LT
41
42#include <net/checksum.h>
c9bdd4b5 43#include <net/ip.h>
1da177e4
LT
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
49b6e95f 50#ifdef CONFIG_SPARC
1da177e4 51#include <asm/idprom.h>
49b6e95f 52#include <asm/prom.h>
1da177e4
LT
53#endif
54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56#define TG3_VLAN_TAG_USED 1
57#else
58#define TG3_VLAN_TAG_USED 0
59#endif
60
1da177e4 61#define TG3_TSO_SUPPORT 1
1da177e4
LT
62
63#include "tg3.h"
64
65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": "
182f6ed5
MC
67#define DRV_MODULE_VERSION "3.82"
68#define DRV_MODULE_RELDATE "October 5, 2007"
1da177e4
LT
69
70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0
72#define TG3_DEF_TX_MODE 0
73#define TG3_DEF_MSG_ENABLE \
74 (NETIF_MSG_DRV | \
75 NETIF_MSG_PROBE | \
76 NETIF_MSG_LINK | \
77 NETIF_MSG_TIMER | \
78 NETIF_MSG_IFDOWN | \
79 NETIF_MSG_IFUP | \
80 NETIF_MSG_RX_ERR | \
81 NETIF_MSG_TX_ERR)
82
83/* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
85 */
86#define TG3_TX_TIMEOUT (5 * HZ)
87
88/* hardware minimum and maximum for a single frame's data payload */
89#define TG3_MIN_MTU 60
90#define TG3_MAX_MTU(tp) \
0f893dc6 91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
92
93/* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
96 */
97#define TG3_RX_RING_SIZE 512
98#define TG3_DEF_RX_RING_PENDING 200
99#define TG3_RX_JUMBO_RING_SIZE 256
100#define TG3_DEF_RX_JUMBO_RING_PENDING 100
101
102/* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
107 */
108#define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
110
111#define TG3_TX_RING_SIZE 512
112#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113
114#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 TG3_RX_RING_SIZE)
116#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
121 TG3_TX_RING_SIZE)
1da177e4
LT
122#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
126
127/* minimum number of free TX descriptors required to wake up TX process */
42952231 128#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
1da177e4
LT
129
130/* number of ETHTOOL_GSTATS u64's */
131#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
4cafd3f5
MC
133#define TG3_NUM_TEST 6
134
1da177e4
LT
135static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140MODULE_LICENSE("GPL");
141MODULE_VERSION(DRV_MODULE_VERSION);
142
143static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144module_param(tg3_debug, int, 0);
145MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147static struct pci_device_id tg3_pci_tbl[] = {
13185217
HK
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217
HK
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
676917d4 191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
13185217
HK
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
13185217
HK
203 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
204 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
205 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
206 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
207 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
209 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
210 {}
1da177e4
LT
211};
212
213MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
214
50da859d 215static const struct {
1da177e4
LT
216 const char string[ETH_GSTRING_LEN];
217} ethtool_stats_keys[TG3_NUM_STATS] = {
218 { "rx_octets" },
219 { "rx_fragments" },
220 { "rx_ucast_packets" },
221 { "rx_mcast_packets" },
222 { "rx_bcast_packets" },
223 { "rx_fcs_errors" },
224 { "rx_align_errors" },
225 { "rx_xon_pause_rcvd" },
226 { "rx_xoff_pause_rcvd" },
227 { "rx_mac_ctrl_rcvd" },
228 { "rx_xoff_entered" },
229 { "rx_frame_too_long_errors" },
230 { "rx_jabbers" },
231 { "rx_undersize_packets" },
232 { "rx_in_length_errors" },
233 { "rx_out_length_errors" },
234 { "rx_64_or_less_octet_packets" },
235 { "rx_65_to_127_octet_packets" },
236 { "rx_128_to_255_octet_packets" },
237 { "rx_256_to_511_octet_packets" },
238 { "rx_512_to_1023_octet_packets" },
239 { "rx_1024_to_1522_octet_packets" },
240 { "rx_1523_to_2047_octet_packets" },
241 { "rx_2048_to_4095_octet_packets" },
242 { "rx_4096_to_8191_octet_packets" },
243 { "rx_8192_to_9022_octet_packets" },
244
245 { "tx_octets" },
246 { "tx_collisions" },
247
248 { "tx_xon_sent" },
249 { "tx_xoff_sent" },
250 { "tx_flow_control" },
251 { "tx_mac_errors" },
252 { "tx_single_collisions" },
253 { "tx_mult_collisions" },
254 { "tx_deferred" },
255 { "tx_excessive_collisions" },
256 { "tx_late_collisions" },
257 { "tx_collide_2times" },
258 { "tx_collide_3times" },
259 { "tx_collide_4times" },
260 { "tx_collide_5times" },
261 { "tx_collide_6times" },
262 { "tx_collide_7times" },
263 { "tx_collide_8times" },
264 { "tx_collide_9times" },
265 { "tx_collide_10times" },
266 { "tx_collide_11times" },
267 { "tx_collide_12times" },
268 { "tx_collide_13times" },
269 { "tx_collide_14times" },
270 { "tx_collide_15times" },
271 { "tx_ucast_packets" },
272 { "tx_mcast_packets" },
273 { "tx_bcast_packets" },
274 { "tx_carrier_sense_errors" },
275 { "tx_discards" },
276 { "tx_errors" },
277
278 { "dma_writeq_full" },
279 { "dma_write_prioq_full" },
280 { "rxbds_empty" },
281 { "rx_discards" },
282 { "rx_errors" },
283 { "rx_threshold_hit" },
284
285 { "dma_readq_full" },
286 { "dma_read_prioq_full" },
287 { "tx_comp_queue_full" },
288
289 { "ring_set_send_prod_index" },
290 { "ring_status_update" },
291 { "nic_irqs" },
292 { "nic_avoided_irqs" },
293 { "nic_tx_threshold_hit" }
294};
295
50da859d 296static const struct {
4cafd3f5
MC
297 const char string[ETH_GSTRING_LEN];
298} ethtool_test_keys[TG3_NUM_TEST] = {
299 { "nvram test (online) " },
300 { "link test (online) " },
301 { "register test (offline)" },
302 { "memory test (offline)" },
303 { "loopback test (offline)" },
304 { "interrupt test (offline)" },
305};
306
b401e9e2
MC
307static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
308{
309 writel(val, tp->regs + off);
310}
311
312static u32 tg3_read32(struct tg3 *tp, u32 off)
313{
6aa20a22 314 return (readl(tp->regs + off));
b401e9e2
MC
315}
316
0d3031d9
MC
317static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
318{
319 writel(val, tp->aperegs + off);
320}
321
322static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
323{
324 return (readl(tp->aperegs + off));
325}
326
1da177e4
LT
327static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
328{
6892914f
MC
329 unsigned long flags;
330
331 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
332 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
333 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 334 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
335}
336
337static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
338{
339 writel(val, tp->regs + off);
340 readl(tp->regs + off);
1da177e4
LT
341}
342
6892914f 343static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 344{
6892914f
MC
345 unsigned long flags;
346 u32 val;
347
348 spin_lock_irqsave(&tp->indirect_lock, flags);
349 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
350 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
351 spin_unlock_irqrestore(&tp->indirect_lock, flags);
352 return val;
353}
354
355static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
356{
357 unsigned long flags;
358
359 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
360 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
361 TG3_64BIT_REG_LOW, val);
362 return;
363 }
364 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
365 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
366 TG3_64BIT_REG_LOW, val);
367 return;
1da177e4 368 }
6892914f
MC
369
370 spin_lock_irqsave(&tp->indirect_lock, flags);
371 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
372 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
373 spin_unlock_irqrestore(&tp->indirect_lock, flags);
374
375 /* In indirect mode when disabling interrupts, we also need
376 * to clear the interrupt bit in the GRC local ctrl register.
377 */
378 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
379 (val == 0x1)) {
380 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
381 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
382 }
383}
384
385static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
386{
387 unsigned long flags;
388 u32 val;
389
390 spin_lock_irqsave(&tp->indirect_lock, flags);
391 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
392 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
393 spin_unlock_irqrestore(&tp->indirect_lock, flags);
394 return val;
395}
396
b401e9e2
MC
397/* usec_wait specifies the wait time in usec when writing to certain registers
398 * where it is unsafe to read back the register without some delay.
399 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
400 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
401 */
402static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
6892914f 403{
b401e9e2
MC
404 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
405 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
406 /* Non-posted methods */
407 tp->write32(tp, off, val);
408 else {
409 /* Posted method */
410 tg3_write32(tp, off, val);
411 if (usec_wait)
412 udelay(usec_wait);
413 tp->read32(tp, off);
414 }
415 /* Wait again after the read for the posted method to guarantee that
416 * the wait time is met.
417 */
418 if (usec_wait)
419 udelay(usec_wait);
1da177e4
LT
420}
421
09ee929c
MC
422static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
423{
424 tp->write32_mbox(tp, off, val);
6892914f
MC
425 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
426 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
427 tp->read32_mbox(tp, off);
09ee929c
MC
428}
429
20094930 430static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
431{
432 void __iomem *mbox = tp->regs + off;
433 writel(val, mbox);
434 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
435 writel(val, mbox);
436 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
437 readl(mbox);
438}
439
b5d3772c
MC
440static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
441{
442 return (readl(tp->regs + off + GRCMBOX_BASE));
443}
444
445static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
446{
447 writel(val, tp->regs + off + GRCMBOX_BASE);
448}
449
20094930 450#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 451#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
452#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
453#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 454#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
455
456#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
457#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
458#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 459#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
460
461static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
462{
6892914f
MC
463 unsigned long flags;
464
b5d3772c
MC
465 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
466 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
467 return;
468
6892914f 469 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
470 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
471 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
472 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 473
bbadf503
MC
474 /* Always leave this as zero. */
475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
476 } else {
477 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
478 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 479
bbadf503
MC
480 /* Always leave this as zero. */
481 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
482 }
483 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
484}
485
1da177e4
LT
486static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
487{
6892914f
MC
488 unsigned long flags;
489
b5d3772c
MC
490 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
491 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
492 *val = 0;
493 return;
494 }
495
6892914f 496 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
497 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
498 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
499 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 500
bbadf503
MC
501 /* Always leave this as zero. */
502 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
503 } else {
504 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
505 *val = tr32(TG3PCI_MEM_WIN_DATA);
506
507 /* Always leave this as zero. */
508 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
509 }
6892914f 510 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
511}
512
0d3031d9
MC
513static void tg3_ape_lock_init(struct tg3 *tp)
514{
515 int i;
516
517 /* Make sure the driver hasn't any stale locks. */
518 for (i = 0; i < 8; i++)
519 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
520 APE_LOCK_GRANT_DRIVER);
521}
522
523static int tg3_ape_lock(struct tg3 *tp, int locknum)
524{
525 int i, off;
526 int ret = 0;
527 u32 status;
528
529 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
530 return 0;
531
532 switch (locknum) {
533 case TG3_APE_LOCK_MEM:
534 break;
535 default:
536 return -EINVAL;
537 }
538
539 off = 4 * locknum;
540
541 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
542
543 /* Wait for up to 1 millisecond to acquire lock. */
544 for (i = 0; i < 100; i++) {
545 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
546 if (status == APE_LOCK_GRANT_DRIVER)
547 break;
548 udelay(10);
549 }
550
551 if (status != APE_LOCK_GRANT_DRIVER) {
552 /* Revoke the lock request. */
553 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
554 APE_LOCK_GRANT_DRIVER);
555
556 ret = -EBUSY;
557 }
558
559 return ret;
560}
561
562static void tg3_ape_unlock(struct tg3 *tp, int locknum)
563{
564 int off;
565
566 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
567 return;
568
569 switch (locknum) {
570 case TG3_APE_LOCK_MEM:
571 break;
572 default:
573 return;
574 }
575
576 off = 4 * locknum;
577 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
578}
579
1da177e4
LT
580static void tg3_disable_ints(struct tg3 *tp)
581{
582 tw32(TG3PCI_MISC_HOST_CTRL,
583 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c 584 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1da177e4
LT
585}
586
587static inline void tg3_cond_int(struct tg3 *tp)
588{
38f3843e
MC
589 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
590 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4 591 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
b5d3772c
MC
592 else
593 tw32(HOSTCC_MODE, tp->coalesce_mode |
594 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
595}
596
597static void tg3_enable_ints(struct tg3 *tp)
598{
bbe832c0
MC
599 tp->irq_sync = 0;
600 wmb();
601
1da177e4
LT
602 tw32(TG3PCI_MISC_HOST_CTRL,
603 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c
MC
604 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
605 (tp->last_tag << 24));
fcfa0a32
MC
606 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
607 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
608 (tp->last_tag << 24));
1da177e4
LT
609 tg3_cond_int(tp);
610}
611
04237ddd
MC
612static inline unsigned int tg3_has_work(struct tg3 *tp)
613{
614 struct tg3_hw_status *sblk = tp->hw_status;
615 unsigned int work_exists = 0;
616
617 /* check for phy events */
618 if (!(tp->tg3_flags &
619 (TG3_FLAG_USE_LINKCHG_REG |
620 TG3_FLAG_POLL_SERDES))) {
621 if (sblk->status & SD_STATUS_LINK_CHG)
622 work_exists = 1;
623 }
624 /* check for RX/TX work to do */
625 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
626 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
627 work_exists = 1;
628
629 return work_exists;
630}
631
1da177e4 632/* tg3_restart_ints
04237ddd
MC
633 * similar to tg3_enable_ints, but it accurately determines whether there
634 * is new work pending and can return without flushing the PIO write
6aa20a22 635 * which reenables interrupts
1da177e4
LT
636 */
637static void tg3_restart_ints(struct tg3 *tp)
638{
fac9b83e
DM
639 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
640 tp->last_tag << 24);
1da177e4
LT
641 mmiowb();
642
fac9b83e
DM
643 /* When doing tagged status, this work check is unnecessary.
644 * The last_tag we write above tells the chip which piece of
645 * work we've completed.
646 */
647 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
648 tg3_has_work(tp))
04237ddd
MC
649 tw32(HOSTCC_MODE, tp->coalesce_mode |
650 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
651}
652
653static inline void tg3_netif_stop(struct tg3 *tp)
654{
bbe832c0 655 tp->dev->trans_start = jiffies; /* prevent tx timeout */
bea3348e 656 napi_disable(&tp->napi);
1da177e4
LT
657 netif_tx_disable(tp->dev);
658}
659
660static inline void tg3_netif_start(struct tg3 *tp)
661{
662 netif_wake_queue(tp->dev);
663 /* NOTE: unconditional netif_wake_queue is only appropriate
664 * so long as all callers are assured to have free tx slots
665 * (such as after tg3_init_hw)
666 */
bea3348e 667 napi_enable(&tp->napi);
f47c11ee
DM
668 tp->hw_status->status |= SD_STATUS_UPDATED;
669 tg3_enable_ints(tp);
1da177e4
LT
670}
671
672static void tg3_switch_clocks(struct tg3 *tp)
673{
674 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
675 u32 orig_clock_ctrl;
676
795d01c5
MC
677 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
678 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
4cf78e4f
MC
679 return;
680
1da177e4
LT
681 orig_clock_ctrl = clock_ctrl;
682 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
683 CLOCK_CTRL_CLKRUN_OENABLE |
684 0x1f);
685 tp->pci_clock_ctrl = clock_ctrl;
686
687 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
688 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
b401e9e2
MC
689 tw32_wait_f(TG3PCI_CLOCK_CTRL,
690 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1da177e4
LT
691 }
692 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
b401e9e2
MC
693 tw32_wait_f(TG3PCI_CLOCK_CTRL,
694 clock_ctrl |
695 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
696 40);
697 tw32_wait_f(TG3PCI_CLOCK_CTRL,
698 clock_ctrl | (CLOCK_CTRL_ALTCLK),
699 40);
1da177e4 700 }
b401e9e2 701 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1da177e4
LT
702}
703
704#define PHY_BUSY_LOOPS 5000
705
706static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
707{
708 u32 frame_val;
709 unsigned int loops;
710 int ret;
711
712 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
713 tw32_f(MAC_MI_MODE,
714 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
715 udelay(80);
716 }
717
718 *val = 0x0;
719
720 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
721 MI_COM_PHY_ADDR_MASK);
722 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
723 MI_COM_REG_ADDR_MASK);
724 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
6aa20a22 725
1da177e4
LT
726 tw32_f(MAC_MI_COM, frame_val);
727
728 loops = PHY_BUSY_LOOPS;
729 while (loops != 0) {
730 udelay(10);
731 frame_val = tr32(MAC_MI_COM);
732
733 if ((frame_val & MI_COM_BUSY) == 0) {
734 udelay(5);
735 frame_val = tr32(MAC_MI_COM);
736 break;
737 }
738 loops -= 1;
739 }
740
741 ret = -EBUSY;
742 if (loops != 0) {
743 *val = frame_val & MI_COM_DATA_MASK;
744 ret = 0;
745 }
746
747 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
748 tw32_f(MAC_MI_MODE, tp->mi_mode);
749 udelay(80);
750 }
751
752 return ret;
753}
754
755static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
756{
757 u32 frame_val;
758 unsigned int loops;
759 int ret;
760
b5d3772c
MC
761 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
762 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
763 return 0;
764
1da177e4
LT
765 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
766 tw32_f(MAC_MI_MODE,
767 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
768 udelay(80);
769 }
770
771 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
772 MI_COM_PHY_ADDR_MASK);
773 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
774 MI_COM_REG_ADDR_MASK);
775 frame_val |= (val & MI_COM_DATA_MASK);
776 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
6aa20a22 777
1da177e4
LT
778 tw32_f(MAC_MI_COM, frame_val);
779
780 loops = PHY_BUSY_LOOPS;
781 while (loops != 0) {
782 udelay(10);
783 frame_val = tr32(MAC_MI_COM);
784 if ((frame_val & MI_COM_BUSY) == 0) {
785 udelay(5);
786 frame_val = tr32(MAC_MI_COM);
787 break;
788 }
789 loops -= 1;
790 }
791
792 ret = -EBUSY;
793 if (loops != 0)
794 ret = 0;
795
796 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
797 tw32_f(MAC_MI_MODE, tp->mi_mode);
798 udelay(80);
799 }
800
801 return ret;
802}
803
9ef8ca99
MC
804static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
805{
806 u32 phy;
807
808 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
809 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
810 return;
811
812 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
813 u32 ephy;
814
815 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
816 tg3_writephy(tp, MII_TG3_EPHY_TEST,
817 ephy | MII_TG3_EPHY_SHADOW_EN);
818 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
819 if (enable)
820 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
821 else
822 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
823 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
824 }
825 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
826 }
827 } else {
828 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
829 MII_TG3_AUXCTL_SHDWSEL_MISC;
830 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
831 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
832 if (enable)
833 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
834 else
835 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
836 phy |= MII_TG3_AUXCTL_MISC_WREN;
837 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
838 }
839 }
840}
841
1da177e4
LT
842static void tg3_phy_set_wirespeed(struct tg3 *tp)
843{
844 u32 val;
845
846 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
847 return;
848
849 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
850 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
851 tg3_writephy(tp, MII_TG3_AUX_CTRL,
852 (val | (1 << 15) | (1 << 4)));
853}
854
855static int tg3_bmcr_reset(struct tg3 *tp)
856{
857 u32 phy_control;
858 int limit, err;
859
860 /* OK, reset it, and poll the BMCR_RESET bit until it
861 * clears or we time out.
862 */
863 phy_control = BMCR_RESET;
864 err = tg3_writephy(tp, MII_BMCR, phy_control);
865 if (err != 0)
866 return -EBUSY;
867
868 limit = 5000;
869 while (limit--) {
870 err = tg3_readphy(tp, MII_BMCR, &phy_control);
871 if (err != 0)
872 return -EBUSY;
873
874 if ((phy_control & BMCR_RESET) == 0) {
875 udelay(40);
876 break;
877 }
878 udelay(10);
879 }
880 if (limit <= 0)
881 return -EBUSY;
882
883 return 0;
884}
885
886static int tg3_wait_macro_done(struct tg3 *tp)
887{
888 int limit = 100;
889
890 while (limit--) {
891 u32 tmp32;
892
893 if (!tg3_readphy(tp, 0x16, &tmp32)) {
894 if ((tmp32 & 0x1000) == 0)
895 break;
896 }
897 }
898 if (limit <= 0)
899 return -EBUSY;
900
901 return 0;
902}
903
904static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
905{
906 static const u32 test_pat[4][6] = {
907 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
908 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
909 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
910 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
911 };
912 int chan;
913
914 for (chan = 0; chan < 4; chan++) {
915 int i;
916
917 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
918 (chan * 0x2000) | 0x0200);
919 tg3_writephy(tp, 0x16, 0x0002);
920
921 for (i = 0; i < 6; i++)
922 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
923 test_pat[chan][i]);
924
925 tg3_writephy(tp, 0x16, 0x0202);
926 if (tg3_wait_macro_done(tp)) {
927 *resetp = 1;
928 return -EBUSY;
929 }
930
931 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
932 (chan * 0x2000) | 0x0200);
933 tg3_writephy(tp, 0x16, 0x0082);
934 if (tg3_wait_macro_done(tp)) {
935 *resetp = 1;
936 return -EBUSY;
937 }
938
939 tg3_writephy(tp, 0x16, 0x0802);
940 if (tg3_wait_macro_done(tp)) {
941 *resetp = 1;
942 return -EBUSY;
943 }
944
945 for (i = 0; i < 6; i += 2) {
946 u32 low, high;
947
948 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
949 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
950 tg3_wait_macro_done(tp)) {
951 *resetp = 1;
952 return -EBUSY;
953 }
954 low &= 0x7fff;
955 high &= 0x000f;
956 if (low != test_pat[chan][i] ||
957 high != test_pat[chan][i+1]) {
958 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
959 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
960 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
961
962 return -EBUSY;
963 }
964 }
965 }
966
967 return 0;
968}
969
970static int tg3_phy_reset_chanpat(struct tg3 *tp)
971{
972 int chan;
973
974 for (chan = 0; chan < 4; chan++) {
975 int i;
976
977 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
978 (chan * 0x2000) | 0x0200);
979 tg3_writephy(tp, 0x16, 0x0002);
980 for (i = 0; i < 6; i++)
981 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
982 tg3_writephy(tp, 0x16, 0x0202);
983 if (tg3_wait_macro_done(tp))
984 return -EBUSY;
985 }
986
987 return 0;
988}
989
990static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
991{
992 u32 reg32, phy9_orig;
993 int retries, do_phy_reset, err;
994
995 retries = 10;
996 do_phy_reset = 1;
997 do {
998 if (do_phy_reset) {
999 err = tg3_bmcr_reset(tp);
1000 if (err)
1001 return err;
1002 do_phy_reset = 0;
1003 }
1004
1005 /* Disable transmitter and interrupt. */
1006 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1007 continue;
1008
1009 reg32 |= 0x3000;
1010 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1011
1012 /* Set full-duplex, 1000 mbps. */
1013 tg3_writephy(tp, MII_BMCR,
1014 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1015
1016 /* Set to master mode. */
1017 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1018 continue;
1019
1020 tg3_writephy(tp, MII_TG3_CTRL,
1021 (MII_TG3_CTRL_AS_MASTER |
1022 MII_TG3_CTRL_ENABLE_AS_MASTER));
1023
1024 /* Enable SM_DSP_CLOCK and 6dB. */
1025 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1026
1027 /* Block the PHY control access. */
1028 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1029 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1030
1031 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1032 if (!err)
1033 break;
1034 } while (--retries);
1035
1036 err = tg3_phy_reset_chanpat(tp);
1037 if (err)
1038 return err;
1039
1040 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1041 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1042
1043 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1044 tg3_writephy(tp, 0x16, 0x0000);
1045
1046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1048 /* Set Extended packet length bit for jumbo frames */
1049 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1050 }
1051 else {
1052 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1053 }
1054
1055 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1056
1057 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1058 reg32 &= ~0x3000;
1059 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1060 } else if (!err)
1061 err = -EBUSY;
1062
1063 return err;
1064}
1065
c8e1e82b
MC
static void tg3_link_report(struct tg3 *);
1067
1da177e4
LT
1068/* This will reset the tigon3 PHY if there is no valid
1069 * link unless the FORCE argument is non-zero.
1070 */
1071static int tg3_phy_reset(struct tg3 *tp)
1072{
1073 u32 phy_status;
1074 int err;
1075
60189ddf
MC
1076 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1077 u32 val;
1078
1079 val = tr32(GRC_MISC_CFG);
1080 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1081 udelay(40);
1082 }
1da177e4
LT
1083 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1084 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1085 if (err != 0)
1086 return -EBUSY;
1087
c8e1e82b
MC
1088 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1089 netif_carrier_off(tp->dev);
1090 tg3_link_report(tp);
1091 }
1092
1da177e4
LT
1093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1095 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1096 err = tg3_phy_reset_5703_4_5(tp);
1097 if (err)
1098 return err;
1099 goto out;
1100 }
1101
1102 err = tg3_bmcr_reset(tp);
1103 if (err)
1104 return err;
1105
1106out:
1107 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1108 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1109 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1110 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1111 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1112 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1113 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1114 }
1115 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1116 tg3_writephy(tp, 0x1c, 0x8d68);
1117 tg3_writephy(tp, 0x1c, 0x8d68);
1118 }
1119 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1120 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1121 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1122 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1123 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1124 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1125 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1126 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1127 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1128 }
c424cb24
MC
1129 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1130 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1131 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
c1d2a196
MC
1132 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1133 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1134 tg3_writephy(tp, MII_TG3_TEST1,
1135 MII_TG3_TEST1_TRIM_EN | 0x4);
1136 } else
1137 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
c424cb24
MC
1138 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1139 }
1da177e4
LT
1140 /* Set Extended packet length bit (bit 14) on all chips that */
1141 /* support jumbo frames */
1142 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1143 /* Cannot do read-modify-write on 5401 */
1144 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
0f893dc6 1145 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1146 u32 phy_reg;
1147
1148 /* Set bit 14 with read-modify-write to preserve other bits */
1149 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1150 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1151 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1152 }
1153
1154 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1155 * jumbo frames transmission.
1156 */
0f893dc6 1157 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1158 u32 phy_reg;
1159
1160 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1161 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1162 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1163 }
1164
715116a1 1165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1
MC
1166 /* adjust output voltage */
1167 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
715116a1
MC
1168 }
1169
9ef8ca99 1170 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
1171 tg3_phy_set_wirespeed(tp);
1172 return 0;
1173}
1174
1175static void tg3_frob_aux_power(struct tg3 *tp)
1176{
1177 struct tg3 *tp_peer = tp;
1178
9d26e213 1179 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1da177e4
LT
1180 return;
1181
8c2dc7e1
MC
1182 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1183 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1184 struct net_device *dev_peer;
1185
1186 dev_peer = pci_get_drvdata(tp->pdev_peer);
bc1c7567 1187 /* remove_one() may have been run on the peer. */
8c2dc7e1 1188 if (!dev_peer)
bc1c7567
MC
1189 tp_peer = tp;
1190 else
1191 tp_peer = netdev_priv(dev_peer);
1da177e4
LT
1192 }
1193
1da177e4 1194 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
6921d201
MC
1195 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1196 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1197 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1da177e4
LT
1198 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
b401e9e2
MC
1200 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1201 (GRC_LCLCTRL_GPIO_OE0 |
1202 GRC_LCLCTRL_GPIO_OE1 |
1203 GRC_LCLCTRL_GPIO_OE2 |
1204 GRC_LCLCTRL_GPIO_OUTPUT0 |
1205 GRC_LCLCTRL_GPIO_OUTPUT1),
1206 100);
1da177e4
LT
1207 } else {
1208 u32 no_gpio2;
dc56b7d4 1209 u32 grc_local_ctrl = 0;
1da177e4
LT
1210
1211 if (tp_peer != tp &&
1212 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1213 return;
1214
dc56b7d4
MC
1215 /* Workaround to prevent overdrawing Amps. */
1216 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1217 ASIC_REV_5714) {
1218 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
b401e9e2
MC
1219 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1220 grc_local_ctrl, 100);
dc56b7d4
MC
1221 }
1222
1da177e4
LT
1223 /* On 5753 and variants, GPIO2 cannot be used. */
1224 no_gpio2 = tp->nic_sram_data_cfg &
1225 NIC_SRAM_DATA_CFG_NO_GPIO2;
1226
dc56b7d4 1227 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1da177e4
LT
1228 GRC_LCLCTRL_GPIO_OE1 |
1229 GRC_LCLCTRL_GPIO_OE2 |
1230 GRC_LCLCTRL_GPIO_OUTPUT1 |
1231 GRC_LCLCTRL_GPIO_OUTPUT2;
1232 if (no_gpio2) {
1233 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1234 GRC_LCLCTRL_GPIO_OUTPUT2);
1235 }
b401e9e2
MC
1236 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1237 grc_local_ctrl, 100);
1da177e4
LT
1238
1239 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1240
b401e9e2
MC
1241 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1242 grc_local_ctrl, 100);
1da177e4
LT
1243
1244 if (!no_gpio2) {
1245 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
b401e9e2
MC
1246 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1247 grc_local_ctrl, 100);
1da177e4
LT
1248 }
1249 }
1250 } else {
1251 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1252 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1253 if (tp_peer != tp &&
1254 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1255 return;
1256
b401e9e2
MC
1257 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1258 (GRC_LCLCTRL_GPIO_OE1 |
1259 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4 1260
b401e9e2
MC
1261 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1262 GRC_LCLCTRL_GPIO_OE1, 100);
1da177e4 1263
b401e9e2
MC
1264 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1265 (GRC_LCLCTRL_GPIO_OE1 |
1266 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4
LT
1267 }
1268 }
1269}
1270
e8f3f6ca
MC
1271static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1272{
1273 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1274 return 1;
1275 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1276 if (speed != SPEED_10)
1277 return 1;
1278 } else if (speed == SPEED_10)
1279 return 1;
1280
1281 return 0;
1282}
1283
1da177e4
LT
1284static int tg3_setup_phy(struct tg3 *, int);
1285
1286#define RESET_KIND_SHUTDOWN 0
1287#define RESET_KIND_INIT 1
1288#define RESET_KIND_SUSPEND 2
1289
1290static void tg3_write_sig_post_reset(struct tg3 *, int);
1291static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1292static int tg3_nvram_lock(struct tg3 *);
1293static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1294
15c3b696
MC
1295static void tg3_power_down_phy(struct tg3 *tp)
1296{
5129724a
MC
1297 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1299 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1300 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1301
1302 sg_dig_ctrl |=
1303 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1304 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1305 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1306 }
3f7045c1 1307 return;
5129724a 1308 }
3f7045c1 1309
60189ddf
MC
1310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1311 u32 val;
1312
1313 tg3_bmcr_reset(tp);
1314 val = tr32(GRC_MISC_CFG);
1315 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1316 udelay(40);
1317 return;
1318 } else {
715116a1
MC
1319 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1320 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1321 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1322 }
3f7045c1 1323
15c3b696
MC
1324 /* The PHY should not be powered down on some chips because
1325 * of bugs.
1326 */
1327 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1328 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1329 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1330 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1331 return;
1332 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1333}
1334
bc1c7567 1335static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1336{
1337 u32 misc_host_ctrl;
1338 u16 power_control, power_caps;
1339 int pm = tp->pm_cap;
1340
1341 /* Make sure register accesses (indirect or otherwise)
1342 * will function correctly.
1343 */
1344 pci_write_config_dword(tp->pdev,
1345 TG3PCI_MISC_HOST_CTRL,
1346 tp->misc_host_ctrl);
1347
1348 pci_read_config_word(tp->pdev,
1349 pm + PCI_PM_CTRL,
1350 &power_control);
1351 power_control |= PCI_PM_CTRL_PME_STATUS;
1352 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1353 switch (state) {
bc1c7567 1354 case PCI_D0:
1da177e4
LT
1355 power_control |= 0;
1356 pci_write_config_word(tp->pdev,
1357 pm + PCI_PM_CTRL,
1358 power_control);
8c6bda1a
MC
1359 udelay(100); /* Delay after power state change */
1360
9d26e213
MC
1361 /* Switch out of Vaux if it is a NIC */
1362 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
b401e9e2 1363 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1364
1365 return 0;
1366
bc1c7567 1367 case PCI_D1:
1da177e4
LT
1368 power_control |= 1;
1369 break;
1370
bc1c7567 1371 case PCI_D2:
1da177e4
LT
1372 power_control |= 2;
1373 break;
1374
bc1c7567 1375 case PCI_D3hot:
1da177e4
LT
1376 power_control |= 3;
1377 break;
1378
1379 default:
1380 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1381 "requested.\n",
1382 tp->dev->name, state);
1383 return -EINVAL;
1384 };
1385
1386 power_control |= PCI_PM_CTRL_PME_ENABLE;
1387
1388 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1389 tw32(TG3PCI_MISC_HOST_CTRL,
1390 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1391
1392 if (tp->link_config.phy_is_low_power == 0) {
1393 tp->link_config.phy_is_low_power = 1;
1394 tp->link_config.orig_speed = tp->link_config.speed;
1395 tp->link_config.orig_duplex = tp->link_config.duplex;
1396 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1397 }
1398
747e8f8b 1399 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1400 tp->link_config.speed = SPEED_10;
1401 tp->link_config.duplex = DUPLEX_HALF;
1402 tp->link_config.autoneg = AUTONEG_ENABLE;
1403 tg3_setup_phy(tp, 0);
1404 }
1405
b5d3772c
MC
1406 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1407 u32 val;
1408
1409 val = tr32(GRC_VCPU_EXT_CTRL);
1410 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1411 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
6921d201
MC
1412 int i;
1413 u32 val;
1414
1415 for (i = 0; i < 200; i++) {
1416 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1417 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1418 break;
1419 msleep(1);
1420 }
1421 }
a85feb8c
GZ
1422 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1423 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1424 WOL_DRV_STATE_SHUTDOWN |
1425 WOL_DRV_WOL |
1426 WOL_SET_MAGIC_PKT);
6921d201 1427
1da177e4
LT
1428 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1429
1430 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1431 u32 mac_mode;
1432
1433 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1434 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1435 udelay(40);
1436
3f7045c1
MC
1437 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1438 mac_mode = MAC_MODE_PORT_MODE_GMII;
1439 else
1440 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 1441
e8f3f6ca
MC
1442 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1443 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1444 ASIC_REV_5700) {
1445 u32 speed = (tp->tg3_flags &
1446 TG3_FLAG_WOL_SPEED_100MB) ?
1447 SPEED_100 : SPEED_10;
1448 if (tg3_5700_link_polarity(tp, speed))
1449 mac_mode |= MAC_MODE_LINK_POLARITY;
1450 else
1451 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1452 }
1da177e4
LT
1453 } else {
1454 mac_mode = MAC_MODE_PORT_MODE_TBI;
1455 }
1456
cbf46853 1457 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1458 tw32(MAC_LED_CTRL, tp->led_ctrl);
1459
1460 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1461 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1462 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1463
1464 tw32_f(MAC_MODE, mac_mode);
1465 udelay(100);
1466
1467 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1468 udelay(10);
1469 }
1470
1471 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1472 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1474 u32 base_val;
1475
1476 base_val = tp->pci_clock_ctrl;
1477 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1478 CLOCK_CTRL_TXCLK_DISABLE);
1479
b401e9e2
MC
1480 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1481 CLOCK_CTRL_PWRDOWN_PLL133, 40);
d7b0a857 1482 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
795d01c5 1483 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
d7b0a857 1484 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
4cf78e4f 1485 /* do nothing */
85e94ced 1486 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1487 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1488 u32 newbits1, newbits2;
1489
1490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1492 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1493 CLOCK_CTRL_TXCLK_DISABLE |
1494 CLOCK_CTRL_ALTCLK);
1495 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1496 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1497 newbits1 = CLOCK_CTRL_625_CORE;
1498 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1499 } else {
1500 newbits1 = CLOCK_CTRL_ALTCLK;
1501 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1502 }
1503
b401e9e2
MC
1504 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1505 40);
1da177e4 1506
b401e9e2
MC
1507 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1508 40);
1da177e4
LT
1509
1510 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1511 u32 newbits3;
1512
1513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1515 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1516 CLOCK_CTRL_TXCLK_DISABLE |
1517 CLOCK_CTRL_44MHZ_CORE);
1518 } else {
1519 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1520 }
1521
b401e9e2
MC
1522 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1523 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1524 }
1525 }
1526
6921d201 1527 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
0d3031d9
MC
1528 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1529 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
3f7045c1 1530 tg3_power_down_phy(tp);
6921d201 1531
1da177e4
LT
1532 tg3_frob_aux_power(tp);
1533
1534 /* Workaround for unstable PLL clock */
1535 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1536 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1537 u32 val = tr32(0x7d00);
1538
1539 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1540 tw32(0x7d00, val);
6921d201 1541 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1542 int err;
1543
1544 err = tg3_nvram_lock(tp);
1da177e4 1545 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1546 if (!err)
1547 tg3_nvram_unlock(tp);
6921d201 1548 }
1da177e4
LT
1549 }
1550
bbadf503
MC
1551 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1552
1da177e4
LT
1553 /* Finally, set the new power state. */
1554 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1555 udelay(100); /* Delay after power state change */
1da177e4 1556
1da177e4
LT
1557 return 0;
1558}
1559
1560static void tg3_link_report(struct tg3 *tp)
1561{
1562 if (!netif_carrier_ok(tp->dev)) {
9f88f29f
MC
1563 if (netif_msg_link(tp))
1564 printk(KERN_INFO PFX "%s: Link is down.\n",
1565 tp->dev->name);
1566 } else if (netif_msg_link(tp)) {
1da177e4
LT
1567 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1568 tp->dev->name,
1569 (tp->link_config.active_speed == SPEED_1000 ?
1570 1000 :
1571 (tp->link_config.active_speed == SPEED_100 ?
1572 100 : 10)),
1573 (tp->link_config.active_duplex == DUPLEX_FULL ?
1574 "full" : "half"));
1575
1576 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1577 "%s for RX.\n",
1578 tp->dev->name,
1579 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1580 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1581 }
1582}
1583
1584static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1585{
1586 u32 new_tg3_flags = 0;
1587 u32 old_rx_mode = tp->rx_mode;
1588 u32 old_tx_mode = tp->tx_mode;
1589
1590 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1591
1592 /* Convert 1000BaseX flow control bits to 1000BaseT
1593 * bits before resolving flow control.
1594 */
1595 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1596 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1597 ADVERTISE_PAUSE_ASYM);
1598 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1599
1600 if (local_adv & ADVERTISE_1000XPAUSE)
1601 local_adv |= ADVERTISE_PAUSE_CAP;
1602 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1603 local_adv |= ADVERTISE_PAUSE_ASYM;
1604 if (remote_adv & LPA_1000XPAUSE)
1605 remote_adv |= LPA_PAUSE_CAP;
1606 if (remote_adv & LPA_1000XPAUSE_ASYM)
1607 remote_adv |= LPA_PAUSE_ASYM;
1608 }
1609
1da177e4
LT
1610 if (local_adv & ADVERTISE_PAUSE_CAP) {
1611 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1612 if (remote_adv & LPA_PAUSE_CAP)
1613 new_tg3_flags |=
1614 (TG3_FLAG_RX_PAUSE |
1615 TG3_FLAG_TX_PAUSE);
1616 else if (remote_adv & LPA_PAUSE_ASYM)
1617 new_tg3_flags |=
1618 (TG3_FLAG_RX_PAUSE);
1619 } else {
1620 if (remote_adv & LPA_PAUSE_CAP)
1621 new_tg3_flags |=
1622 (TG3_FLAG_RX_PAUSE |
1623 TG3_FLAG_TX_PAUSE);
1624 }
1625 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1626 if ((remote_adv & LPA_PAUSE_CAP) &&
1627 (remote_adv & LPA_PAUSE_ASYM))
1628 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1629 }
1630
1631 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1632 tp->tg3_flags |= new_tg3_flags;
1633 } else {
1634 new_tg3_flags = tp->tg3_flags;
1635 }
1636
1637 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1638 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1639 else
1640 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1641
1642 if (old_rx_mode != tp->rx_mode) {
1643 tw32_f(MAC_RX_MODE, tp->rx_mode);
1644 }
6aa20a22 1645
1da177e4
LT
1646 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1647 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1648 else
1649 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1650
1651 if (old_tx_mode != tp->tx_mode) {
1652 tw32_f(MAC_TX_MODE, tp->tx_mode);
1653 }
1654}
1655
1656static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1657{
1658 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1659 case MII_TG3_AUX_STAT_10HALF:
1660 *speed = SPEED_10;
1661 *duplex = DUPLEX_HALF;
1662 break;
1663
1664 case MII_TG3_AUX_STAT_10FULL:
1665 *speed = SPEED_10;
1666 *duplex = DUPLEX_FULL;
1667 break;
1668
1669 case MII_TG3_AUX_STAT_100HALF:
1670 *speed = SPEED_100;
1671 *duplex = DUPLEX_HALF;
1672 break;
1673
1674 case MII_TG3_AUX_STAT_100FULL:
1675 *speed = SPEED_100;
1676 *duplex = DUPLEX_FULL;
1677 break;
1678
1679 case MII_TG3_AUX_STAT_1000HALF:
1680 *speed = SPEED_1000;
1681 *duplex = DUPLEX_HALF;
1682 break;
1683
1684 case MII_TG3_AUX_STAT_1000FULL:
1685 *speed = SPEED_1000;
1686 *duplex = DUPLEX_FULL;
1687 break;
1688
1689 default:
715116a1
MC
1690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1691 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1692 SPEED_10;
1693 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1694 DUPLEX_HALF;
1695 break;
1696 }
1da177e4
LT
1697 *speed = SPEED_INVALID;
1698 *duplex = DUPLEX_INVALID;
1699 break;
1700 };
1701}
1702
1703static void tg3_phy_copper_begin(struct tg3 *tp)
1704{
1705 u32 new_adv;
1706 int i;
1707
1708 if (tp->link_config.phy_is_low_power) {
1709 /* Entering low power mode. Disable gigabit and
1710 * 100baseT advertisements.
1711 */
1712 tg3_writephy(tp, MII_TG3_CTRL, 0);
1713
1714 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1715 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1716 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1717 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1718
1719 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1720 } else if (tp->link_config.speed == SPEED_INVALID) {
1da177e4
LT
1721 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1722 tp->link_config.advertising &=
1723 ~(ADVERTISED_1000baseT_Half |
1724 ADVERTISED_1000baseT_Full);
1725
1726 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1727 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1728 new_adv |= ADVERTISE_10HALF;
1729 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1730 new_adv |= ADVERTISE_10FULL;
1731 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1732 new_adv |= ADVERTISE_100HALF;
1733 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1734 new_adv |= ADVERTISE_100FULL;
1735 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1736
1737 if (tp->link_config.advertising &
1738 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1739 new_adv = 0;
1740 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1741 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1742 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1743 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1744 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1745 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1746 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1747 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1748 MII_TG3_CTRL_ENABLE_AS_MASTER);
1749 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1750 } else {
1751 tg3_writephy(tp, MII_TG3_CTRL, 0);
1752 }
1753 } else {
1754 /* Asking for a specific link mode. */
1755 if (tp->link_config.speed == SPEED_1000) {
1756 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1757 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1758
1759 if (tp->link_config.duplex == DUPLEX_FULL)
1760 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1761 else
1762 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1763 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1764 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1765 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1766 MII_TG3_CTRL_ENABLE_AS_MASTER);
1767 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1768 } else {
1769 tg3_writephy(tp, MII_TG3_CTRL, 0);
1770
1771 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1772 if (tp->link_config.speed == SPEED_100) {
1773 if (tp->link_config.duplex == DUPLEX_FULL)
1774 new_adv |= ADVERTISE_100FULL;
1775 else
1776 new_adv |= ADVERTISE_100HALF;
1777 } else {
1778 if (tp->link_config.duplex == DUPLEX_FULL)
1779 new_adv |= ADVERTISE_10FULL;
1780 else
1781 new_adv |= ADVERTISE_10HALF;
1782 }
1783 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1784 }
1785 }
1786
1787 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1788 tp->link_config.speed != SPEED_INVALID) {
1789 u32 bmcr, orig_bmcr;
1790
1791 tp->link_config.active_speed = tp->link_config.speed;
1792 tp->link_config.active_duplex = tp->link_config.duplex;
1793
1794 bmcr = 0;
1795 switch (tp->link_config.speed) {
1796 default:
1797 case SPEED_10:
1798 break;
1799
1800 case SPEED_100:
1801 bmcr |= BMCR_SPEED100;
1802 break;
1803
1804 case SPEED_1000:
1805 bmcr |= TG3_BMCR_SPEED1000;
1806 break;
1807 };
1808
1809 if (tp->link_config.duplex == DUPLEX_FULL)
1810 bmcr |= BMCR_FULLDPLX;
1811
1812 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1813 (bmcr != orig_bmcr)) {
1814 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1815 for (i = 0; i < 1500; i++) {
1816 u32 tmp;
1817
1818 udelay(10);
1819 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1820 tg3_readphy(tp, MII_BMSR, &tmp))
1821 continue;
1822 if (!(tmp & BMSR_LSTATUS)) {
1823 udelay(40);
1824 break;
1825 }
1826 }
1827 tg3_writephy(tp, MII_BMCR, bmcr);
1828 udelay(40);
1829 }
1830 } else {
1831 tg3_writephy(tp, MII_BMCR,
1832 BMCR_ANENABLE | BMCR_ANRESTART);
1833 }
1834}
1835
1836static int tg3_init_5401phy_dsp(struct tg3 *tp)
1837{
1838 int err;
1839
1840 /* Turn off tap power management. */
1841 /* Set Extended packet length bit */
1842 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1843
1844 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1845 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1846
1847 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1848 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1849
1850 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1851 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1852
1853 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1854 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1855
1856 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1857 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1858
1859 udelay(40);
1860
1861 return err;
1862}
1863
3600d918 1864static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1da177e4 1865{
3600d918
MC
1866 u32 adv_reg, all_mask = 0;
1867
1868 if (mask & ADVERTISED_10baseT_Half)
1869 all_mask |= ADVERTISE_10HALF;
1870 if (mask & ADVERTISED_10baseT_Full)
1871 all_mask |= ADVERTISE_10FULL;
1872 if (mask & ADVERTISED_100baseT_Half)
1873 all_mask |= ADVERTISE_100HALF;
1874 if (mask & ADVERTISED_100baseT_Full)
1875 all_mask |= ADVERTISE_100FULL;
1da177e4
LT
1876
1877 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1878 return 0;
1879
1da177e4
LT
1880 if ((adv_reg & all_mask) != all_mask)
1881 return 0;
1882 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1883 u32 tg3_ctrl;
1884
3600d918
MC
1885 all_mask = 0;
1886 if (mask & ADVERTISED_1000baseT_Half)
1887 all_mask |= ADVERTISE_1000HALF;
1888 if (mask & ADVERTISED_1000baseT_Full)
1889 all_mask |= ADVERTISE_1000FULL;
1890
1da177e4
LT
1891 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1892 return 0;
1893
1da177e4
LT
1894 if ((tg3_ctrl & all_mask) != all_mask)
1895 return 0;
1896 }
1897 return 1;
1898}
1899
1900static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1901{
1902 int current_link_up;
1903 u32 bmsr, dummy;
1904 u16 current_speed;
1905 u8 current_duplex;
1906 int i, err;
1907
1908 tw32(MAC_EVENT, 0);
1909
1910 tw32_f(MAC_STATUS,
1911 (MAC_STATUS_SYNC_CHANGED |
1912 MAC_STATUS_CFG_CHANGED |
1913 MAC_STATUS_MI_COMPLETION |
1914 MAC_STATUS_LNKSTATE_CHANGED));
1915 udelay(40);
1916
1917 tp->mi_mode = MAC_MI_MODE_BASE;
1918 tw32_f(MAC_MI_MODE, tp->mi_mode);
1919 udelay(80);
1920
1921 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1922
1923 /* Some third-party PHYs need to be reset on link going
1924 * down.
1925 */
1926 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1927 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1929 netif_carrier_ok(tp->dev)) {
1930 tg3_readphy(tp, MII_BMSR, &bmsr);
1931 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1932 !(bmsr & BMSR_LSTATUS))
1933 force_reset = 1;
1934 }
1935 if (force_reset)
1936 tg3_phy_reset(tp);
1937
1938 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1939 tg3_readphy(tp, MII_BMSR, &bmsr);
1940 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1941 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1942 bmsr = 0;
1943
1944 if (!(bmsr & BMSR_LSTATUS)) {
1945 err = tg3_init_5401phy_dsp(tp);
1946 if (err)
1947 return err;
1948
1949 tg3_readphy(tp, MII_BMSR, &bmsr);
1950 for (i = 0; i < 1000; i++) {
1951 udelay(10);
1952 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1953 (bmsr & BMSR_LSTATUS)) {
1954 udelay(40);
1955 break;
1956 }
1957 }
1958
1959 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1960 !(bmsr & BMSR_LSTATUS) &&
1961 tp->link_config.active_speed == SPEED_1000) {
1962 err = tg3_phy_reset(tp);
1963 if (!err)
1964 err = tg3_init_5401phy_dsp(tp);
1965 if (err)
1966 return err;
1967 }
1968 }
1969 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1970 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1971 /* 5701 {A0,B0} CRC bug workaround */
1972 tg3_writephy(tp, 0x15, 0x0a75);
1973 tg3_writephy(tp, 0x1c, 0x8c68);
1974 tg3_writephy(tp, 0x1c, 0x8d68);
1975 tg3_writephy(tp, 0x1c, 0x8c68);
1976 }
1977
1978 /* Clear pending interrupts... */
1979 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1980 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1981
1982 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1983 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
715116a1 1984 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1da177e4
LT
1985 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1986
1987 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1988 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1989 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1990 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1991 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1992 else
1993 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1994 }
1995
1996 current_link_up = 0;
1997 current_speed = SPEED_INVALID;
1998 current_duplex = DUPLEX_INVALID;
1999
2000 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2001 u32 val;
2002
2003 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2004 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2005 if (!(val & (1 << 10))) {
2006 val |= (1 << 10);
2007 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2008 goto relink;
2009 }
2010 }
2011
2012 bmsr = 0;
2013 for (i = 0; i < 100; i++) {
2014 tg3_readphy(tp, MII_BMSR, &bmsr);
2015 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2016 (bmsr & BMSR_LSTATUS))
2017 break;
2018 udelay(40);
2019 }
2020
2021 if (bmsr & BMSR_LSTATUS) {
2022 u32 aux_stat, bmcr;
2023
2024 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2025 for (i = 0; i < 2000; i++) {
2026 udelay(10);
2027 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2028 aux_stat)
2029 break;
2030 }
2031
2032 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2033 &current_speed,
2034 &current_duplex);
2035
2036 bmcr = 0;
2037 for (i = 0; i < 200; i++) {
2038 tg3_readphy(tp, MII_BMCR, &bmcr);
2039 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2040 continue;
2041 if (bmcr && bmcr != 0x7fff)
2042 break;
2043 udelay(10);
2044 }
2045
2046 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2047 if (bmcr & BMCR_ANENABLE) {
2048 current_link_up = 1;
2049
2050 /* Force autoneg restart if we are exiting
2051 * low power mode.
2052 */
3600d918
MC
2053 if (!tg3_copper_is_advertising_all(tp,
2054 tp->link_config.advertising))
1da177e4
LT
2055 current_link_up = 0;
2056 } else {
2057 current_link_up = 0;
2058 }
2059 } else {
2060 if (!(bmcr & BMCR_ANENABLE) &&
2061 tp->link_config.speed == current_speed &&
2062 tp->link_config.duplex == current_duplex) {
2063 current_link_up = 1;
2064 } else {
2065 current_link_up = 0;
2066 }
2067 }
2068
2069 tp->link_config.active_speed = current_speed;
2070 tp->link_config.active_duplex = current_duplex;
2071 }
2072
2073 if (current_link_up == 1 &&
2074 (tp->link_config.active_duplex == DUPLEX_FULL) &&
2075 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2076 u32 local_adv, remote_adv;
2077
2078 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2079 local_adv = 0;
2080 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2081
2082 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2083 remote_adv = 0;
2084
2085 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2086
2087 /* If we are not advertising full pause capability,
2088 * something is wrong. Bring the link down and reconfigure.
2089 */
2090 if (local_adv != ADVERTISE_PAUSE_CAP) {
2091 current_link_up = 0;
2092 } else {
2093 tg3_setup_flow_control(tp, local_adv, remote_adv);
2094 }
2095 }
2096relink:
6921d201 2097 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1da177e4
LT
2098 u32 tmp;
2099
2100 tg3_phy_copper_begin(tp);
2101
2102 tg3_readphy(tp, MII_BMSR, &tmp);
2103 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2104 (tmp & BMSR_LSTATUS))
2105 current_link_up = 1;
2106 }
2107
2108 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2109 if (current_link_up == 1) {
2110 if (tp->link_config.active_speed == SPEED_100 ||
2111 tp->link_config.active_speed == SPEED_10)
2112 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2113 else
2114 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2115 } else
2116 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2117
2118 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2119 if (tp->link_config.active_duplex == DUPLEX_HALF)
2120 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2121
1da177e4 2122 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
2123 if (current_link_up == 1 &&
2124 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 2125 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
2126 else
2127 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
2128 }
2129
2130 /* ??? Without this setting Netgear GA302T PHY does not
2131 * ??? send/receive packets...
2132 */
2133 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2134 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2135 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2136 tw32_f(MAC_MI_MODE, tp->mi_mode);
2137 udelay(80);
2138 }
2139
2140 tw32_f(MAC_MODE, tp->mac_mode);
2141 udelay(40);
2142
2143 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2144 /* Polled via timer. */
2145 tw32_f(MAC_EVENT, 0);
2146 } else {
2147 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2148 }
2149 udelay(40);
2150
2151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2152 current_link_up == 1 &&
2153 tp->link_config.active_speed == SPEED_1000 &&
2154 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2155 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2156 udelay(120);
2157 tw32_f(MAC_STATUS,
2158 (MAC_STATUS_SYNC_CHANGED |
2159 MAC_STATUS_CFG_CHANGED));
2160 udelay(40);
2161 tg3_write_mem(tp,
2162 NIC_SRAM_FIRMWARE_MBOX,
2163 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2164 }
2165
2166 if (current_link_up != netif_carrier_ok(tp->dev)) {
2167 if (current_link_up)
2168 netif_carrier_on(tp->dev);
2169 else
2170 netif_carrier_off(tp->dev);
2171 tg3_link_report(tp);
2172 }
2173
2174 return 0;
2175}
2176
/* Bookkeeping for the software-driven fiber autonegotiation state
 * machine (tg3_fiber_aneg_smachine).  One instance lives on the stack
 * of fiber_autoneg() for the duration of one negotiation attempt.
 */
struct tg3_fiber_aneginfo {
	int state;		/* one of the ANEG_STATE_* values below */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;		/* MR_* control bits and negotiated results */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Both counted in state-machine invocations ("ticks"), not
	 * wall-clock time; cur_time is bumped once per smachine call.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last config word received */
	int ability_match_count;	/* consecutive identical config words */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* raw tx/rx autoneg config code words */
#define ANEG_CFG_NP		0x00000080	/* next page */
#define ANEG_CFG_ACK		0x00000040	/* acknowledge */
#define ANEG_CFG_RF2		0x00000020	/* remote fault 2 */
#define ANEG_CFG_RF1		0x00000010	/* remote fault 1 */
#define ANEG_CFG_PS2		0x00000001	/* asymmetric pause */
#define ANEG_CFG_PS1		0x00008000	/* symmetric pause */
#define ANEG_CFG_HD		0x00004000	/* half duplex */
#define ANEG_CFG_FD		0x00002000	/* full duplex */
#define ANEG_CFG_INVAL		0x00001f06	/* bits that must be zero */

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks to let the link settle in the RESTART / COMPLETE_ACK /
 * IDLE_DETECT states before advancing.
 */
#define ANEG_STATE_SETTLE_TIME	10000
2240
/* Advance the software fiber autonegotiation state machine by one tick.
 *
 * Called repeatedly (once per ~1us) by fiber_autoneg().  Samples the
 * MAC's received-config state, updates @ap, and drives MAC_TX_AUTO_NEG /
 * MAC_MODE to exchange config code words with the link partner.
 *
 * Returns ANEG_OK to keep polling, ANEG_TIMER_ENAB when a settle timer
 * is running, ANEG_DONE on completion, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick: clear all per-negotiation bookkeeping. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word, if any, and track how many
	 * consecutive ticks we have seen the same word (ability_match
	 * requires seeing it at least twice).
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		/* Transmit an all-zero config word to restart. */
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Let the restart settle before detecting abilities. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner acked; its word must match what we
			 * ability-matched on (ignoring the ACK bit).
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went idle: restart negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's advertised abilities into MR_*
		 * result flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * implemented (see below).
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2488
2489static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2490{
2491 int res = 0;
2492 struct tg3_fiber_aneginfo aninfo;
2493 int status = ANEG_FAILED;
2494 unsigned int tick;
2495 u32 tmp;
2496
2497 tw32_f(MAC_TX_AUTO_NEG, 0);
2498
2499 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2500 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2501 udelay(40);
2502
2503 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2504 udelay(40);
2505
2506 memset(&aninfo, 0, sizeof(aninfo));
2507 aninfo.flags |= MR_AN_ENABLE;
2508 aninfo.state = ANEG_STATE_UNKNOWN;
2509 aninfo.cur_time = 0;
2510 tick = 0;
2511 while (++tick < 195000) {
2512 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2513 if (status == ANEG_DONE || status == ANEG_FAILED)
2514 break;
2515
2516 udelay(1);
2517 }
2518
2519 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2520 tw32_f(MAC_MODE, tp->mac_mode);
2521 udelay(40);
2522
2523 *flags = aninfo.flags;
2524
2525 if (status == ANEG_DONE &&
2526 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2527 MR_LP_ADV_FULL_DUPLEX)))
2528 res = 1;
2529
2530 return res;
2531}
2532
/* Bring-up sequence for the BCM8002 SerDes PHY.
 *
 * The register numbers and values below are opaque vendor magic; the
 * write order and delays are assumed to matter — do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2582
/* Configure the fiber link using the SG_DIG hardware autoneg engine.
 *
 * @mac_status: MAC_STATUS sampled by the caller.
 * Returns 1 if the link is up after this pass, 0 otherwise.
 *
 * On everything except 5704 A0/A1 a serdes-cfg workaround is applied
 * around autoneg restarts.  When autoneg makes no progress we fall
 * back to parallel detection (link up with PCS sync but no config
 * code words).
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable the autoneg engine if it was on,
		 * then report link up purely on PCS sync.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymmetric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If a parallel-detected link still has PCS sync and no
		 * config words, keep it alive while the counter runs.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse the autoneg-restart bit (bit 30). */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: pick up the partner's pause
			 * bits from SG_DIG_STATUS.
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: turn the engine off
				 * and try parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync, no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2719
/* Configure the fiber link without the hardware autoneg engine, using
 * the software state machine (fiber_autoneg) instead.
 *
 * @mac_status: MAC_STATUS sampled by the caller.
 * Returns 1 if the link is up after this pass, 0 otherwise.  With
 * autoneg disabled the link is simply forced to 1000FD.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Nothing to do without PCS sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the MR_* result flags into MII-style
			 * pause advertisements for flow control setup.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change events until they stop. */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we have sync and see no config
		 * words: treat as link up (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
2776
/* Link setup for TBI/fiber ports (TG3_FLG2_PHY_SERDES).
 *
 * Dispatches to the hardware (SG_DIG) or software autoneg path, then
 * updates carrier state, LEDs and link_config to match the result.
 * Always returns 0; @force_reset is accepted for signature parity with
 * the other tg3_setup_*_phy() variants but is not used here.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so we can report a change
	 * even when the carrier state itself does not flip.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg port that is already up and in
	 * sync with nothing pending — just ack the change bits.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack outstanding sync/config events until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Nudge the partner by pulsing SEND_CONFIGS. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only ever run 1000FD when up. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged — still report if pause config,
		 * speed or duplex moved underneath it.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2888
747e8f8b
MC
2889static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2890{
2891 int current_link_up, err = 0;
2892 u32 bmsr, bmcr;
2893 u16 current_speed;
2894 u8 current_duplex;
2895
2896 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2897 tw32_f(MAC_MODE, tp->mac_mode);
2898 udelay(40);
2899
2900 tw32(MAC_EVENT, 0);
2901
2902 tw32_f(MAC_STATUS,
2903 (MAC_STATUS_SYNC_CHANGED |
2904 MAC_STATUS_CFG_CHANGED |
2905 MAC_STATUS_MI_COMPLETION |
2906 MAC_STATUS_LNKSTATE_CHANGED));
2907 udelay(40);
2908
2909 if (force_reset)
2910 tg3_phy_reset(tp);
2911
2912 current_link_up = 0;
2913 current_speed = SPEED_INVALID;
2914 current_duplex = DUPLEX_INVALID;
2915
2916 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2917 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2918 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2919 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2920 bmsr |= BMSR_LSTATUS;
2921 else
2922 bmsr &= ~BMSR_LSTATUS;
2923 }
747e8f8b
MC
2924
2925 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2926
2927 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2928 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2929 /* do nothing, just check for link up at the end */
2930 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2931 u32 adv, new_adv;
2932
2933 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2934 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2935 ADVERTISE_1000XPAUSE |
2936 ADVERTISE_1000XPSE_ASYM |
2937 ADVERTISE_SLCT);
2938
2939 /* Always advertise symmetric PAUSE just like copper */
2940 new_adv |= ADVERTISE_1000XPAUSE;
2941
2942 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2943 new_adv |= ADVERTISE_1000XHALF;
2944 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2945 new_adv |= ADVERTISE_1000XFULL;
2946
2947 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2948 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2949 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2950 tg3_writephy(tp, MII_BMCR, bmcr);
2951
2952 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 2953 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
747e8f8b
MC
2954 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2955
2956 return err;
2957 }
2958 } else {
2959 u32 new_bmcr;
2960
2961 bmcr &= ~BMCR_SPEED1000;
2962 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2963
2964 if (tp->link_config.duplex == DUPLEX_FULL)
2965 new_bmcr |= BMCR_FULLDPLX;
2966
2967 if (new_bmcr != bmcr) {
2968 /* BMCR_SPEED1000 is a reserved bit that needs
2969 * to be set on write.
2970 */
2971 new_bmcr |= BMCR_SPEED1000;
2972
2973 /* Force a linkdown */
2974 if (netif_carrier_ok(tp->dev)) {
2975 u32 adv;
2976
2977 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2978 adv &= ~(ADVERTISE_1000XFULL |
2979 ADVERTISE_1000XHALF |
2980 ADVERTISE_SLCT);
2981 tg3_writephy(tp, MII_ADVERTISE, adv);
2982 tg3_writephy(tp, MII_BMCR, bmcr |
2983 BMCR_ANRESTART |
2984 BMCR_ANENABLE);
2985 udelay(10);
2986 netif_carrier_off(tp->dev);
2987 }
2988 tg3_writephy(tp, MII_BMCR, new_bmcr);
2989 bmcr = new_bmcr;
2990 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2991 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2992 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2993 ASIC_REV_5714) {
2994 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2995 bmsr |= BMSR_LSTATUS;
2996 else
2997 bmsr &= ~BMSR_LSTATUS;
2998 }
747e8f8b
MC
2999 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3000 }
3001 }
3002
3003 if (bmsr & BMSR_LSTATUS) {
3004 current_speed = SPEED_1000;
3005 current_link_up = 1;
3006 if (bmcr & BMCR_FULLDPLX)
3007 current_duplex = DUPLEX_FULL;
3008 else
3009 current_duplex = DUPLEX_HALF;
3010
3011 if (bmcr & BMCR_ANENABLE) {
3012 u32 local_adv, remote_adv, common;
3013
3014 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3015 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3016 common = local_adv & remote_adv;
3017 if (common & (ADVERTISE_1000XHALF |
3018 ADVERTISE_1000XFULL)) {
3019 if (common & ADVERTISE_1000XFULL)
3020 current_duplex = DUPLEX_FULL;
3021 else
3022 current_duplex = DUPLEX_HALF;
3023
3024 tg3_setup_flow_control(tp, local_adv,
3025 remote_adv);
3026 }
3027 else
3028 current_link_up = 0;
3029 }
3030 }
3031
3032 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3033 if (tp->link_config.active_duplex == DUPLEX_HALF)
3034 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3035
3036 tw32_f(MAC_MODE, tp->mac_mode);
3037 udelay(40);
3038
3039 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3040
3041 tp->link_config.active_speed = current_speed;
3042 tp->link_config.active_duplex = current_duplex;
3043
3044 if (current_link_up != netif_carrier_ok(tp->dev)) {
3045 if (current_link_up)
3046 netif_carrier_on(tp->dev);
3047 else {
3048 netif_carrier_off(tp->dev);
3049 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3050 }
3051 tg3_link_report(tp);
3052 }
3053 return err;
3054}
3055
/* Periodic helper for MII-serdes ports: fall back to parallel
 * detection when autoneg stalls, and re-enable autoneg once the
 * partner starts sending config code words again.
 *
 * Uses vendor-specific PHY shadow (0x1c) and expansion (0x17/0x15)
 * registers; the exact register semantics are Broadcom magic.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3113
1da177e4
LT
3114static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3115{
3116 int err;
3117
3118 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3119 err = tg3_setup_fiber_phy(tp, force_reset);
747e8f8b
MC
3120 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3121 err = tg3_setup_fiber_mii_phy(tp, force_reset);
1da177e4
LT
3122 } else {
3123 err = tg3_setup_copper_phy(tp, force_reset);
3124 }
3125
3126 if (tp->link_config.active_speed == SPEED_1000 &&
3127 tp->link_config.active_duplex == DUPLEX_HALF)
3128 tw32(MAC_TX_LENGTHS,
3129 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3130 (6 << TX_LENGTHS_IPG_SHIFT) |
3131 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3132 else
3133 tw32(MAC_TX_LENGTHS,
3134 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3135 (6 << TX_LENGTHS_IPG_SHIFT) |
3136 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3137
3138 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3139 if (netif_carrier_ok(tp->dev)) {
3140 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 3141 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
3142 } else {
3143 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3144 }
3145 }
3146
8ed5d97e
MC
3147 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3148 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3149 if (!netif_carrier_ok(tp->dev))
3150 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3151 tp->pwrmgmt_thresh;
3152 else
3153 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3154 tw32(PCIE_PWR_MGMT_THRESH, val);
3155 }
3156
1da177e4
LT
3157 return err;
3158}
3159
df3e6548
MC
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already engaged, a bogus tx
	 * completion should be impossible -- treat it as a driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* tg3_reset_task() checks this flag and switches to the
	 * flushing mailbox write routines before re-initializing.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3180
1b2a7205
MC
/* Number of free TX descriptors.  The smp_mb() pairs with the barrier
 * in tg3_tx() so that a producer reading tx_cons after stopping the
 * queue sees the consumer's latest update (see comment in tg3_tx).
 */
static inline u32 tg3_tx_avail(struct tg3 *tp)
{
	smp_mb();
	return (tp->tx_pending -
		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
}
3187
1da177e4
LT
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Walk the ring from our consumer index up to the hardware's,
	 * unmapping and freeing each completed skb.
	 */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb here means the chip reported a completion
		 * for a slot we never filled -- suspected MMIO reorder.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Fragment slots were filled with skb == NULL by the xmit
		 * path; anything else (or running past hw_idx) is bogus.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under netif_tx_lock to avoid racing with the
		 * xmit path stopping the queue at the same time.
		 */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3255
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Resolve ring-specific descriptor, bookkeeping slot and buffer
	 * size; src_idx >= 0 means we are replacing a recycled buffer.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address words of the descriptor are written; see the
	 * cacheline note in the function header.
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3327
3328/* We only need to move over in the address because the other
3329 * members of the RX descriptor are invariant. See notes above
3330 * tg3_alloc_rx_skb for full details.
3331 */
3332static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3333 int src_idx, u32 dest_idx_unmasked)
3334{
3335 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3336 struct ring_info *src_map, *dest_map;
3337 int dest_idx;
3338
3339 switch (opaque_key) {
3340 case RXD_OPAQUE_RING_STD:
3341 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3342 dest_desc = &tp->rx_std[dest_idx];
3343 dest_map = &tp->rx_std_buffers[dest_idx];
3344 src_desc = &tp->rx_std[src_idx];
3345 src_map = &tp->rx_std_buffers[src_idx];
3346 break;
3347
3348 case RXD_OPAQUE_RING_JUMBO:
3349 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3350 dest_desc = &tp->rx_jumbo[dest_idx];
3351 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3352 src_desc = &tp->rx_jumbo[src_idx];
3353 src_map = &tp->rx_jumbo_buffers[src_idx];
3354 break;
3355
3356 default:
3357 return;
3358 };
3359
3360 dest_map->skb = src_map->skb;
3361 pci_unmap_addr_set(dest_map, mapping,
3362 pci_unmap_addr(src_map, mapping));
3363 dest_desc->addr_hi = src_desc->addr_hi;
3364 dest_desc->addr_lo = src_desc->addr_lo;
3365
3366 src_map->skb = NULL;
3367}
3368
#if TG3_VLAN_TAG_USED
/* Hand a received skb with a hardware-stripped VLAN tag to the stack. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3375
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which posting ring (and which
		 * slot in it) this status entry refers back to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: pass the DMA buffer up directly and
			 * post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it out and recycle the original
			 * ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about newly posted std-ring
		 * buffers so it does not run dry on long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3555
/* NAPI poll callback: handle link-change events, reap TX completions,
 * and receive up to @budget packets.  Returns the number of RX packets
 * processed (0 when bailing out for a pending TX recovery reset).
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	struct net_device *netdev = tp->dev;
	struct tg3_hw_status *sblk = tp->hw_status;
	int work_done = 0;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() may have flagged a recovery; stop polling and
		 * let the reset task handle it.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev, napi);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done = tg3_rx(tp, budget);

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		/* Record the tag before re-checking for work below. */
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	if (!tg3_has_work(tp)) {
		netif_rx_complete(netdev, napi);
		tg3_restart_ints(tp);
	}

	return work_done;
}
3607
f47c11ee
DM
/* Mark the IRQ handler as synchronized-out and wait for any handler
 * currently running to finish.  The smp_mb() ensures irq_sync is
 * visible before synchronize_irq() returns to the caller.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3617
/* Non-zero while the IRQ handler is quiesced (see tg3_irq_quiesce). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3622
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3634
/* Counterpart of tg3_full_lock(); releases tp->lock.  Note it does not
 * clear irq_sync -- callers that quiesced the IRQ reset that separately.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3639
fcfa0a32
MC
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines NAPI will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling while the handler is being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3656
88b06bc2
MC
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines NAPI will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3681
/* Legacy INTx interrupt handler (untagged status).  Claims the IRQ,
 * de-asserts it via the interrupt mailbox, and hands work to NAPI.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3730
/* Legacy INTx interrupt handler for chips using tagged status blocks.
 * Identical in structure to tg3_interrupt() except new work is detected
 * by the status tag changing rather than the SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3778
7938109f 3779/* ISR for interrupt test */
7d12e780 3780static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7938109f
MC
3781{
3782 struct net_device *dev = dev_id;
3783 struct tg3 *tp = netdev_priv(dev);
3784 struct tg3_hw_status *sblk = tp->hw_status;
3785
f9804ddb
MC
3786 if ((sblk->status & SD_STATUS_UPDATED) ||
3787 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
b16250e3 3788 tg3_disable_ints(tp);
7938109f
MC
3789 return IRQ_RETVAL(1);
3790 }
3791 return IRQ_RETVAL(0);
3792}
3793
8e7a22e3 3794static int tg3_init_hw(struct tg3 *, int);
944d980e 3795static int tg3_halt(struct tg3 *, int, int);
1da177e4 3796
b9ec6c1b
MC
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		/* Re-init failed: halt the chip and take the interface
		 * down.  dev_close() must run without tp->lock held, so
		 * drop it around the teardown and retake it before
		 * returning to satisfy the caller's locking contract.
		 */
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3818
1da177e4
LT
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the INTx handler directly to drive the device
 * when normal interrupt delivery is unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3827
/* Workqueue handler that fully resets and re-initializes the chip,
 * e.g. after a TX timeout or a detected mailbox-write reorder.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	/* Bail out if the interface was closed before we ran. */
	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* Retake the lock with IRQ quiescing for the actual reset. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* tg3_tx_recover() asked us to engage the mailbox-write-reorder
	 * workaround: switch to flushing mailbox write routines.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3868
b0408751
MC
3869static void tg3_dump_short_state(struct tg3 *tp)
3870{
3871 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3872 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3873 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3874 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3875}
3876
1da177e4
LT
/* net_device watchdog callback: log the event (when tx_err messages are
 * enabled) and schedule a full chip reset via the workqueue.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
3889
c58ec932
MC
3890/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3891static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3892{
3893 u32 base = (u32) mapping & 0xffffffff;
3894
3895 return ((base > 0xffffdcc0) &&
3896 (base + len + 8 < base));
3897}
3898
72f2afb8
MC
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
	/* Only 64-bit highmem configs can produce addresses past 40 bits,
	 * and only chips with the 40-bit DMA bug care.
	 */
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3911
1da177e4
LT
3912static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3913
72f2afb8
MC
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	/* Linearize the skb into a fresh copy whose single mapping we can
	 * re-test against the 4GB boundary, then release the original
	 * fragmented mappings from *start up to last_plus_one.
	 * Returns 0 on success, -1 if the packet had to be dropped.
	 */
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Slot 0 held the linear head; subsequent slots held frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3971
3972static void tg3_set_txd(struct tg3 *tp, int entry,
3973 dma_addr_t mapping, int len, u32 flags,
3974 u32 mss_and_is_end)
3975{
3976 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3977 int is_end = (mss_and_is_end & 0x1);
3978 u32 mss = (mss_and_is_end >> 1);
3979 u32 vlan_tag = 0;
3980
3981 if (is_end)
3982 flags |= TXD_FLAG_END;
3983 if (flags & TXD_FLAG_VLAN) {
3984 vlan_tag = flags >> 16;
3985 flags &= 0xffff;
3986 }
3987 vlan_tag |= (mss << TXD_MSS_SHIFT);
3988
3989 txd->addr_hi = ((u64) mapping >> 32);
3990 txd->addr_lo = ((u64) mapping & 0xffffffff);
3991 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3992 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3993}
3994
5a6f3074
MC
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO: the header must not be shared with clones before we
		 * modify the IP/TCP checksum fields below.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Encode the header length into the upper mss bits; IPv4
		 * additionally needs check/tot_len prepared for the chip.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only slot 0 owns the skb pointer; frag slots stay
			 * NULL (see the reclaim logic in tg3_tx).
			 */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after the barrier in tg3_tx_avail(): the reclaim
		 * path may have freed slots between the stop and here.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4113
52c0fd83
MC
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after tg3_tx_avail()'s barrier; only proceed if
		 * the reclaim path freed enough room in the meantime.
		 */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off, then transmit each
	 * resulting skb through the normal (bug-workaround) xmit path.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	/* The original skb is consumed in all cases. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
52c0fd83 4148
5a6f3074
MC
4149/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4150 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4151 */
4152static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
4153{
4154 struct tg3 *tp = netdev_priv(dev);
4155 dma_addr_t mapping;
1da177e4
LT
4156 u32 len, entry, base_flags, mss;
4157 int would_hit_hwbug;
1da177e4
LT
4158
4159 len = skb_headlen(skb);
4160
00b70504 4161 /* We are running in BH disabled context with netif_tx_lock
bea3348e 4162 * and TX reclaim runs via tp->napi.poll inside of a software
f47c11ee
DM
4163 * interrupt. Furthermore, IRQ processing runs lockless so we have
4164 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 4165 */
1b2a7205 4166 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1f064a87
SH
4167 if (!netif_queue_stopped(dev)) {
4168 netif_stop_queue(dev);
4169
4170 /* This is a hard error, log it. */
4171 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4172 "queue awake!\n", dev->name);
4173 }
1da177e4
LT
4174 return NETDEV_TX_BUSY;
4175 }
4176
4177 entry = tp->tx_prod;
4178 base_flags = 0;
84fa7933 4179 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4 4180 base_flags |= TXD_FLAG_TCPUDP_CSUM;
1da177e4 4181 mss = 0;
c13e3713 4182 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
eddc9ec5 4183 struct iphdr *iph;
52c0fd83 4184 int tcp_opt_len, ip_tcp_len, hdr_len;
1da177e4
LT
4185
4186 if (skb_header_cloned(skb) &&
4187 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4188 dev_kfree_skb(skb);
4189 goto out_unlock;
4190 }
4191
ab6a5bb6 4192 tcp_opt_len = tcp_optlen(skb);
c9bdd4b5 4193 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
1da177e4 4194
52c0fd83
MC
4195 hdr_len = ip_tcp_len + tcp_opt_len;
4196 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7f62ad5d 4197 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
52c0fd83
MC
4198 return (tg3_tso_bug(tp, skb));
4199
1da177e4
LT
4200 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4201 TXD_FLAG_CPU_POST_DMA);
4202
eddc9ec5
ACM
4203 iph = ip_hdr(skb);
4204 iph->check = 0;
4205 iph->tot_len = htons(mss + hdr_len);
1da177e4 4206 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
aa8223c7 4207 tcp_hdr(skb)->check = 0;
1da177e4 4208 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
aa8223c7
ACM
4209 } else
4210 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4211 iph->daddr, 0,
4212 IPPROTO_TCP,
4213 0);
1da177e4
LT
4214
4215 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4216 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
eddc9ec5 4217 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
4218 int tsflags;
4219
eddc9ec5 4220 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
4221 mss |= (tsflags << 11);
4222 }
4223 } else {
eddc9ec5 4224 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
4225 int tsflags;
4226
eddc9ec5 4227 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
4228 base_flags |= tsflags << 12;
4229 }
4230 }
4231 }
1da177e4
LT
4232#if TG3_VLAN_TAG_USED
4233 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4234 base_flags |= (TXD_FLAG_VLAN |
4235 (vlan_tx_tag_get(skb) << 16));
4236#endif
4237
4238 /* Queue skb data, a.k.a. the main skb fragment. */
4239 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4240
4241 tp->tx_buffers[entry].skb = skb;
4242 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4243
4244 would_hit_hwbug = 0;
4245
4246 if (tg3_4g_overflow_test(mapping, len))
c58ec932 4247 would_hit_hwbug = 1;
1da177e4
LT
4248
4249 tg3_set_txd(tp, entry, mapping, len, base_flags,
4250 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4251
4252 entry = NEXT_TX(entry);
4253
4254 /* Now loop through additional data fragments, and queue them. */
4255 if (skb_shinfo(skb)->nr_frags > 0) {
4256 unsigned int i, last;
4257
4258 last = skb_shinfo(skb)->nr_frags - 1;
4259 for (i = 0; i <= last; i++) {
4260 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4261
4262 len = frag->size;
4263 mapping = pci_map_page(tp->pdev,
4264 frag->page,
4265 frag->page_offset,
4266 len, PCI_DMA_TODEVICE);
4267
4268 tp->tx_buffers[entry].skb = NULL;
4269 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4270
c58ec932
MC
4271 if (tg3_4g_overflow_test(mapping, len))
4272 would_hit_hwbug = 1;
1da177e4 4273
72f2afb8
MC
4274 if (tg3_40bit_overflow_test(tp, mapping, len))
4275 would_hit_hwbug = 1;
4276
1da177e4
LT
4277 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4278 tg3_set_txd(tp, entry, mapping, len,
4279 base_flags, (i == last)|(mss << 1));
4280 else
4281 tg3_set_txd(tp, entry, mapping, len,
4282 base_flags, (i == last));
4283
4284 entry = NEXT_TX(entry);
4285 }
4286 }
4287
4288 if (would_hit_hwbug) {
4289 u32 last_plus_one = entry;
4290 u32 start;
1da177e4 4291
c58ec932
MC
4292 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4293 start &= (TG3_TX_RING_SIZE - 1);
1da177e4
LT
4294
4295 /* If the workaround fails due to memory/mapping
4296 * failure, silently drop this packet.
4297 */
72f2afb8 4298 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
c58ec932 4299 &start, base_flags, mss))
1da177e4
LT
4300 goto out_unlock;
4301
4302 entry = start;
4303 }
4304
4305 /* Packets are ready, update Tx producer idx local and on card. */
4306 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4307
4308 tp->tx_prod = entry;
1b2a7205 4309 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
1da177e4 4310 netif_stop_queue(dev);
42952231 4311 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
51b91468
MC
4312 netif_wake_queue(tp->dev);
4313 }
1da177e4
LT
4314
4315out_unlock:
4316 mmiowb();
1da177e4
LT
4317
4318 dev->trans_start = jiffies;
4319
4320 return NETDEV_TX_OK;
4321}
4322
4323static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4324 int new_mtu)
4325{
4326 dev->mtu = new_mtu;
4327
ef7f5ec0 4328 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4329 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4330 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4331 ethtool_op_set_tso(dev, 0);
4332 }
4333 else
4334 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4335 } else {
a4e2b347 4336 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4337 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4338 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4339 }
1da177e4
LT
4340}
4341
4342static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4343{
4344 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 4345 int err;
1da177e4
LT
4346
4347 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4348 return -EINVAL;
4349
4350 if (!netif_running(dev)) {
4351 /* We'll just catch it later when the
4352 * device is up'd.
4353 */
4354 tg3_set_mtu(dev, tp, new_mtu);
4355 return 0;
4356 }
4357
4358 tg3_netif_stop(tp);
f47c11ee
DM
4359
4360 tg3_full_lock(tp, 1);
1da177e4 4361
944d980e 4362 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
4363
4364 tg3_set_mtu(dev, tp, new_mtu);
4365
b9ec6c1b 4366 err = tg3_restart_hw(tp, 0);
1da177e4 4367
b9ec6c1b
MC
4368 if (!err)
4369 tg3_netif_start(tp);
1da177e4 4370
f47c11ee 4371 tg3_full_unlock(tp);
1da177e4 4372
b9ec6c1b 4373 return err;
1da177e4
LT
4374}
4375
4376/* Free up pending packets in all rx/tx rings.
4377 *
4378 * The chip has been shut down and the driver detached from
4379 * the networking, so no interrupts or new tx packets will
4380 * end up in the driver. tp->{tx,}lock is not held and we are not
4381 * in an interrupt context and thus may sleep.
4382 */
4383static void tg3_free_rings(struct tg3 *tp)
4384{
4385 struct ring_info *rxp;
4386 int i;
4387
4388 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4389 rxp = &tp->rx_std_buffers[i];
4390
4391 if (rxp->skb == NULL)
4392 continue;
4393 pci_unmap_single(tp->pdev,
4394 pci_unmap_addr(rxp, mapping),
7e72aad4 4395 tp->rx_pkt_buf_sz - tp->rx_offset,
1da177e4
LT
4396 PCI_DMA_FROMDEVICE);
4397 dev_kfree_skb_any(rxp->skb);
4398 rxp->skb = NULL;
4399 }
4400
4401 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4402 rxp = &tp->rx_jumbo_buffers[i];
4403
4404 if (rxp->skb == NULL)
4405 continue;
4406 pci_unmap_single(tp->pdev,
4407 pci_unmap_addr(rxp, mapping),
4408 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4409 PCI_DMA_FROMDEVICE);
4410 dev_kfree_skb_any(rxp->skb);
4411 rxp->skb = NULL;
4412 }
4413
4414 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4415 struct tx_ring_info *txp;
4416 struct sk_buff *skb;
4417 int j;
4418
4419 txp = &tp->tx_buffers[i];
4420 skb = txp->skb;
4421
4422 if (skb == NULL) {
4423 i++;
4424 continue;
4425 }
4426
4427 pci_unmap_single(tp->pdev,
4428 pci_unmap_addr(txp, mapping),
4429 skb_headlen(skb),
4430 PCI_DMA_TODEVICE);
4431 txp->skb = NULL;
4432
4433 i++;
4434
4435 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4436 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4437 pci_unmap_page(tp->pdev,
4438 pci_unmap_addr(txp, mapping),
4439 skb_shinfo(skb)->frags[j].size,
4440 PCI_DMA_TODEVICE);
4441 i++;
4442 }
4443
4444 dev_kfree_skb_any(skb);
4445 }
4446}
4447
4448/* Initialize tx/rx rings for packet processing.
4449 *
4450 * The chip has been shut down and the driver detached from
4451 * the networking, so no interrupts or new tx packets will
4452 * end up in the driver. tp->{tx,}lock are held and thus
4453 * we may not sleep.
4454 */
32d8c572 4455static int tg3_init_rings(struct tg3 *tp)
1da177e4
LT
4456{
4457 u32 i;
4458
4459 /* Free up all the SKBs. */
4460 tg3_free_rings(tp);
4461
4462 /* Zero out all descriptors. */
4463 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4464 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4465 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4466 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4467
7e72aad4 4468 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
a4e2b347 4469 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
7e72aad4
MC
4470 (tp->dev->mtu > ETH_DATA_LEN))
4471 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4472
1da177e4
LT
4473 /* Initialize invariants of the rings, we only set this
4474 * stuff once. This works because the card does not
4475 * write into the rx buffer posting rings.
4476 */
4477 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4478 struct tg3_rx_buffer_desc *rxd;
4479
4480 rxd = &tp->rx_std[i];
7e72aad4 4481 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
1da177e4
LT
4482 << RXD_LEN_SHIFT;
4483 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4484 rxd->opaque = (RXD_OPAQUE_RING_STD |
4485 (i << RXD_OPAQUE_INDEX_SHIFT));
4486 }
4487
0f893dc6 4488 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
4489 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4490 struct tg3_rx_buffer_desc *rxd;
4491
4492 rxd = &tp->rx_jumbo[i];
4493 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4494 << RXD_LEN_SHIFT;
4495 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4496 RXD_FLAG_JUMBO;
4497 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4498 (i << RXD_OPAQUE_INDEX_SHIFT));
4499 }
4500 }
4501
4502 /* Now allocate fresh SKBs for each rx ring. */
4503 for (i = 0; i < tp->rx_pending; i++) {
32d8c572
MC
4504 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4505 printk(KERN_WARNING PFX
4506 "%s: Using a smaller RX standard ring, "
4507 "only %d out of %d buffers were allocated "
4508 "successfully.\n",
4509 tp->dev->name, i, tp->rx_pending);
4510 if (i == 0)
4511 return -ENOMEM;
4512 tp->rx_pending = i;
1da177e4 4513 break;
32d8c572 4514 }
1da177e4
LT
4515 }
4516
0f893dc6 4517 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
4518 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4519 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
32d8c572
MC
4520 -1, i) < 0) {
4521 printk(KERN_WARNING PFX
4522 "%s: Using a smaller RX jumbo ring, "
4523 "only %d out of %d buffers were "
4524 "allocated successfully.\n",
4525 tp->dev->name, i, tp->rx_jumbo_pending);
4526 if (i == 0) {
4527 tg3_free_rings(tp);
4528 return -ENOMEM;
4529 }
4530 tp->rx_jumbo_pending = i;
1da177e4 4531 break;
32d8c572 4532 }
1da177e4
LT
4533 }
4534 }
32d8c572 4535 return 0;
1da177e4
LT
4536}
4537
4538/*
4539 * Must not be invoked with interrupt sources disabled and
4540 * the hardware shutdown down.
4541 */
4542static void tg3_free_consistent(struct tg3 *tp)
4543{
b4558ea9
JJ
4544 kfree(tp->rx_std_buffers);
4545 tp->rx_std_buffers = NULL;
1da177e4
LT
4546 if (tp->rx_std) {
4547 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4548 tp->rx_std, tp->rx_std_mapping);
4549 tp->rx_std = NULL;
4550 }
4551 if (tp->rx_jumbo) {
4552 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4553 tp->rx_jumbo, tp->rx_jumbo_mapping);
4554 tp->rx_jumbo = NULL;
4555 }
4556 if (tp->rx_rcb) {
4557 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4558 tp->rx_rcb, tp->rx_rcb_mapping);
4559 tp->rx_rcb = NULL;
4560 }
4561 if (tp->tx_ring) {
4562 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4563 tp->tx_ring, tp->tx_desc_mapping);
4564 tp->tx_ring = NULL;
4565 }
4566 if (tp->hw_status) {
4567 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4568 tp->hw_status, tp->status_mapping);
4569 tp->hw_status = NULL;
4570 }
4571 if (tp->hw_stats) {
4572 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4573 tp->hw_stats, tp->stats_mapping);
4574 tp->hw_stats = NULL;
4575 }
4576}
4577
4578/*
4579 * Must not be invoked with interrupt sources disabled and
4580 * the hardware shutdown down. Can sleep.
4581 */
4582static int tg3_alloc_consistent(struct tg3 *tp)
4583{
bd2b3343 4584 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
1da177e4
LT
4585 (TG3_RX_RING_SIZE +
4586 TG3_RX_JUMBO_RING_SIZE)) +
4587 (sizeof(struct tx_ring_info) *
4588 TG3_TX_RING_SIZE),
4589 GFP_KERNEL);
4590 if (!tp->rx_std_buffers)
4591 return -ENOMEM;
4592
1da177e4
LT
4593 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4594 tp->tx_buffers = (struct tx_ring_info *)
4595 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4596
4597 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4598 &tp->rx_std_mapping);
4599 if (!tp->rx_std)
4600 goto err_out;
4601
4602 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4603 &tp->rx_jumbo_mapping);
4604
4605 if (!tp->rx_jumbo)
4606 goto err_out;
4607
4608 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4609 &tp->rx_rcb_mapping);
4610 if (!tp->rx_rcb)
4611 goto err_out;
4612
4613 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4614 &tp->tx_desc_mapping);
4615 if (!tp->tx_ring)
4616 goto err_out;
4617
4618 tp->hw_status = pci_alloc_consistent(tp->pdev,
4619 TG3_HW_STATUS_SIZE,
4620 &tp->status_mapping);
4621 if (!tp->hw_status)
4622 goto err_out;
4623
4624 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4625 sizeof(struct tg3_hw_stats),
4626 &tp->stats_mapping);
4627 if (!tp->hw_stats)
4628 goto err_out;
4629
4630 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4631 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4632
4633 return 0;
4634
4635err_out:
4636 tg3_free_consistent(tp);
4637 return -ENOMEM;
4638}
4639
4640#define MAX_WAIT_CNT 1000
4641
4642/* To stop a block, clear the enable bit and poll till it
4643 * clears. tp->lock is held.
4644 */
b3b7d6be 4645static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4646{
4647 unsigned int i;
4648 u32 val;
4649
4650 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4651 switch (ofs) {
4652 case RCVLSC_MODE:
4653 case DMAC_MODE:
4654 case MBFREE_MODE:
4655 case BUFMGR_MODE:
4656 case MEMARB_MODE:
4657 /* We can't enable/disable these bits of the
4658 * 5705/5750, just say success.
4659 */
4660 return 0;
4661
4662 default:
4663 break;
4664 };
4665 }
4666
4667 val = tr32(ofs);
4668 val &= ~enable_bit;
4669 tw32_f(ofs, val);
4670
4671 for (i = 0; i < MAX_WAIT_CNT; i++) {
4672 udelay(100);
4673 val = tr32(ofs);
4674 if ((val & enable_bit) == 0)
4675 break;
4676 }
4677
b3b7d6be 4678 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4679 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4680 "ofs=%lx enable_bit=%x\n",
4681 ofs, enable_bit);
4682 return -ENODEV;
4683 }
4684
4685 return 0;
4686}
4687
4688/* tp->lock is held. */
b3b7d6be 4689static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
4690{
4691 int i, err;
4692
4693 tg3_disable_ints(tp);
4694
4695 tp->rx_mode &= ~RX_MODE_ENABLE;
4696 tw32_f(MAC_RX_MODE, tp->rx_mode);
4697 udelay(10);
4698
b3b7d6be
DM
4699 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4700 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4701 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4702 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4703 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4704 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4705
4706 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4707 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4708 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4709 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4710 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4711 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4712 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
4713
4714 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4715 tw32_f(MAC_MODE, tp->mac_mode);
4716 udelay(40);
4717
4718 tp->tx_mode &= ~TX_MODE_ENABLE;
4719 tw32_f(MAC_TX_MODE, tp->tx_mode);
4720
4721 for (i = 0; i < MAX_WAIT_CNT; i++) {
4722 udelay(100);
4723 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4724 break;
4725 }
4726 if (i >= MAX_WAIT_CNT) {
4727 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4728 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4729 tp->dev->name, tr32(MAC_TX_MODE));
e6de8ad1 4730 err |= -ENODEV;
1da177e4
LT
4731 }
4732
e6de8ad1 4733 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
4734 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4735 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
4736
4737 tw32(FTQ_RESET, 0xffffffff);
4738 tw32(FTQ_RESET, 0x00000000);
4739
b3b7d6be
DM
4740 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4741 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4
LT
4742
4743 if (tp->hw_status)
4744 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4745 if (tp->hw_stats)
4746 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4747
1da177e4
LT
4748 return err;
4749}
4750
4751/* tp->lock is held. */
4752static int tg3_nvram_lock(struct tg3 *tp)
4753{
4754 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4755 int i;
4756
ec41c7df
MC
4757 if (tp->nvram_lock_cnt == 0) {
4758 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4759 for (i = 0; i < 8000; i++) {
4760 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4761 break;
4762 udelay(20);
4763 }
4764 if (i == 8000) {
4765 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4766 return -ENODEV;
4767 }
1da177e4 4768 }
ec41c7df 4769 tp->nvram_lock_cnt++;
1da177e4
LT
4770 }
4771 return 0;
4772}
4773
4774/* tp->lock is held. */
4775static void tg3_nvram_unlock(struct tg3 *tp)
4776{
ec41c7df
MC
4777 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4778 if (tp->nvram_lock_cnt > 0)
4779 tp->nvram_lock_cnt--;
4780 if (tp->nvram_lock_cnt == 0)
4781 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4782 }
1da177e4
LT
4783}
4784
e6af301b
MC
4785/* tp->lock is held. */
4786static void tg3_enable_nvram_access(struct tg3 *tp)
4787{
4788 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4789 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4790 u32 nvaccess = tr32(NVRAM_ACCESS);
4791
4792 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4793 }
4794}
4795
4796/* tp->lock is held. */
4797static void tg3_disable_nvram_access(struct tg3 *tp)
4798{
4799 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4800 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4801 u32 nvaccess = tr32(NVRAM_ACCESS);
4802
4803 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4804 }
4805}
4806
0d3031d9
MC
4807static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4808{
4809 int i;
4810 u32 apedata;
4811
4812 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4813 if (apedata != APE_SEG_SIG_MAGIC)
4814 return;
4815
4816 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4817 if (apedata != APE_FW_STATUS_READY)
4818 return;
4819
4820 /* Wait for up to 1 millisecond for APE to service previous event. */
4821 for (i = 0; i < 10; i++) {
4822 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4823 return;
4824
4825 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4826
4827 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4828 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4829 event | APE_EVENT_STATUS_EVENT_PENDING);
4830
4831 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4832
4833 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4834 break;
4835
4836 udelay(100);
4837 }
4838
4839 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4840 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4841}
4842
4843static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4844{
4845 u32 event;
4846 u32 apedata;
4847
4848 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4849 return;
4850
4851 switch (kind) {
4852 case RESET_KIND_INIT:
4853 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4854 APE_HOST_SEG_SIG_MAGIC);
4855 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4856 APE_HOST_SEG_LEN_MAGIC);
4857 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4858 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4859 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4860 APE_HOST_DRIVER_ID_MAGIC);
4861 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4862 APE_HOST_BEHAV_NO_PHYLOCK);
4863
4864 event = APE_EVENT_STATUS_STATE_START;
4865 break;
4866 case RESET_KIND_SHUTDOWN:
4867 event = APE_EVENT_STATUS_STATE_UNLOAD;
4868 break;
4869 case RESET_KIND_SUSPEND:
4870 event = APE_EVENT_STATUS_STATE_SUSPEND;
4871 break;
4872 default:
4873 return;
4874 }
4875
4876 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4877
4878 tg3_ape_send_event(tp, event);
4879}
4880
1da177e4
LT
4881/* tp->lock is held. */
4882static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4883{
f49639e6
DM
4884 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4885 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4
LT
4886
4887 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4888 switch (kind) {
4889 case RESET_KIND_INIT:
4890 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4891 DRV_STATE_START);
4892 break;
4893
4894 case RESET_KIND_SHUTDOWN:
4895 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4896 DRV_STATE_UNLOAD);
4897 break;
4898
4899 case RESET_KIND_SUSPEND:
4900 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4901 DRV_STATE_SUSPEND);
4902 break;
4903
4904 default:
4905 break;
4906 };
4907 }
0d3031d9
MC
4908
4909 if (kind == RESET_KIND_INIT ||
4910 kind == RESET_KIND_SUSPEND)
4911 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
4912}
4913
4914/* tp->lock is held. */
4915static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4916{
4917 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4918 switch (kind) {
4919 case RESET_KIND_INIT:
4920 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4921 DRV_STATE_START_DONE);
4922 break;
4923
4924 case RESET_KIND_SHUTDOWN:
4925 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4926 DRV_STATE_UNLOAD_DONE);
4927 break;
4928
4929 default:
4930 break;
4931 };
4932 }
0d3031d9
MC
4933
4934 if (kind == RESET_KIND_SHUTDOWN)
4935 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
4936}
4937
4938/* tp->lock is held. */
4939static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4940{
4941 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4942 switch (kind) {
4943 case RESET_KIND_INIT:
4944 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4945 DRV_STATE_START);
4946 break;
4947
4948 case RESET_KIND_SHUTDOWN:
4949 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4950 DRV_STATE_UNLOAD);
4951 break;
4952
4953 case RESET_KIND_SUSPEND:
4954 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4955 DRV_STATE_SUSPEND);
4956 break;
4957
4958 default:
4959 break;
4960 };
4961 }
4962}
4963
7a6f4369
MC
4964static int tg3_poll_fw(struct tg3 *tp)
4965{
4966 int i;
4967 u32 val;
4968
b5d3772c 4969 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
0ccead18
GZ
4970 /* Wait up to 20ms for init done. */
4971 for (i = 0; i < 200; i++) {
b5d3772c
MC
4972 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4973 return 0;
0ccead18 4974 udelay(100);
b5d3772c
MC
4975 }
4976 return -ENODEV;
4977 }
4978
7a6f4369
MC
4979 /* Wait for firmware initialization to complete. */
4980 for (i = 0; i < 100000; i++) {
4981 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4982 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4983 break;
4984 udelay(10);
4985 }
4986
4987 /* Chip might not be fitted with firmware. Some Sun onboard
4988 * parts are configured like that. So don't signal the timeout
4989 * of the above loop as an error, but do report the lack of
4990 * running firmware once.
4991 */
4992 if (i >= 100000 &&
4993 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4994 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4995
4996 printk(KERN_INFO PFX "%s: No firmware running.\n",
4997 tp->dev->name);
4998 }
4999
5000 return 0;
5001}
5002
ee6a99b5
MC
5003/* Save PCI command register before chip reset */
5004static void tg3_save_pci_state(struct tg3 *tp)
5005{
5006 u32 val;
5007
5008 pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5009 tp->pci_cmd = val;
5010}
5011
5012/* Restore PCI state after chip reset */
5013static void tg3_restore_pci_state(struct tg3 *tp)
5014{
5015 u32 val;
5016
5017 /* Re-enable indirect register accesses. */
5018 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5019 tp->misc_host_ctrl);
5020
5021 /* Set MAX PCI retry to zero. */
5022 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5023 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5024 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5025 val |= PCISTATE_RETRY_SAME_DMA;
0d3031d9
MC
5026 /* Allow reads and writes to the APE register and memory space. */
5027 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5028 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5029 PCISTATE_ALLOW_APE_SHMEM_WR;
ee6a99b5
MC
5030 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5031
5032 pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);
5033
5034 /* Make sure PCI-X relaxed ordering bit is clear. */
9974a356
MC
5035 if (tp->pcix_cap) {
5036 u16 pcix_cmd;
5037
5038 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5039 &pcix_cmd);
5040 pcix_cmd &= ~PCI_X_CMD_ERO;
5041 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5042 pcix_cmd);
5043 }
ee6a99b5
MC
5044
5045 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ee6a99b5
MC
5046
5047 /* Chip reset on 5780 will reset MSI enable bit,
5048 * so need to restore it.
5049 */
5050 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5051 u16 ctrl;
5052
5053 pci_read_config_word(tp->pdev,
5054 tp->msi_cap + PCI_MSI_FLAGS,
5055 &ctrl);
5056 pci_write_config_word(tp->pdev,
5057 tp->msi_cap + PCI_MSI_FLAGS,
5058 ctrl | PCI_MSI_FLAGS_ENABLE);
5059 val = tr32(MSGINT_MODE);
5060 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5061 }
5062 }
5063}
5064
1da177e4
LT
5065static void tg3_stop_fw(struct tg3 *);
5066
5067/* tp->lock is held. */
5068static int tg3_chip_reset(struct tg3 *tp)
5069{
5070 u32 val;
1ee582d8 5071 void (*write_op)(struct tg3 *, u32, u32);
7a6f4369 5072 int err;
1da177e4 5073
f49639e6
DM
5074 tg3_nvram_lock(tp);
5075
5076 /* No matching tg3_nvram_unlock() after this because
5077 * chip reset below will undo the nvram lock.
5078 */
5079 tp->nvram_lock_cnt = 0;
1da177e4 5080
ee6a99b5
MC
5081 /* GRC_MISC_CFG core clock reset will clear the memory
5082 * enable bit in PCI register 4 and the MSI enable bit
5083 * on some chips, so we save relevant registers here.
5084 */
5085 tg3_save_pci_state(tp);
5086
d9ab5ad1 5087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 5088 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28
MC
5089 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
d9ab5ad1
MC
5091 tw32(GRC_FASTBOOT_PC, 0);
5092
1da177e4
LT
5093 /*
5094 * We must avoid the readl() that normally takes place.
5095 * It locks machines, causes machine checks, and other
5096 * fun things. So, temporarily disable the 5701
5097 * hardware workaround, while we do the reset.
5098 */
1ee582d8
MC
5099 write_op = tp->write32;
5100 if (write_op == tg3_write_flush_reg32)
5101 tp->write32 = tg3_write32;
1da177e4 5102
d18edcb2
MC
5103 /* Prevent the irq handler from reading or writing PCI registers
5104 * during chip reset when the memory enable bit in the PCI command
5105 * register may be cleared. The chip does not generate interrupt
5106 * at this time, but the irq handler may still be called due to irq
5107 * sharing or irqpoll.
5108 */
5109 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
b8fa2f3a
MC
5110 if (tp->hw_status) {
5111 tp->hw_status->status = 0;
5112 tp->hw_status->status_tag = 0;
5113 }
d18edcb2
MC
5114 tp->last_tag = 0;
5115 smp_mb();
5116 synchronize_irq(tp->pdev->irq);
5117
1da177e4
LT
5118 /* do the reset */
5119 val = GRC_MISC_CFG_CORECLK_RESET;
5120
5121 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5122 if (tr32(0x7e2c) == 0x60) {
5123 tw32(0x7e2c, 0x20);
5124 }
5125 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5126 tw32(GRC_MISC_CFG, (1 << 29));
5127 val |= (1 << 29);
5128 }
5129 }
5130
b5d3772c
MC
5131 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5132 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5133 tw32(GRC_VCPU_EXT_CTRL,
5134 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5135 }
5136
1da177e4
LT
5137 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5138 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5139 tw32(GRC_MISC_CFG, val);
5140
1ee582d8
MC
5141 /* restore 5701 hardware bug workaround write method */
5142 tp->write32 = write_op;
1da177e4
LT
5143
5144 /* Unfortunately, we have to delay before the PCI read back.
5145 * Some 575X chips even will not respond to a PCI cfg access
5146 * when the reset command is given to the chip.
5147 *
5148 * How do these hardware designers expect things to work
5149 * properly if the PCI write is posted for a long period
5150 * of time? It is always necessary to have some method by
5151 * which a register read back can occur to push the write
5152 * out which does the reset.
5153 *
5154 * For most tg3 variants the trick below was working.
5155 * Ho hum...
5156 */
5157 udelay(120);
5158
5159 /* Flush PCI posted writes. The normal MMIO registers
5160 * are inaccessible at this time so this is the only
5161 * way to make this reliably (actually, this is no longer
5162 * the case, see above). I tried to use indirect
5163 * register read/write but this upset some 5701 variants.
5164 */
5165 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5166
5167 udelay(120);
5168
5169 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5170 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5171 int i;
5172 u32 cfg_val;
5173
5174 /* Wait for link training to complete. */
5175 for (i = 0; i < 5000; i++)
5176 udelay(100);
5177
5178 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5179 pci_write_config_dword(tp->pdev, 0xc4,
5180 cfg_val | (1 << 15));
5181 }
5182 /* Set PCIE max payload size and clear error status. */
5183 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5184 }
5185
ee6a99b5 5186 tg3_restore_pci_state(tp);
1da177e4 5187
d18edcb2
MC
5188 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5189
ee6a99b5
MC
5190 val = 0;
5191 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4cf78e4f 5192 val = tr32(MEMARB_MODE);
ee6a99b5 5193 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1da177e4
LT
5194
5195 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5196 tg3_stop_fw(tp);
5197 tw32(0x5000, 0x400);
5198 }
5199
5200 tw32(GRC_MODE, tp->grc_mode);
5201
5202 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
ab0049b4 5203 val = tr32(0xc4);
1da177e4
LT
5204
5205 tw32(0xc4, val | (1 << 15));
5206 }
5207
5208 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5209 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5210 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5211 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5212 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5213 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5214 }
5215
5216 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5217 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5218 tw32_f(MAC_MODE, tp->mac_mode);
747e8f8b
MC
5219 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5220 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5221 tw32_f(MAC_MODE, tp->mac_mode);
1da177e4
LT
5222 } else
5223 tw32_f(MAC_MODE, 0);
5224 udelay(40);
5225
7a6f4369
MC
5226 err = tg3_poll_fw(tp);
5227 if (err)
5228 return err;
1da177e4
LT
5229
5230 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5231 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
ab0049b4 5232 val = tr32(0x7c00);
1da177e4
LT
5233
5234 tw32(0x7c00, val | (1 << 25));
5235 }
5236
5237 /* Reprobe ASF enable state. */
5238 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5239 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5240 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5241 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5242 u32 nic_cfg;
5243
5244 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5245 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5246 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 5247 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
5248 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5249 }
5250 }
5251
5252 return 0;
5253}
5254
5255/* tp->lock is held. */
5256static void tg3_stop_fw(struct tg3 *tp)
5257{
0d3031d9
MC
5258 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5259 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
1da177e4
LT
5260 u32 val;
5261 int i;
5262
5263 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5264 val = tr32(GRC_RX_CPU_EVENT);
5265 val |= (1 << 14);
5266 tw32(GRC_RX_CPU_EVENT, val);
5267
5268 /* Wait for RX cpu to ACK the event. */
5269 for (i = 0; i < 100; i++) {
5270 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5271 break;
5272 udelay(1);
5273 }
5274 }
5275}
5276
/* tp->lock is held.
 *
 * Bring the chip to a halted, freshly-reset state: quiesce firmware,
 * write the pre-reset signature, stop MAC/DMA activity, reset the
 * core, and finally write the legacy and post-reset signatures.
 * Returns 0 on success or the error from tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);

	err = tg3_chip_reset(tp);

	/* Signatures are written even when the chip reset failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5297
/* Layout of the on-chip firmware image used by the 5701 A0 bug fix
 * (tg3_load_5701_a0_firmware_fix).  Addresses are in the on-chip
 * CPU's address space; lengths are in bytes.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0	/* NOTE(review): "RELASE" typo is historical; left as-is */
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
5312
/* .text section of the 5701 A0 fixup firmware, copied verbatim into
 * the RX and TX CPU scratch memory by tg3_load_5701_a0_firmware_fix().
 * Opaque binary payload -- do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5406
/* .rodata section of the 5701 A0 fixup firmware.  The words appear to
 * encode short ASCII tags (e.g. 0x35373031 is "5701") -- presumably
 * event/name strings used by the firmware; opaque payload, do not edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5414
/* The .data section is entirely zero, so it is compiled out; the
 * loader writes zeros instead when fw_info.data_data is NULL.
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5421
/* Scratch memory windows belonging to the RX and TX CPUs; firmware
 * images are downloaded into these regions by tg3_load_firmware_cpu().
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5426
5427/* tp->lock is held. */
5428static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5429{
5430 int i;
5431
5d9428de
ES
5432 BUG_ON(offset == TX_CPU_BASE &&
5433 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
1da177e4 5434
b5d3772c
MC
5435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5436 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5437
5438 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5439 return 0;
5440 }
1da177e4
LT
5441 if (offset == RX_CPU_BASE) {
5442 for (i = 0; i < 10000; i++) {
5443 tw32(offset + CPU_STATE, 0xffffffff);
5444 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5445 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5446 break;
5447 }
5448
5449 tw32(offset + CPU_STATE, 0xffffffff);
5450 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5451 udelay(10);
5452 } else {
5453 for (i = 0; i < 10000; i++) {
5454 tw32(offset + CPU_STATE, 0xffffffff);
5455 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5456 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5457 break;
5458 }
5459 }
5460
5461 if (i >= 10000) {
5462 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5463 "and %s CPU\n",
5464 tp->dev->name,
5465 (offset == RX_CPU_BASE ? "RX" : "TX"));
5466 return -ENODEV;
5467 }
ec41c7df
MC
5468
5469 /* Clear firmware's nvram arbitration. */
5470 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5471 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
1da177e4
LT
5472 return 0;
5473}
5474
/* Describes one downloadable firmware image as three sections
 * (text, rodata, data), each with its load address, length in bytes,
 * and payload pointer.  A NULL payload means the section is all
 * zeros (see tg3_load_firmware_cpu).
 */
struct fw_info {
	unsigned int text_base;		/* load address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* load address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* load address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5486
5487/* tp->lock is held. */
5488static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5489 int cpu_scratch_size, struct fw_info *info)
5490{
ec41c7df 5491 int err, lock_err, i;
1da177e4
LT
5492 void (*write_op)(struct tg3 *, u32, u32);
5493
5494 if (cpu_base == TX_CPU_BASE &&
5495 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5496 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5497 "TX cpu firmware on %s which is 5705.\n",
5498 tp->dev->name);
5499 return -EINVAL;
5500 }
5501
5502 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5503 write_op = tg3_write_mem;
5504 else
5505 write_op = tg3_write_indirect_reg32;
5506
1b628151
MC
5507 /* It is possible that bootcode is still loading at this point.
5508 * Get the nvram lock first before halting the cpu.
5509 */
ec41c7df 5510 lock_err = tg3_nvram_lock(tp);
1da177e4 5511 err = tg3_halt_cpu(tp, cpu_base);
ec41c7df
MC
5512 if (!lock_err)
5513 tg3_nvram_unlock(tp);
1da177e4
LT
5514 if (err)
5515 goto out;
5516
5517 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5518 write_op(tp, cpu_scratch_base + i, 0);
5519 tw32(cpu_base + CPU_STATE, 0xffffffff);
5520 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5521 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5522 write_op(tp, (cpu_scratch_base +
5523 (info->text_base & 0xffff) +
5524 (i * sizeof(u32))),
5525 (info->text_data ?
5526 info->text_data[i] : 0));
5527 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5528 write_op(tp, (cpu_scratch_base +
5529 (info->rodata_base & 0xffff) +
5530 (i * sizeof(u32))),
5531 (info->rodata_data ?
5532 info->rodata_data[i] : 0));
5533 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5534 write_op(tp, (cpu_scratch_base +
5535 (info->data_base & 0xffff) +
5536 (i * sizeof(u32))),
5537 (info->data_data ?
5538 info->data_data[i] : 0));
5539
5540 err = 0;
5541
5542out:
1da177e4
LT
5543 return err;
5544}
5545
5546/* tp->lock is held. */
5547static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5548{
5549 struct fw_info info;
5550 int err, i;
5551
5552 info.text_base = TG3_FW_TEXT_ADDR;
5553 info.text_len = TG3_FW_TEXT_LEN;
5554 info.text_data = &tg3FwText[0];
5555 info.rodata_base = TG3_FW_RODATA_ADDR;
5556 info.rodata_len = TG3_FW_RODATA_LEN;
5557 info.rodata_data = &tg3FwRodata[0];
5558 info.data_base = TG3_FW_DATA_ADDR;
5559 info.data_len = TG3_FW_DATA_LEN;
5560 info.data_data = NULL;
5561
5562 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5563 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5564 &info);
5565 if (err)
5566 return err;
5567
5568 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5569 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5570 &info);
5571 if (err)
5572 return err;
5573
5574 /* Now startup only the RX cpu. */
5575 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5576 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5577
5578 for (i = 0; i < 5; i++) {
5579 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5580 break;
5581 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5582 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5583 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5584 udelay(1000);
5585 }
5586 if (i >= 5) {
5587 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5588 "to set RX CPU PC, is %08x should be %08x\n",
5589 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5590 TG3_FW_TEXT_ADDR);
5591 return -ENODEV;
5592 }
5593 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5594 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5595
5596 return 0;
5597}
5598
1da177e4
LT
5599
/* Layout of the TSO offload firmware image (tg3TsoFwText et al.).
 * Addresses are in the on-chip CPU's address space; lengths in bytes.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6	/* NOTE(review): "RELASE" typo is historical; left as-is */
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5614
50da859d 5615static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5616 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5617 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5618 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5619 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5620 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5621 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5622 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5623 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5624 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5625 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5626 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5627 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5628 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5629 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5630 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5631 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5632 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5633 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5634 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5635 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5636 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5637 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5638 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5639 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5640 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5641 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5642 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5643 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5644 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5645 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5646 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5647 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5648 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5649 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5650 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5651 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5652 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5653 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5654 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5655 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5656 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5657 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5658 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5659 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5660 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5661 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5662 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5663 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5664 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5665 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5666 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5667 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5668 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5669 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5670 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5671 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5672 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5673 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5674 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5675 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5676 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5677 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5678 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5679 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5680 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5681 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5682 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5683 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5684 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5685 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5686 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5687 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5688 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5689 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5690 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5691 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5692 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5693 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5694 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5695 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5696 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5697 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5698 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5699 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5700 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5701 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5702 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5703 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5704 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5705 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5706 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5707 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5708 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5709 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5710 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5711 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5712 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5713 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5714 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5715 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5716 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5717 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5718 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5719 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5720 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5721 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5722 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5723 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5724 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5725 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5726 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5727 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5728 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5729 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5730 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5731 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5732 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5733 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5734 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5735 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5736 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5737 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5738 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5739 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5740 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5741 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5742 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5743 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5744 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5745 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5746 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5747 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5748 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5749 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5750 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5751 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5752 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5753 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5754 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5755 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5756 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5757 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5758 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5759 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5760 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5761 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5762 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5763 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5764 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5765 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5766 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5767 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5768 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5769 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5770 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5771 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5772 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5773 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5774 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5775 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5776 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5777 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5778 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5779 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5780 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5781 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5782 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5783 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5784 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5785 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5786 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5787 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5788 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5789 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5790 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5791 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5792 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5793 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5794 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5795 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5796 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5797 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5798 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5799 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5800 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5801 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5802 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5803 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5804 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5805 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5806 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5807 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5808 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5809 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5810 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5811 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5812 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5813 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5814 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5815 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5816 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5817 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5818 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5819 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5820 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5821 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5822 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5823 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5824 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5825 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5826 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5827 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5828 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5829 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5830 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5831 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5832 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5833 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5834 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5835 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5836 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5837 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5838 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5839 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5840 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5841 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5842 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5843 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5844 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5845 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5846 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5847 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5848 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5849 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5850 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5851 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5852 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5853 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5854 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5855 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5856 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5857 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5858 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5859 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5860 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5861 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5862 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5863 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5864 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5865 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5866 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5867 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5868 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5869 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5870 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5871 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5872 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5873 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5874 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5875 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5876 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5877 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5878 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5879 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5880 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5881 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5882 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5883 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5884 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5885 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5886 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5887 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5888 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5889 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5890 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5891 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5892 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5893 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5894 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5895 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5896 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5897 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5898 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5899 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5900};
5901
/* Read-only data section of the TSO firmware image.  The words are
 * big-endian ASCII tags used by the firmware itself ("Main CpuB",
 * "Main CpuA", "stkoffldIn", "stkoff**", "SwEvent0", "fatalErr");
 * downloaded verbatim via tg3_load_tso_firmware().
 */
50da859d 5902static const u32 tg3TsoFwRodata[] = {
1da177e4
LT
5903 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5904 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5905 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5906 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5907 0x00000000,
5908};
5909
/* Initialized data section of the TSO firmware image.  Carries the
 * big-endian ASCII version tag "stkoffld_v1.6.0"; downloaded verbatim
 * via tg3_load_tso_firmware().
 */
50da859d 5910static const u32 tg3TsoFwData[] = {
1da177e4
LT
5911 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5912 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5913 0x00000000,
5914};
5915
5916/* 5705 needs a special version of the TSO firmware. */
/* NIC SRAM memory map of the 5705 TSO firmware image: text at
 * 0x00010000 (0xe90 bytes), rodata at 0x00010e90, data at 0x00010f00,
 * followed by the sbss/bss scratch areas sized below.
 * NOTE(review): "RELASE" in the minor-version macro looks like a typo
 * of "RELEASE"; left unchanged to avoid breaking any users elsewhere
 * in the file.
 */
5917#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5918#define TG3_TSO5_FW_RELASE_MINOR 0x2
5919#define TG3_TSO5_FW_RELEASE_FIX 0x0
5920#define TG3_TSO5_FW_START_ADDR 0x00010000
5921#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5922#define TG3_TSO5_FW_TEXT_LEN 0xe90
5923#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5924#define TG3_TSO5_FW_RODATA_LEN 0x50
5925#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5926#define TG3_TSO5_FW_DATA_LEN 0x20
5927#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5928#define TG3_TSO5_FW_SBSS_LEN 0x28
5929#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5930#define TG3_TSO5_FW_BSS_LEN 0x88
5931
50da859d 5932static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5933 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5934 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5935 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5936 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5937 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5938 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5939 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5940 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5941 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5942 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5943 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5944 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5945 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5946 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5947 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5948 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5949 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5950 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5951 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5952 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5953 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5954 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5955 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5956 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5957 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5958 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5959 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5960 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5961 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5962 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5963 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5964 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5965 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5966 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5967 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5968 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5969 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5970 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5971 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5972 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5973 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5974 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5975 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5976 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5977 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5978 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5979 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5980 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5981 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5982 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5983 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5984 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5985 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5986 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5987 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5988 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5989 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5990 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5991 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5992 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5993 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5994 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5995 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5996 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5997 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5998 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5999 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6000 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6001 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6002 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6003 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6004 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6005 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6006 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6007 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6008 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6009 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6010 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6011 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6012 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6013 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6014 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6015 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6016 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6017 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6018 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6019 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6020 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6021 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6022 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6023 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6024 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6025 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6026 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6027 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6028 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6029 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6030 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6031 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6032 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6033 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6034 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6035 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6036 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6037 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6038 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6039 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6040 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6041 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6042 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6043 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6044 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6045 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6046 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6047 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6048 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6049 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6050 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6051 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6052 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6053 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6054 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6055 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6056 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6057 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6058 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6059 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6060 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6061 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6062 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6063 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6064 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6065 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6066 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6067 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6068 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6069 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6070 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6071 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6072 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6073 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6074 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6075 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6076 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6077 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6078 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6079 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6080 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6081 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6082 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6083 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6084 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6085 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6086 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6087 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6088 0x00000000, 0x00000000, 0x00000000,
6089};
6090
/* Read-only data section of the 5705 TSO firmware image: big-endian
 * ASCII tags ("Main CpuB", "Main CpuA", "stkoffld", "fatalErr") used
 * by the firmware; downloaded verbatim via tg3_load_tso_firmware().
 */
50da859d 6091static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
1da177e4
LT
6092 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6093 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6094 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6095 0x00000000, 0x00000000, 0x00000000,
6096};
6097
/* Initialized data section of the 5705 TSO firmware image.  Carries
 * the big-endian ASCII version tag "stkoffld_v1.2.0"; downloaded
 * verbatim via tg3_load_tso_firmware().
 */
50da859d 6098static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
1da177e4
LT
6099 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6100 0x00000000, 0x00000000, 0x00000000,
6101};
6102
6103/* tp->lock is held. */
6104static int tg3_load_tso_firmware(struct tg3 *tp)
6105{
6106 struct fw_info info;
6107 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6108 int err, i;
6109
6110 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6111 return 0;
6112
6113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6114 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6115 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6116 info.text_data = &tg3Tso5FwText[0];
6117 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6118 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6119 info.rodata_data = &tg3Tso5FwRodata[0];
6120 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6121 info.data_len = TG3_TSO5_FW_DATA_LEN;
6122 info.data_data = &tg3Tso5FwData[0];
6123 cpu_base = RX_CPU_BASE;
6124 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6125 cpu_scratch_size = (info.text_len +
6126 info.rodata_len +
6127 info.data_len +
6128 TG3_TSO5_FW_SBSS_LEN +
6129 TG3_TSO5_FW_BSS_LEN);
6130 } else {
6131 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6132 info.text_len = TG3_TSO_FW_TEXT_LEN;
6133 info.text_data = &tg3TsoFwText[0];
6134 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6135 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6136 info.rodata_data = &tg3TsoFwRodata[0];
6137 info.data_base = TG3_TSO_FW_DATA_ADDR;
6138 info.data_len = TG3_TSO_FW_DATA_LEN;
6139 info.data_data = &tg3TsoFwData[0];
6140 cpu_base = TX_CPU_BASE;
6141 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6142 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6143 }
6144
6145 err = tg3_load_firmware_cpu(tp, cpu_base,
6146 cpu_scratch_base, cpu_scratch_size,
6147 &info);
6148 if (err)
6149 return err;
6150
6151 /* Now startup the cpu. */
6152 tw32(cpu_base + CPU_STATE, 0xffffffff);
6153 tw32_f(cpu_base + CPU_PC, info.text_base);
6154
6155 for (i = 0; i < 5; i++) {
6156 if (tr32(cpu_base + CPU_PC) == info.text_base)
6157 break;
6158 tw32(cpu_base + CPU_STATE, 0xffffffff);
6159 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6160 tw32_f(cpu_base + CPU_PC, info.text_base);
6161 udelay(1000);
6162 }
6163 if (i >= 5) {
6164 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6165 "to set CPU PC, is %08x should be %08x\n",
6166 tp->dev->name, tr32(cpu_base + CPU_PC),
6167 info.text_base);
6168 return -ENODEV;
6169 }
6170 tw32(cpu_base + CPU_STATE, 0xffffffff);
6171 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6172 return 0;
6173}
6174
1da177e4
LT
6175
6176/* tp->lock is held. */
986e0aeb 6177static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1da177e4
LT
6178{
6179 u32 addr_high, addr_low;
6180 int i;
6181
6182 addr_high = ((tp->dev->dev_addr[0] << 8) |
6183 tp->dev->dev_addr[1]);
6184 addr_low = ((tp->dev->dev_addr[2] << 24) |
6185 (tp->dev->dev_addr[3] << 16) |
6186 (tp->dev->dev_addr[4] << 8) |
6187 (tp->dev->dev_addr[5] << 0));
6188 for (i = 0; i < 4; i++) {
986e0aeb
MC
6189 if (i == 1 && skip_mac_1)
6190 continue;
1da177e4
LT
6191 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6192 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6193 }
6194
6195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6197 for (i = 0; i < 12; i++) {
6198 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6199 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6200 }
6201 }
6202
6203 addr_high = (tp->dev->dev_addr[0] +
6204 tp->dev->dev_addr[1] +
6205 tp->dev->dev_addr[2] +
6206 tp->dev->dev_addr[3] +
6207 tp->dev->dev_addr[4] +
6208 tp->dev->dev_addr[5]) &
6209 TX_BACKOFF_SEED_MASK;
6210 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6211}
6212
6213static int tg3_set_mac_addr(struct net_device *dev, void *p)
6214{
6215 struct tg3 *tp = netdev_priv(dev);
6216 struct sockaddr *addr = p;
986e0aeb 6217 int err = 0, skip_mac_1 = 0;
1da177e4 6218
f9804ddb
MC
6219 if (!is_valid_ether_addr(addr->sa_data))
6220 return -EINVAL;
6221
1da177e4
LT
6222 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6223
e75f7c90
MC
6224 if (!netif_running(dev))
6225 return 0;
6226
58712ef9 6227 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
986e0aeb 6228 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 6229
986e0aeb
MC
6230 addr0_high = tr32(MAC_ADDR_0_HIGH);
6231 addr0_low = tr32(MAC_ADDR_0_LOW);
6232 addr1_high = tr32(MAC_ADDR_1_HIGH);
6233 addr1_low = tr32(MAC_ADDR_1_LOW);
6234
6235 /* Skip MAC addr 1 if ASF is using it. */
6236 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6237 !(addr1_high == 0 && addr1_low == 0))
6238 skip_mac_1 = 1;
58712ef9 6239 }
986e0aeb
MC
6240 spin_lock_bh(&tp->lock);
6241 __tg3_set_mac_addr(tp, skip_mac_1);
6242 spin_unlock_bh(&tp->lock);
1da177e4 6243
b9ec6c1b 6244 return err;
1da177e4
LT
6245}
6246
6247/* tp->lock is held. */
6248static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6249 dma_addr_t mapping, u32 maxlen_flags,
6250 u32 nic_addr)
6251{
6252 tg3_write_mem(tp,
6253 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6254 ((u64) mapping >> 32));
6255 tg3_write_mem(tp,
6256 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6257 ((u64) mapping & 0xffffffff));
6258 tg3_write_mem(tp,
6259 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6260 maxlen_flags);
6261
6262 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6263 tg3_write_mem(tp,
6264 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6265 nic_addr);
6266}
6267
6268static void __tg3_set_rx_mode(struct net_device *);
d244c892 6269static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
6270{
6271 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6272 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6273 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6274 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6275 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6276 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6277 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6278 }
6279 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6280 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6281 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6282 u32 val = ec->stats_block_coalesce_usecs;
6283
6284 if (!netif_carrier_ok(tp->dev))
6285 val = 0;
6286
6287 tw32(HOSTCC_STAT_COAL_TICKS, val);
6288 }
6289}
1da177e4
LT
6290
6291/* tp->lock is held. */
8e7a22e3 6292static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6293{
6294 u32 val, rdmac_mode;
6295 int i, err, limit;
6296
6297 tg3_disable_ints(tp);
6298
6299 tg3_stop_fw(tp);
6300
6301 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6302
6303 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 6304 tg3_abort_hw(tp, 1);
1da177e4
LT
6305 }
6306
36da4d86 6307 if (reset_phy)
d4d2c558
MC
6308 tg3_phy_reset(tp);
6309
1da177e4
LT
6310 err = tg3_chip_reset(tp);
6311 if (err)
6312 return err;
6313
6314 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6315
d30cdd28
MC
6316 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6317 val = tr32(TG3_CPMU_CTRL);
6318 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6319 tw32(TG3_CPMU_CTRL, val);
6320 }
6321
1da177e4
LT
6322 /* This works around an issue with Athlon chipsets on
6323 * B3 tigon3 silicon. This bit has no effect on any
6324 * other revision. But do not set this on PCI Express
795d01c5 6325 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 6326 */
795d01c5
MC
6327 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6328 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6329 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6330 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6331 }
1da177e4
LT
6332
6333 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6334 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6335 val = tr32(TG3PCI_PCISTATE);
6336 val |= PCISTATE_RETRY_SAME_DMA;
6337 tw32(TG3PCI_PCISTATE, val);
6338 }
6339
0d3031d9
MC
6340 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6341 /* Allow reads and writes to the
6342 * APE register and memory space.
6343 */
6344 val = tr32(TG3PCI_PCISTATE);
6345 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6346 PCISTATE_ALLOW_APE_SHMEM_WR;
6347 tw32(TG3PCI_PCISTATE, val);
6348 }
6349
1da177e4
LT
6350 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6351 /* Enable some hw fixes. */
6352 val = tr32(TG3PCI_MSI_DATA);
6353 val |= (1 << 26) | (1 << 28) | (1 << 29);
6354 tw32(TG3PCI_MSI_DATA, val);
6355 }
6356
6357 /* Descriptor ring init may make accesses to the
6358 * NIC SRAM area to setup the TX descriptors, so we
6359 * can only do this after the hardware has been
6360 * successfully reset.
6361 */
32d8c572
MC
6362 err = tg3_init_rings(tp);
6363 if (err)
6364 return err;
1da177e4 6365
d30cdd28
MC
6366 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) {
6367 /* This value is determined during the probe time DMA
6368 * engine test, tg3_test_dma.
6369 */
6370 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6371 }
1da177e4
LT
6372
6373 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6374 GRC_MODE_4X_NIC_SEND_RINGS |
6375 GRC_MODE_NO_TX_PHDR_CSUM |
6376 GRC_MODE_NO_RX_PHDR_CSUM);
6377 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
6378
6379 /* Pseudo-header checksum is done by hardware logic and not
6380 * the offload processers, so make the chip do the pseudo-
6381 * header checksums on receive. For transmit it is more
6382 * convenient to do the pseudo-header checksum in software
6383 * as Linux does that on transmit for us in all cases.
6384 */
6385 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
6386
6387 tw32(GRC_MODE,
6388 tp->grc_mode |
6389 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6390
6391 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6392 val = tr32(GRC_MISC_CFG);
6393 val &= ~0xff;
6394 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6395 tw32(GRC_MISC_CFG, val);
6396
6397 /* Initialize MBUF/DESC pool. */
cbf46853 6398 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
6399 /* Do nothing. */
6400 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6401 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6403 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6404 else
6405 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6406 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6407 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6408 }
1da177e4
LT
6409 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6410 int fw_len;
6411
6412 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6413 TG3_TSO5_FW_RODATA_LEN +
6414 TG3_TSO5_FW_DATA_LEN +
6415 TG3_TSO5_FW_SBSS_LEN +
6416 TG3_TSO5_FW_BSS_LEN);
6417 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6418 tw32(BUFMGR_MB_POOL_ADDR,
6419 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6420 tw32(BUFMGR_MB_POOL_SIZE,
6421 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6422 }
1da177e4 6423
0f893dc6 6424 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
6425 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6426 tp->bufmgr_config.mbuf_read_dma_low_water);
6427 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6428 tp->bufmgr_config.mbuf_mac_rx_low_water);
6429 tw32(BUFMGR_MB_HIGH_WATER,
6430 tp->bufmgr_config.mbuf_high_water);
6431 } else {
6432 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6433 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6434 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6435 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6436 tw32(BUFMGR_MB_HIGH_WATER,
6437 tp->bufmgr_config.mbuf_high_water_jumbo);
6438 }
6439 tw32(BUFMGR_DMA_LOW_WATER,
6440 tp->bufmgr_config.dma_low_water);
6441 tw32(BUFMGR_DMA_HIGH_WATER,
6442 tp->bufmgr_config.dma_high_water);
6443
6444 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6445 for (i = 0; i < 2000; i++) {
6446 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6447 break;
6448 udelay(10);
6449 }
6450 if (i >= 2000) {
6451 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6452 tp->dev->name);
6453 return -ENODEV;
6454 }
6455
6456 /* Setup replenish threshold. */
f92905de
MC
6457 val = tp->rx_pending / 8;
6458 if (val == 0)
6459 val = 1;
6460 else if (val > tp->rx_std_max_post)
6461 val = tp->rx_std_max_post;
b5d3772c
MC
6462 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6463 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6464 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6465
6466 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6467 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6468 }
f92905de
MC
6469
6470 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6471
6472 /* Initialize TG3_BDINFO's at:
6473 * RCVDBDI_STD_BD: standard eth size rx ring
6474 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6475 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6476 *
6477 * like so:
6478 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6479 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6480 * ring attribute flags
6481 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6482 *
6483 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6484 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6485 *
6486 * The size of each ring is fixed in the firmware, but the location is
6487 * configurable.
6488 */
6489 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6490 ((u64) tp->rx_std_mapping >> 32));
6491 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6492 ((u64) tp->rx_std_mapping & 0xffffffff));
6493 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6494 NIC_SRAM_RX_BUFFER_DESC);
6495
6496 /* Don't even try to program the JUMBO/MINI buffer descriptor
6497 * configs on 5705.
6498 */
6499 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6500 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6501 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6502 } else {
6503 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6504 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6505
6506 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6507 BDINFO_FLAGS_DISABLED);
6508
6509 /* Setup replenish threshold. */
6510 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6511
0f893dc6 6512 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6513 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6514 ((u64) tp->rx_jumbo_mapping >> 32));
6515 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6516 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6517 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6518 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6519 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6520 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6521 } else {
6522 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6523 BDINFO_FLAGS_DISABLED);
6524 }
6525
6526 }
6527
6528 /* There is only one send ring on 5705/5750, no need to explicitly
6529 * disable the others.
6530 */
6531 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6532 /* Clear out send RCB ring in SRAM. */
6533 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6534 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6535 BDINFO_FLAGS_DISABLED);
6536 }
6537
6538 tp->tx_prod = 0;
6539 tp->tx_cons = 0;
6540 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6541 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6542
6543 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6544 tp->tx_desc_mapping,
6545 (TG3_TX_RING_SIZE <<
6546 BDINFO_FLAGS_MAXLEN_SHIFT),
6547 NIC_SRAM_TX_BUFFER_DESC);
6548
6549 /* There is only one receive return ring on 5705/5750, no need
6550 * to explicitly disable the others.
6551 */
6552 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6553 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6554 i += TG3_BDINFO_SIZE) {
6555 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6556 BDINFO_FLAGS_DISABLED);
6557 }
6558 }
6559
6560 tp->rx_rcb_ptr = 0;
6561 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6562
6563 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6564 tp->rx_rcb_mapping,
6565 (TG3_RX_RCB_RING_SIZE(tp) <<
6566 BDINFO_FLAGS_MAXLEN_SHIFT),
6567 0);
6568
6569 tp->rx_std_ptr = tp->rx_pending;
6570 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6571 tp->rx_std_ptr);
6572
0f893dc6 6573 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6574 tp->rx_jumbo_pending : 0;
6575 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6576 tp->rx_jumbo_ptr);
6577
6578 /* Initialize MAC address and backoff seed. */
986e0aeb 6579 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
6580
6581 /* MTU + ethernet header + FCS + optional VLAN tag */
6582 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6583
6584 /* The slot time is changed by tg3_setup_phy if we
6585 * run at gigabit with half duplex.
6586 */
6587 tw32(MAC_TX_LENGTHS,
6588 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6589 (6 << TX_LENGTHS_IPG_SHIFT) |
6590 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6591
6592 /* Receive rules. */
6593 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6594 tw32(RCVLPC_CONFIG, 0x0181);
6595
6596 /* Calculate RDMAC_MODE setting early, we need it to determine
6597 * the RCVLPC_STATE_ENABLE mask.
6598 */
6599 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6600 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6601 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6602 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6603 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 6604
d30cdd28
MC
6605 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6606 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6607 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6608 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6609
85e94ced
MC
6610 /* If statement applies to 5705 and 5750 PCI devices only */
6611 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6612 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6613 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4 6614 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
c13e3713 6615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
6616 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6617 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6618 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6619 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6620 }
6621 }
6622
85e94ced
MC
6623 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6624 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6625
1da177e4
LT
6626 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6627 rdmac_mode |= (1 << 27);
1da177e4
LT
6628
6629 /* Receive/send statistics. */
1661394e
MC
6630 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6631 val = tr32(RCVLPC_STATS_ENABLE);
6632 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6633 tw32(RCVLPC_STATS_ENABLE, val);
6634 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6635 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
1da177e4
LT
6636 val = tr32(RCVLPC_STATS_ENABLE);
6637 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6638 tw32(RCVLPC_STATS_ENABLE, val);
6639 } else {
6640 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6641 }
6642 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6643 tw32(SNDDATAI_STATSENAB, 0xffffff);
6644 tw32(SNDDATAI_STATSCTRL,
6645 (SNDDATAI_SCTRL_ENABLE |
6646 SNDDATAI_SCTRL_FASTUPD));
6647
6648 /* Setup host coalescing engine. */
6649 tw32(HOSTCC_MODE, 0);
6650 for (i = 0; i < 2000; i++) {
6651 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6652 break;
6653 udelay(10);
6654 }
6655
d244c892 6656 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6657
6658 /* set status block DMA address */
6659 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6660 ((u64) tp->status_mapping >> 32));
6661 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6662 ((u64) tp->status_mapping & 0xffffffff));
6663
6664 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6665 /* Status/statistics block address. See tg3_timer,
6666 * the tg3_periodic_fetch_stats call there, and
6667 * tg3_get_stats to see how this works for 5705/5750 chips.
6668 */
1da177e4
LT
6669 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6670 ((u64) tp->stats_mapping >> 32));
6671 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6672 ((u64) tp->stats_mapping & 0xffffffff));
6673 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6674 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6675 }
6676
6677 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6678
6679 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6680 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6681 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6682 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6683
6684 /* Clear statistics/status block in chip, and status block in ram. */
6685 for (i = NIC_SRAM_STATS_BLK;
6686 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6687 i += sizeof(u32)) {
6688 tg3_write_mem(tp, i, 0);
6689 udelay(40);
6690 }
6691 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6692
c94e3941
MC
6693 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6694 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6695 /* reset to prevent losing 1st rx packet intermittently */
6696 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6697 udelay(10);
6698 }
6699
1da177e4
LT
6700 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6701 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
e8f3f6ca
MC
6702 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6703 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6704 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6705 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
6706 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6707 udelay(40);
6708
314fba34 6709 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9d26e213 6710 * If TG3_FLG2_IS_NIC is zero, we should read the
314fba34
MC
6711 * register to preserve the GPIO settings for LOMs. The GPIOs,
6712 * whether used as inputs or outputs, are set by boot code after
6713 * reset.
6714 */
9d26e213 6715 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
314fba34
MC
6716 u32 gpio_mask;
6717
9d26e213
MC
6718 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6719 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6720 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
6721
6722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6723 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6724 GRC_LCLCTRL_GPIO_OUTPUT3;
6725
af36e6b6
MC
6726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6727 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6728
aaf84465 6729 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
6730 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6731
6732 /* GPIO1 must be driven high for eeprom write protect */
9d26e213
MC
6733 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6734 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6735 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 6736 }
1da177e4
LT
6737 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6738 udelay(100);
6739
09ee929c 6740 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 6741 tp->last_tag = 0;
1da177e4
LT
6742
6743 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6744 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6745 udelay(40);
6746 }
6747
6748 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6749 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6750 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6751 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6752 WDMAC_MODE_LNGREAD_ENAB);
6753
85e94ced
MC
6754 /* If statement applies to 5705 and 5750 PCI devices only */
6755 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6756 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6757 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6758 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6759 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6760 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6761 /* nothing */
6762 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6763 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6764 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6765 val |= WDMAC_MODE_RX_ACCEL;
6766 }
6767 }
6768
d9ab5ad1 6769 /* Enable host coalescing bug fix */
af36e6b6 6770 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
d30cdd28
MC
6771 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6772 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
d9ab5ad1
MC
6773 val |= (1 << 29);
6774
1da177e4
LT
6775 tw32_f(WDMAC_MODE, val);
6776 udelay(40);
6777
9974a356
MC
6778 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6779 u16 pcix_cmd;
6780
6781 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6782 &pcix_cmd);
1da177e4 6783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
6784 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6785 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6786 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
6787 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6788 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6789 }
9974a356
MC
6790 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6791 pcix_cmd);
1da177e4
LT
6792 }
6793
6794 tw32_f(RDMAC_MODE, rdmac_mode);
6795 udelay(40);
6796
6797 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6798 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6799 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6800 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6801 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6802 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6803 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6804 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1da177e4
LT
6805 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6806 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
1da177e4
LT
6807 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6808 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6809
6810 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6811 err = tg3_load_5701_a0_firmware_fix(tp);
6812 if (err)
6813 return err;
6814 }
6815
1da177e4
LT
6816 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6817 err = tg3_load_tso_firmware(tp);
6818 if (err)
6819 return err;
6820 }
1da177e4
LT
6821
6822 tp->tx_mode = TX_MODE_ENABLE;
6823 tw32_f(MAC_TX_MODE, tp->tx_mode);
6824 udelay(100);
6825
6826 tp->rx_mode = RX_MODE_ENABLE;
af36e6b6
MC
6827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6828 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6829
1da177e4
LT
6830 tw32_f(MAC_RX_MODE, tp->rx_mode);
6831 udelay(10);
6832
6833 if (tp->link_config.phy_is_low_power) {
6834 tp->link_config.phy_is_low_power = 0;
6835 tp->link_config.speed = tp->link_config.orig_speed;
6836 tp->link_config.duplex = tp->link_config.orig_duplex;
6837 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6838 }
6839
6840 tp->mi_mode = MAC_MI_MODE_BASE;
6841 tw32_f(MAC_MI_MODE, tp->mi_mode);
6842 udelay(80);
6843
6844 tw32(MAC_LED_CTRL, tp->led_ctrl);
6845
6846 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6847 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6848 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6849 udelay(10);
6850 }
6851 tw32_f(MAC_RX_MODE, tp->rx_mode);
6852 udelay(10);
6853
6854 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6855 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6856 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6857 /* Set drive transmission level to 1.2V */
6858 /* only if the signal pre-emphasis bit is not set */
6859 val = tr32(MAC_SERDES_CFG);
6860 val &= 0xfffff000;
6861 val |= 0x880;
6862 tw32(MAC_SERDES_CFG, val);
6863 }
6864 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6865 tw32(MAC_SERDES_CFG, 0x616000);
6866 }
6867
6868 /* Prevent chip from dropping frames when flow control
6869 * is enabled.
6870 */
6871 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6872
6873 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6874 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6875 /* Use hardware link auto-negotiation */
6876 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6877 }
6878
d4d2c558
MC
6879 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6880 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6881 u32 tmp;
6882
6883 tmp = tr32(SERDES_RX_CTRL);
6884 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6885 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6886 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6887 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6888 }
6889
36da4d86 6890 err = tg3_setup_phy(tp, 0);
1da177e4
LT
6891 if (err)
6892 return err;
6893
715116a1
MC
6894 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6895 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
1da177e4
LT
6896 u32 tmp;
6897
6898 /* Clear CRC stats. */
569a5df8
MC
6899 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6900 tg3_writephy(tp, MII_TG3_TEST1,
6901 tmp | MII_TG3_TEST1_CRC_EN);
1da177e4
LT
6902 tg3_readphy(tp, 0x14, &tmp);
6903 }
6904 }
6905
6906 __tg3_set_rx_mode(tp->dev);
6907
6908 /* Initialize receive rules. */
6909 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6910 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6911 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6912 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6913
4cf78e4f 6914 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6915 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6916 limit = 8;
6917 else
6918 limit = 16;
6919 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6920 limit -= 4;
6921 switch (limit) {
6922 case 16:
6923 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6924 case 15:
6925 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6926 case 14:
6927 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6928 case 13:
6929 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6930 case 12:
6931 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6932 case 11:
6933 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6934 case 10:
6935 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6936 case 9:
6937 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6938 case 8:
6939 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6940 case 7:
6941 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6942 case 6:
6943 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6944 case 5:
6945 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6946 case 4:
6947 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6948 case 3:
6949 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6950 case 2:
6951 case 1:
6952
6953 default:
6954 break;
6955 };
6956
0d3031d9
MC
6957 /* Write our heartbeat update interval to APE. */
6958 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
6959 APE_HOST_HEARTBEAT_INT_DISABLE);
6960
1da177e4
LT
6961 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6962
1da177e4
LT
6963 return 0;
6964}
6965
6966/* Called at device open time to get the chip ready for
6967 * packet processing. Invoked with tp->lock held.
6968 */
8e7a22e3 6969static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6970{
6971 int err;
6972
6973 /* Force the chip into D0. */
bc1c7567 6974 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
6975 if (err)
6976 goto out;
6977
6978 tg3_switch_clocks(tp);
6979
6980 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6981
8e7a22e3 6982 err = tg3_reset_hw(tp, reset_phy);
1da177e4
LT
6983
6984out:
6985 return err;
6986}
6987
6988#define TG3_STAT_ADD32(PSTAT, REG) \
6989do { u32 __val = tr32(REG); \
6990 (PSTAT)->low += __val; \
6991 if ((PSTAT)->low < __val) \
6992 (PSTAT)->high += 1; \
6993} while (0)
6994
6995static void tg3_periodic_fetch_stats(struct tg3 *tp)
6996{
6997 struct tg3_hw_stats *sp = tp->hw_stats;
6998
6999 if (!netif_carrier_ok(tp->dev))
7000 return;
7001
7002 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7003 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7004 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7005 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7006 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7007 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7008 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7009 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7010 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7011 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7012 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7013 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7014 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7015
7016 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7017 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7018 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7019 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7020 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7021 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7022 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7023 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7024 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7025 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7026 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7027 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7028 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7029 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
463d305b
MC
7030
7031 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7032 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7033 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
1da177e4
LT
7034}
7035
7036static void tg3_timer(unsigned long __opaque)
7037{
7038 struct tg3 *tp = (struct tg3 *) __opaque;
1da177e4 7039
f475f163
MC
7040 if (tp->irq_sync)
7041 goto restart_timer;
7042
f47c11ee 7043 spin_lock(&tp->lock);
1da177e4 7044
fac9b83e
DM
7045 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7046 /* All of this garbage is because when using non-tagged
7047 * IRQ status the mailbox/status_block protocol the chip
7048 * uses with the cpu is race prone.
7049 */
7050 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7051 tw32(GRC_LOCAL_CTRL,
7052 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7053 } else {
7054 tw32(HOSTCC_MODE, tp->coalesce_mode |
7055 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7056 }
1da177e4 7057
fac9b83e
DM
7058 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7059 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
f47c11ee 7060 spin_unlock(&tp->lock);
fac9b83e
DM
7061 schedule_work(&tp->reset_task);
7062 return;
7063 }
1da177e4
LT
7064 }
7065
1da177e4
LT
7066 /* This part only runs once per second. */
7067 if (!--tp->timer_counter) {
fac9b83e
DM
7068 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7069 tg3_periodic_fetch_stats(tp);
7070
1da177e4
LT
7071 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7072 u32 mac_stat;
7073 int phy_event;
7074
7075 mac_stat = tr32(MAC_STATUS);
7076
7077 phy_event = 0;
7078 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7079 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7080 phy_event = 1;
7081 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7082 phy_event = 1;
7083
7084 if (phy_event)
7085 tg3_setup_phy(tp, 0);
7086 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7087 u32 mac_stat = tr32(MAC_STATUS);
7088 int need_setup = 0;
7089
7090 if (netif_carrier_ok(tp->dev) &&
7091 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7092 need_setup = 1;
7093 }
7094 if (! netif_carrier_ok(tp->dev) &&
7095 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7096 MAC_STATUS_SIGNAL_DET))) {
7097 need_setup = 1;
7098 }
7099 if (need_setup) {
3d3ebe74
MC
7100 if (!tp->serdes_counter) {
7101 tw32_f(MAC_MODE,
7102 (tp->mac_mode &
7103 ~MAC_MODE_PORT_MODE_MASK));
7104 udelay(40);
7105 tw32_f(MAC_MODE, tp->mac_mode);
7106 udelay(40);
7107 }
1da177e4
LT
7108 tg3_setup_phy(tp, 0);
7109 }
747e8f8b
MC
7110 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7111 tg3_serdes_parallel_detect(tp);
1da177e4
LT
7112
7113 tp->timer_counter = tp->timer_multiplier;
7114 }
7115
130b8e4d
MC
7116 /* Heartbeat is only sent once every 2 seconds.
7117 *
7118 * The heartbeat is to tell the ASF firmware that the host
7119 * driver is still alive. In the event that the OS crashes,
7120 * ASF needs to reset the hardware to free up the FIFO space
7121 * that may be filled with rx packets destined for the host.
7122 * If the FIFO is full, ASF will no longer function properly.
7123 *
7124 * Unintended resets have been reported on real time kernels
7125 * where the timer doesn't run on time. Netpoll will also have
7126 * same problem.
7127 *
7128 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7129 * to check the ring condition when the heartbeat is expiring
7130 * before doing the reset. This will prevent most unintended
7131 * resets.
7132 */
1da177e4
LT
7133 if (!--tp->asf_counter) {
7134 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7135 u32 val;
7136
bbadf503 7137 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
130b8e4d 7138 FWCMD_NICDRV_ALIVE3);
bbadf503 7139 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
28fbef78 7140 /* 5 seconds timeout */
bbadf503 7141 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
1da177e4
LT
7142 val = tr32(GRC_RX_CPU_EVENT);
7143 val |= (1 << 14);
7144 tw32(GRC_RX_CPU_EVENT, val);
7145 }
7146 tp->asf_counter = tp->asf_multiplier;
7147 }
7148
f47c11ee 7149 spin_unlock(&tp->lock);
1da177e4 7150
f475f163 7151restart_timer:
1da177e4
LT
7152 tp->timer.expires = jiffies + tp->timer_offset;
7153 add_timer(&tp->timer);
7154}
7155
81789ef5 7156static int tg3_request_irq(struct tg3 *tp)
fcfa0a32 7157{
7d12e780 7158 irq_handler_t fn;
fcfa0a32
MC
7159 unsigned long flags;
7160 struct net_device *dev = tp->dev;
7161
7162 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7163 fn = tg3_msi;
7164 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7165 fn = tg3_msi_1shot;
1fb9df5d 7166 flags = IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7167 } else {
7168 fn = tg3_interrupt;
7169 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7170 fn = tg3_interrupt_tagged;
1fb9df5d 7171 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7172 }
7173 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7174}
7175
7938109f
MC
7176static int tg3_test_interrupt(struct tg3 *tp)
7177{
7178 struct net_device *dev = tp->dev;
b16250e3 7179 int err, i, intr_ok = 0;
7938109f 7180
d4bc3927
MC
7181 if (!netif_running(dev))
7182 return -ENODEV;
7183
7938109f
MC
7184 tg3_disable_ints(tp);
7185
7186 free_irq(tp->pdev->irq, dev);
7187
7188 err = request_irq(tp->pdev->irq, tg3_test_isr,
1fb9df5d 7189 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7938109f
MC
7190 if (err)
7191 return err;
7192
38f3843e 7193 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
7194 tg3_enable_ints(tp);
7195
7196 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7197 HOSTCC_MODE_NOW);
7198
7199 for (i = 0; i < 5; i++) {
b16250e3
MC
7200 u32 int_mbox, misc_host_ctrl;
7201
09ee929c
MC
7202 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7203 TG3_64BIT_REG_LOW);
b16250e3
MC
7204 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7205
7206 if ((int_mbox != 0) ||
7207 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7208 intr_ok = 1;
7938109f 7209 break;
b16250e3
MC
7210 }
7211
7938109f
MC
7212 msleep(10);
7213 }
7214
7215 tg3_disable_ints(tp);
7216
7217 free_irq(tp->pdev->irq, dev);
6aa20a22 7218
fcfa0a32 7219 err = tg3_request_irq(tp);
7938109f
MC
7220
7221 if (err)
7222 return err;
7223
b16250e3 7224 if (intr_ok)
7938109f
MC
7225 return 0;
7226
7227 return -EIO;
7228}
7229
7230/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7231 * successfully restored
7232 */
7233static int tg3_test_msi(struct tg3 *tp)
7234{
7235 struct net_device *dev = tp->dev;
7236 int err;
7237 u16 pci_cmd;
7238
7239 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7240 return 0;
7241
7242 /* Turn off SERR reporting in case MSI terminates with Master
7243 * Abort.
7244 */
7245 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7246 pci_write_config_word(tp->pdev, PCI_COMMAND,
7247 pci_cmd & ~PCI_COMMAND_SERR);
7248
7249 err = tg3_test_interrupt(tp);
7250
7251 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7252
7253 if (!err)
7254 return 0;
7255
7256 /* other failures */
7257 if (err != -EIO)
7258 return err;
7259
7260 /* MSI test failed, go back to INTx mode */
7261 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7262 "switching to INTx mode. Please report this failure to "
7263 "the PCI maintainer and include system chipset information.\n",
7264 tp->dev->name);
7265
7266 free_irq(tp->pdev->irq, dev);
7267 pci_disable_msi(tp->pdev);
7268
7269 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7270
fcfa0a32 7271 err = tg3_request_irq(tp);
7938109f
MC
7272 if (err)
7273 return err;
7274
7275 /* Need to reset the chip because the MSI cycle may have terminated
7276 * with Master Abort.
7277 */
f47c11ee 7278 tg3_full_lock(tp, 1);
7938109f 7279
944d980e 7280 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8e7a22e3 7281 err = tg3_init_hw(tp, 1);
7938109f 7282
f47c11ee 7283 tg3_full_unlock(tp);
7938109f
MC
7284
7285 if (err)
7286 free_irq(tp->pdev->irq, dev);
7287
7288 return err;
7289}
7290
1da177e4
LT
/* net_device open() hook: bring the chip to D0, allocate DMA-consistent
 * rings, request the interrupt (MSI when the chip supports it), init the
 * hardware, verify MSI delivery, then arm the driver timer and enable
 * interrupts.  Every failure path unwinds exactly what was set up.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Power the device up before touching any other registers. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Hardware bug - MSI won't work if INTX disabled. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				pci_intx(tp->pdev, 1);

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo the MSI setup and ring allocation done above. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status allows a slower (1s) service timer;
		 * otherwise poll ten times per second.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Confirm the MSI actually reaches us before committing
		 * to it; fall out with a full teardown if it does not.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7430
#if 0
/* Debug-only (compiled out): dump the major hardware block mode/status
 * registers, on-chip SRAM control blocks, host status/statistics blocks
 * and the first few NIC-side TX/RX descriptors to the kernel log.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* On-chip send and receive-return ring control blocks. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7658
7659static struct net_device_stats *tg3_get_stats(struct net_device *);
7660static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7661
/* net_device stop() hook: quiesce NAPI, the reset worker and the service
 * timer, halt the chip under the full lock, release the IRQ/MSI, snapshot
 * counters into *_prev so they survive a down/up cycle, free the DMA
 * rings and drop the device to D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Make sure no reset_task is still running before we tear down. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the accumulated counters; hw_stats is about to go away
	 * with tg3_free_consistent() below.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7705
7706static inline unsigned long get_stat64(tg3_stat64_t *val)
7707{
7708 unsigned long ret;
7709
7710#if (BITS_PER_LONG == 32)
7711 ret = val->low;
7712#else
7713 ret = ((u64)val->high << 32) | ((u64)val->low);
7714#endif
7715 return ret;
7716}
7717
/* Return the running CRC-error count.  On 5700/5701 copper parts the
 * count is maintained in software from a PHY test register; everything
 * else reads the MAC's rx_fcs_errors hardware counter.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY accesses must be serialized against the rest of
		 * the driver.
		 */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable CRC counting, then read the count from
			 * PHY register 0x14.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7743
/* Add the live hardware counter for one member on top of the snapshot
 * taken at the last close (tp->estats_prev), so ethtool statistics keep
 * accumulating across down/up cycles.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Build the full ethtool statistics block from the DMA'd hardware
 * statistics plus the pre-close snapshot.  Falls back to the snapshot
 * alone when the hardware stats block is not mapped (device closed).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7835
/* net_device get_stats hook: fold the DMA'd hardware counters into
 * standard net_device_stats fields, added on top of the snapshot saved
 * at the last close.  If the hardware stats block is not mapped (device
 * down), return the snapshot unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the unicast/multicast/broadcast
	 * hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper parts. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7895
7896static inline u32 calc_crc(unsigned char *buf, int len)
7897{
7898 u32 reg;
7899 u32 tmp;
7900 int j, k;
7901
7902 reg = 0xffffffff;
7903
7904 for (j = 0; j < len; j++) {
7905 reg ^= buf[j];
7906
7907 for (k = 0; k < 8; k++) {
7908 tmp = reg & 0x01;
7909
7910 reg >>= 1;
7911
7912 if (tmp) {
7913 reg ^= 0xedb88320;
7914 }
7915 }
7916 }
7917
7918 return ~reg;
7919}
7920
7921static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7922{
7923 /* accept or reject all multicast frames */
7924 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7925 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7926 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7927 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7928}
7929
/* Apply the interface's RX filtering policy (promiscuous / all-multi /
 * hash-filtered multicast / VLAN tag stripping) to the MAC registers.
 * Caller must hold the driver lock (see tg3_set_rx_mode).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 7 bits of the inverted CRC select one of the
			 * 128 hash filter bits: bits 6:5 pick the register,
			 * bits 4:0 the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when something actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7993
/* net_device set_rx_mode hook: take the full driver lock and let
 * __tg3_set_rx_mode() do the actual register programming.  A closed
 * interface is left alone.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(priv, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(priv);
}
8005
/* Size of the ethtool register dump window. */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook: the dump produced by tg3_get_regs() always
 * spans the full fixed-size window, independent of the device.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	(void) dev;	/* unused: length is constant */
	return TG3_REGDUMP_LEN;
}
8012
/* ethtool get_regs hook: copy selected register ranges into the caller's
 * TG3_REGDUMP_LEN buffer, placing each register at its own offset within
 * the dump (unsampled areas stay zero).  Skipped entirely while the PHY
 * is in low-power mode.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump `len` bytes of registers starting at `base`, positioning the
 * cursor at the same offset inside the output buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers exist only when the NVRAM interface does. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8085
8086static int tg3_get_eeprom_len(struct net_device *dev)
8087{
8088 struct tg3 *tp = netdev_priv(dev);
8089
8090 return tp->nvram_size;
8091}
8092
8093static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
1820180b 8094static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
8095
/* ethtool get_eeprom hook: copy an arbitrary byte range out of NVRAM.
 * NVRAM is read in aligned 32-bit words, so the transfer is split into
 * an unaligned head, an aligned middle, and an unaligned tail.
 * eeprom->len tracks how many bytes were actually produced, including
 * on early error returns.  Refused (-EAGAIN) while in low-power mode.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Fix byte order before extracting the partial word. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes copied so far. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8157
6aa20a22 8158static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
1da177e4
LT
8159
/* ethtool set_eeprom hook: write an arbitrary byte range to NVRAM.
 * Writes must be whole aligned 32-bit words, so partial words at either
 * end are read-modify-written: the surrounding words are fetched first
 * and merged with the user data in a temporary buffer.  Refused while
 * in low-power mode or when the caller's magic does not match.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		/* Never write less than one full word. */
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved head/tail words with the user data
		 * in a scratch buffer sized to the aligned length.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8218
/* ethtool get_settings hook: report supported modes (based on the
 * 10/100-only and serdes capability flags), the configured advertisement
 * mask, and — only while the interface is running — the active
 * speed/duplex.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		/* Copper: 10/100 half/full over MII. */
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
6aa20a22 8253
1da177e4
LT
8254static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8255{
8256 struct tg3 *tp = netdev_priv(dev);
6aa20a22
JG
8257
8258 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
8259 /* These are the only valid advertisement bits allowed. */
8260 if (cmd->autoneg == AUTONEG_ENABLE &&
8261 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8262 ADVERTISED_1000baseT_Full |
8263 ADVERTISED_Autoneg |
8264 ADVERTISED_FIBRE)))
8265 return -EINVAL;
37ff238d
MC
8266 /* Fiber can only do SPEED_1000. */
8267 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8268 (cmd->speed != SPEED_1000))
8269 return -EINVAL;
8270 /* Copper cannot force SPEED_1000. */
8271 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8272 (cmd->speed == SPEED_1000))
8273 return -EINVAL;
8274 else if ((cmd->speed == SPEED_1000) &&
8275 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8276 return -EINVAL;
1da177e4 8277
f47c11ee 8278 tg3_full_lock(tp, 0);
1da177e4
LT
8279
8280 tp->link_config.autoneg = cmd->autoneg;
8281 if (cmd->autoneg == AUTONEG_ENABLE) {
405d8e5c
AG
8282 tp->link_config.advertising = (cmd->advertising |
8283 ADVERTISED_Autoneg);
1da177e4
LT
8284 tp->link_config.speed = SPEED_INVALID;
8285 tp->link_config.duplex = DUPLEX_INVALID;
8286 } else {
8287 tp->link_config.advertising = 0;
8288 tp->link_config.speed = cmd->speed;
8289 tp->link_config.duplex = cmd->duplex;
8290 }
6aa20a22 8291
24fcad6b
MC
8292 tp->link_config.orig_speed = tp->link_config.speed;
8293 tp->link_config.orig_duplex = tp->link_config.duplex;
8294 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8295
1da177e4
LT
8296 if (netif_running(dev))
8297 tg3_setup_phy(tp, 1);
8298
f47c11ee 8299 tg3_full_unlock(tp);
6aa20a22 8300
1da177e4
LT
8301 return 0;
8302}
6aa20a22 8303
1da177e4
LT
8304static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8305{
8306 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8307
1da177e4
LT
8308 strcpy(info->driver, DRV_MODULE_NAME);
8309 strcpy(info->version, DRV_MODULE_VERSION);
c4e6575c 8310 strcpy(info->fw_version, tp->fw_ver);
1da177e4
LT
8311 strcpy(info->bus_info, pci_name(tp->pdev));
8312}
6aa20a22 8313
1da177e4
LT
8314static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8315{
8316 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8317
a85feb8c
GZ
8318 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8319 wol->supported = WAKE_MAGIC;
8320 else
8321 wol->supported = 0;
1da177e4
LT
8322 wol->wolopts = 0;
8323 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8324 wol->wolopts = WAKE_MAGIC;
8325 memset(&wol->sopass, 0, sizeof(wol->sopass));
8326}
6aa20a22 8327
1da177e4
LT
/* ethtool set_wol hook: accept only WAKE_MAGIC, and only when the
 * hardware is WOL-capable; toggle the enable flag under the lock.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Reject any wake mode other than magic packet. */
	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}
6aa20a22 8347
1da177e4
LT
/* ethtool msglvl: report the driver's debug message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
6aa20a22 8353
1da177e4
LT
/* ethtool msglvl: set the driver's debug message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
6aa20a22 8359
1da177e4
LT
8360static int tg3_set_tso(struct net_device *dev, u32 value)
8361{
8362 struct tg3 *tp = netdev_priv(dev);
8363
8364 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8365 if (value)
8366 return -EINVAL;
8367 return 0;
8368 }
b5d3772c
MC
8369 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8370 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
b0026624
MC
8371 if (value)
8372 dev->features |= NETIF_F_TSO6;
8373 else
8374 dev->features &= ~NETIF_F_TSO6;
8375 }
1da177e4
LT
8376 return ethtool_op_set_tso(dev, value);
8377}
6aa20a22 8378
1da177e4
LT
/* ethtool -r: restart link autonegotiation.
 *
 * Returns -EAGAIN if the interface is down, -EINVAL for fiber (SERDES)
 * devices or when autoneg is not enabled on the PHY, 0 on success.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice and the first result is
	 * discarded.  This looks like a deliberate dummy read to settle
	 * the MDIO interface — confirm before removing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick off a fresh autonegotiation cycle. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
6aa20a22 8405
1da177e4
LT
8406static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8407{
8408 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8409
1da177e4
LT
8410 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8411 ering->rx_mini_max_pending = 0;
4f81c32b
MC
8412 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8413 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8414 else
8415 ering->rx_jumbo_max_pending = 0;
8416
8417 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
8418
8419 ering->rx_pending = tp->rx_pending;
8420 ering->rx_mini_pending = 0;
4f81c32b
MC
8421 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8422 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8423 else
8424 ering->rx_jumbo_pending = 0;
8425
1da177e4
LT
8426 ering->tx_pending = tp->tx_pending;
8427}
6aa20a22 8428
1da177e4
LT
/* ethtool -G: change ring sizes, restarting the chip if it is running.
 *
 * The tx ring must be large enough to hold a maximally-fragmented skb
 * (3x for TSO-bug parts, which may have to segment in the driver).
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce the device before touching the ring configuration. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard rx descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	/* Apply the new sizes with a full chip halt + restart. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8468
1da177e4
LT
8469static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8470{
8471 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8472
1da177e4
LT
8473 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8474 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8475 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8476}
6aa20a22 8477
1da177e4
LT
8478static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8479{
8480 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 8481 int irq_sync = 0, err = 0;
6aa20a22 8482
bbe832c0 8483 if (netif_running(dev)) {
1da177e4 8484 tg3_netif_stop(tp);
bbe832c0
MC
8485 irq_sync = 1;
8486 }
1da177e4 8487
bbe832c0 8488 tg3_full_lock(tp, irq_sync);
f47c11ee 8489
1da177e4
LT
8490 if (epause->autoneg)
8491 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8492 else
8493 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8494 if (epause->rx_pause)
8495 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8496 else
8497 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8498 if (epause->tx_pause)
8499 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8500 else
8501 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8502
8503 if (netif_running(dev)) {
944d980e 8504 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
b9ec6c1b
MC
8505 err = tg3_restart_hw(tp, 1);
8506 if (!err)
8507 tg3_netif_start(tp);
1da177e4 8508 }
f47c11ee
DM
8509
8510 tg3_full_unlock(tp);
6aa20a22 8511
b9ec6c1b 8512 return err;
1da177e4 8513}
6aa20a22 8514
1da177e4
LT
8515static u32 tg3_get_rx_csum(struct net_device *dev)
8516{
8517 struct tg3 *tp = netdev_priv(dev);
8518 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8519}
6aa20a22 8520
1da177e4
LT
8521static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8522{
8523 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8524
1da177e4
LT
8525 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8526 if (data != 0)
8527 return -EINVAL;
8528 return 0;
8529 }
6aa20a22 8530
f47c11ee 8531 spin_lock_bh(&tp->lock);
1da177e4
LT
8532 if (data)
8533 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8534 else
8535 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 8536 spin_unlock_bh(&tp->lock);
6aa20a22 8537
1da177e4
LT
8538 return 0;
8539}
6aa20a22 8540
1da177e4
LT
8541static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8542{
8543 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8544
1da177e4
LT
8545 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8546 if (data != 0)
8547 return -EINVAL;
8548 return 0;
8549 }
6aa20a22 8550
af36e6b6 8551 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28
MC
8552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6460d948 8554 ethtool_op_set_tx_ipv6_csum(dev, data);
1da177e4 8555 else
9c27dbdf 8556 ethtool_op_set_tx_csum(dev, data);
1da177e4
LT
8557
8558 return 0;
8559}
8560
b9f2c044 8561static int tg3_get_sset_count (struct net_device *dev, int sset)
1da177e4 8562{
b9f2c044
JG
8563 switch (sset) {
8564 case ETH_SS_TEST:
8565 return TG3_NUM_TEST;
8566 case ETH_SS_STATS:
8567 return TG3_NUM_STATS;
8568 default:
8569 return -EOPNOTSUPP;
8570 }
4cafd3f5
MC
8571}
8572
1da177e4
LT
8573static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8574{
8575 switch (stringset) {
8576 case ETH_SS_STATS:
8577 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8578 break;
4cafd3f5
MC
8579 case ETH_SS_TEST:
8580 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8581 break;
1da177e4
LT
8582 default:
8583 WARN_ON(1); /* we need a WARN() */
8584 break;
8585 }
8586}
8587
4009a93d
MC
8588static int tg3_phys_id(struct net_device *dev, u32 data)
8589{
8590 struct tg3 *tp = netdev_priv(dev);
8591 int i;
8592
8593 if (!netif_running(tp->dev))
8594 return -EAGAIN;
8595
8596 if (data == 0)
8597 data = 2;
8598
8599 for (i = 0; i < (data * 2); i++) {
8600 if ((i % 2) == 0)
8601 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8602 LED_CTRL_1000MBPS_ON |
8603 LED_CTRL_100MBPS_ON |
8604 LED_CTRL_10MBPS_ON |
8605 LED_CTRL_TRAFFIC_OVERRIDE |
8606 LED_CTRL_TRAFFIC_BLINK |
8607 LED_CTRL_TRAFFIC_LED);
6aa20a22 8608
4009a93d
MC
8609 else
8610 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8611 LED_CTRL_TRAFFIC_OVERRIDE);
8612
8613 if (msleep_interruptible(500))
8614 break;
8615 }
8616 tw32(MAC_LED_CTRL, tp->led_ctrl);
8617 return 0;
8618}
8619
1da177e4
LT
/* ethtool -S: copy the driver-maintained statistics block to the caller. */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8626
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool self-test #0: verify the NVRAM contents.
 *
 * The magic word at offset 0 selects the image format, which determines
 * how much to read and which integrity scheme applies:
 *  - legacy images:   two CRCs (bootstrap block and manufacturing block)
 *  - selfboot FW:     8-bit additive checksum over the whole image
 *  - selfboot HW:     per-byte odd parity, parity bits packed separately
 * Returns 0 on success, -EIO on read failure or bad checksum/parity,
 * -ENOMEM if the scratch buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;	/* unknown FW sub-format: skip test */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		/* buffer holds the words in NVRAM (little-endian) order */
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* 8-bit additive checksum over the image must be zero. */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  Parity
		 * bits are packed into bytes 0, 8 and 16/17 of the image.
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* each byte + its parity bit must have odd weight */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Legacy format: CRCs over the bootstrap and manufacturing blocks. */
	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8744
ca43007a
MC
8745#define TG3_SERDES_TIMEOUT_SEC 2
8746#define TG3_COPPER_TIMEOUT_SEC 6
8747
8748static int tg3_test_link(struct tg3 *tp)
8749{
8750 int i, max;
8751
8752 if (!netif_running(tp->dev))
8753 return -ENODEV;
8754
4c987487 8755 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
8756 max = TG3_SERDES_TIMEOUT_SEC;
8757 else
8758 max = TG3_COPPER_TIMEOUT_SEC;
8759
8760 for (i = 0; i < max; i++) {
8761 if (netif_carrier_ok(tp->dev))
8762 return 0;
8763
8764 if (msleep_interruptible(1000))
8765 break;
8766 }
8767
8768 return -EIO;
8769}
8770
/* Only test the commonly used registers */
/* ethtool self-test #2: walk a table of registers, verifying that the
 * read-only bits hold their value and the read/write bits accept both
 * all-zeros and all-ones.  Each entry is gated on the chip generation
 * (5705-class, 5750-class, 5788) via its flags field.  The original
 * register value is restored after each test and on failure.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
8991
7942e1db
MC
8992static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8993{
f71e1309 8994 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
8995 int i;
8996 u32 j;
8997
8998 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8999 for (j = 0; j < len; j += 4) {
9000 u32 val;
9001
9002 tg3_write_mem(tp, offset + j, test_pattern[i]);
9003 tg3_read_mem(tp, offset + j, &val);
9004 if (val != test_pattern[i])
9005 return -EIO;
9006 }
9007 }
9008 return 0;
9009}
9010
9011static int tg3_test_memory(struct tg3 *tp)
9012{
9013 static struct mem_entry {
9014 u32 offset;
9015 u32 len;
9016 } mem_tbl_570x[] = {
38690194 9017 { 0x00000000, 0x00b50},
7942e1db
MC
9018 { 0x00002000, 0x1c000},
9019 { 0xffffffff, 0x00000}
9020 }, mem_tbl_5705[] = {
9021 { 0x00000100, 0x0000c},
9022 { 0x00000200, 0x00008},
7942e1db
MC
9023 { 0x00004000, 0x00800},
9024 { 0x00006000, 0x01000},
9025 { 0x00008000, 0x02000},
9026 { 0x00010000, 0x0e000},
9027 { 0xffffffff, 0x00000}
79f4d13a
MC
9028 }, mem_tbl_5755[] = {
9029 { 0x00000200, 0x00008},
9030 { 0x00004000, 0x00800},
9031 { 0x00006000, 0x00800},
9032 { 0x00008000, 0x02000},
9033 { 0x00010000, 0x0c000},
9034 { 0xffffffff, 0x00000}
b16250e3
MC
9035 }, mem_tbl_5906[] = {
9036 { 0x00000200, 0x00008},
9037 { 0x00004000, 0x00400},
9038 { 0x00006000, 0x00400},
9039 { 0x00008000, 0x01000},
9040 { 0x00010000, 0x01000},
9041 { 0xffffffff, 0x00000}
7942e1db
MC
9042 };
9043 struct mem_entry *mem_tbl;
9044 int err = 0;
9045 int i;
9046
79f4d13a 9047 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
af36e6b6 9048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28
MC
9049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
79f4d13a 9051 mem_tbl = mem_tbl_5755;
b16250e3
MC
9052 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9053 mem_tbl = mem_tbl_5906;
79f4d13a
MC
9054 else
9055 mem_tbl = mem_tbl_5705;
9056 } else
7942e1db
MC
9057 mem_tbl = mem_tbl_570x;
9058
9059 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9060 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9061 mem_tbl[i].len)) != 0)
9062 break;
9063 }
6aa20a22 9064
7942e1db
MC
9065 return err;
9066}
9067
9f40dead
MC
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Run one loopback iteration: configure MAC-internal or PHY loopback,
 * transmit a single self-addressed 1514-byte frame carrying a known
 * byte pattern, and verify it comes back intact on the standard rx
 * ring.  Returns 0 on success, -EIO if the frame is lost or corrupted,
 * -ENOMEM if the test skb cannot be allocated.  Caller must hold the
 * device quiesced (called from tg3_test_loopback() after a chip reset).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: tweak a shadow register via EPHY_TEST
			 * before entering loopback.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5700 link-polarity depends on the attached PHY. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Frame: our own MAC as destination, then a counting byte pattern. */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the tx doorbell and flush with a read-back. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the receive descriptor: right ring, no errors,
	 * expected length (rx length includes the 4-byte FCS).
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9238
9f40dead
MC
9239#define TG3_MAC_LOOPBACK_FAILED 1
9240#define TG3_PHY_LOOPBACK_FAILED 2
9241#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9242 TG3_PHY_LOOPBACK_FAILED)
9243
9244static int tg3_test_loopback(struct tg3 *tp)
9245{
9246 int err = 0;
9247
9248 if (!netif_running(tp->dev))
9249 return TG3_LOOPBACK_FAILED;
9250
b9ec6c1b
MC
9251 err = tg3_reset_hw(tp, 1);
9252 if (err)
9253 return TG3_LOOPBACK_FAILED;
9f40dead
MC
9254
9255 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9256 err |= TG3_MAC_LOOPBACK_FAILED;
9257 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9258 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9259 err |= TG3_PHY_LOOPBACK_FAILED;
9260 }
9261
9262 return err;
9263}
9264
4cafd3f5
MC
/* ethtool -t: run the driver self tests.
 *
 * Result slots: data[0] nvram, data[1] link, data[2] registers,
 * data[3] memory, data[4] loopback bitmask, data[5] interrupt.
 * Offline tests halt and later restart the whole chip; the device is
 * temporarily woken from low-power mode for the duration.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* Online tests: safe to run without stopping traffic. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its internal CPUs before poking at
		 * registers and memory.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test sleeps, so run it unlocked. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9337
1da177e4
LT
/* Standard MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * SERDES devices have no MDIO-accessible PHY and return -EOPNOTSUPP;
 * a powered-down PHY returns -EAGAIN.  Register writes require
 * CAP_NET_ADMIN.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
9389
#if TG3_VLAN_TAG_USED
/* 8021q hook: record the new VLAN group and update the chip's
 * RX_MODE_KEEP_VLAN_TAG setting accordingly, with the device quiesced.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
9411
15f9850d
DM
/* ethtool -c: return the cached interrupt-coalescing parameters. */
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
9419
d244c892
MC
9420static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9421{
9422 struct tg3 *tp = netdev_priv(dev);
9423 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9424 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9425
9426 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9427 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9428 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9429 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9430 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9431 }
9432
9433 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9434 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9435 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9436 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9437 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9438 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9439 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9440 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9441 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9442 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9443 return -EINVAL;
9444
9445 /* No rx interrupts will be generated if both are zero */
9446 if ((ec->rx_coalesce_usecs == 0) &&
9447 (ec->rx_max_coalesced_frames == 0))
9448 return -EINVAL;
9449
9450 /* No tx interrupts will be generated if both are zero */
9451 if ((ec->tx_coalesce_usecs == 0) &&
9452 (ec->tx_max_coalesced_frames == 0))
9453 return -EINVAL;
9454
9455 /* Only copy relevant parameters, ignore all others. */
9456 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9457 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9458 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9459 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9460 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9461 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9462 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9463 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9464 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9465
9466 if (netif_running(dev)) {
9467 tg3_full_lock(tp, 0);
9468 __tg3_set_coalesce(tp, &tp->coal);
9469 tg3_full_unlock(tp);
9470 }
9471 return 0;
9472}
9473
/* ethtool entry points for the tg3 driver.  Handlers that touch the
 * hardware take the driver's locks internally (see the individual
 * tg3_* implementations).
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9506
9507static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9508{
1b27777a 9509 u32 cursize, val, magic;
1da177e4
LT
9510
9511 tp->nvram_size = EEPROM_CHIP_SIZE;
9512
1820180b 9513 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
1da177e4
LT
9514 return;
9515
b16250e3
MC
9516 if ((magic != TG3_EEPROM_MAGIC) &&
9517 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9518 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
1da177e4
LT
9519 return;
9520
9521 /*
9522 * Size the chip by reading offsets at increasing powers of two.
9523 * When we encounter our validation signature, we know the addressing
9524 * has wrapped around, and thus have our chip size.
9525 */
1b27777a 9526 cursize = 0x10;
1da177e4
LT
9527
9528 while (cursize < tp->nvram_size) {
1820180b 9529 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
1da177e4
LT
9530 return;
9531
1820180b 9532 if (val == magic)
1da177e4
LT
9533 break;
9534
9535 cursize <<= 1;
9536 }
9537
9538 tp->nvram_size = cursize;
9539}
6aa20a22 9540
1da177e4
LT
9541static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9542{
9543 u32 val;
9544
1820180b 9545 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
1b27777a
MC
9546 return;
9547
9548 /* Selfboot format */
1820180b 9549 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
9550 tg3_get_eeprom_size(tp);
9551 return;
9552 }
9553
1da177e4
LT
9554 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9555 if (val != 0) {
9556 tp->nvram_size = (val >> 16) * 1024;
9557 return;
9558 }
9559 }
989a9d23 9560 tp->nvram_size = 0x80000;
1da177e4
LT
9561}
9562
9563static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9564{
9565 u32 nvcfg1;
9566
9567 nvcfg1 = tr32(NVRAM_CFG1);
9568 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9569 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9570 }
9571 else {
9572 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9573 tw32(NVRAM_CFG1, nvcfg1);
9574 }
9575
4c987487 9576 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
a4e2b347 9577 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
9578 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9579 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9580 tp->nvram_jedecnum = JEDEC_ATMEL;
9581 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9582 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9583 break;
9584 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9585 tp->nvram_jedecnum = JEDEC_ATMEL;
9586 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9587 break;
9588 case FLASH_VENDOR_ATMEL_EEPROM:
9589 tp->nvram_jedecnum = JEDEC_ATMEL;
9590 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9591 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9592 break;
9593 case FLASH_VENDOR_ST:
9594 tp->nvram_jedecnum = JEDEC_ST;
9595 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9596 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9597 break;
9598 case FLASH_VENDOR_SAIFUN:
9599 tp->nvram_jedecnum = JEDEC_SAIFUN;
9600 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9601 break;
9602 case FLASH_VENDOR_SST_SMALL:
9603 case FLASH_VENDOR_SST_LARGE:
9604 tp->nvram_jedecnum = JEDEC_SST;
9605 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9606 break;
9607 }
9608 }
9609 else {
9610 tp->nvram_jedecnum = JEDEC_ATMEL;
9611 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9612 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9613 }
9614}
9615
361b4ac2
MC
9616static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9617{
9618 u32 nvcfg1;
9619
9620 nvcfg1 = tr32(NVRAM_CFG1);
9621
e6af301b
MC
9622 /* NVRAM protection for TPM */
9623 if (nvcfg1 & (1 << 27))
9624 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9625
361b4ac2
MC
9626 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9627 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9628 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9629 tp->nvram_jedecnum = JEDEC_ATMEL;
9630 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9631 break;
9632 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9633 tp->nvram_jedecnum = JEDEC_ATMEL;
9634 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9635 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9636 break;
9637 case FLASH_5752VENDOR_ST_M45PE10:
9638 case FLASH_5752VENDOR_ST_M45PE20:
9639 case FLASH_5752VENDOR_ST_M45PE40:
9640 tp->nvram_jedecnum = JEDEC_ST;
9641 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9642 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9643 break;
9644 }
9645
9646 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9647 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9648 case FLASH_5752PAGE_SIZE_256:
9649 tp->nvram_pagesize = 256;
9650 break;
9651 case FLASH_5752PAGE_SIZE_512:
9652 tp->nvram_pagesize = 512;
9653 break;
9654 case FLASH_5752PAGE_SIZE_1K:
9655 tp->nvram_pagesize = 1024;
9656 break;
9657 case FLASH_5752PAGE_SIZE_2K:
9658 tp->nvram_pagesize = 2048;
9659 break;
9660 case FLASH_5752PAGE_SIZE_4K:
9661 tp->nvram_pagesize = 4096;
9662 break;
9663 case FLASH_5752PAGE_SIZE_264:
9664 tp->nvram_pagesize = 264;
9665 break;
9666 }
9667 }
9668 else {
9669 /* For eeprom, set pagesize to maximum eeprom size */
9670 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9671
9672 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9673 tw32(NVRAM_CFG1, nvcfg1);
9674 }
9675}
9676
d3c7b886
MC
9677static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9678{
989a9d23 9679 u32 nvcfg1, protect = 0;
d3c7b886
MC
9680
9681 nvcfg1 = tr32(NVRAM_CFG1);
9682
9683 /* NVRAM protection for TPM */
989a9d23 9684 if (nvcfg1 & (1 << 27)) {
d3c7b886 9685 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
989a9d23
MC
9686 protect = 1;
9687 }
d3c7b886 9688
989a9d23
MC
9689 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9690 switch (nvcfg1) {
d3c7b886
MC
9691 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9692 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9693 case FLASH_5755VENDOR_ATMEL_FLASH_3:
70b65a2d 9694 case FLASH_5755VENDOR_ATMEL_FLASH_5:
d3c7b886
MC
9695 tp->nvram_jedecnum = JEDEC_ATMEL;
9696 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9697 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9698 tp->nvram_pagesize = 264;
70b65a2d
MC
9699 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9700 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
989a9d23
MC
9701 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9702 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9703 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9704 else
9705 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
d3c7b886
MC
9706 break;
9707 case FLASH_5752VENDOR_ST_M45PE10:
9708 case FLASH_5752VENDOR_ST_M45PE20:
9709 case FLASH_5752VENDOR_ST_M45PE40:
9710 tp->nvram_jedecnum = JEDEC_ST;
9711 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9712 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9713 tp->nvram_pagesize = 256;
989a9d23
MC
9714 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9715 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9716 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9717 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9718 else
9719 tp->nvram_size = (protect ? 0x20000 : 0x80000);
d3c7b886
MC
9720 break;
9721 }
9722}
9723
1b27777a
MC
9724static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9725{
9726 u32 nvcfg1;
9727
9728 nvcfg1 = tr32(NVRAM_CFG1);
9729
9730 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9731 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9732 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9733 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9734 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9735 tp->nvram_jedecnum = JEDEC_ATMEL;
9736 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9737 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9738
9739 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9740 tw32(NVRAM_CFG1, nvcfg1);
9741 break;
9742 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9743 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9744 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9745 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9746 tp->nvram_jedecnum = JEDEC_ATMEL;
9747 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9748 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9749 tp->nvram_pagesize = 264;
9750 break;
9751 case FLASH_5752VENDOR_ST_M45PE10:
9752 case FLASH_5752VENDOR_ST_M45PE20:
9753 case FLASH_5752VENDOR_ST_M45PE40:
9754 tp->nvram_jedecnum = JEDEC_ST;
9755 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9756 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9757 tp->nvram_pagesize = 256;
9758 break;
9759 }
9760}
9761
6b91fa02
MC
9762static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9763{
9764 u32 nvcfg1, protect = 0;
9765
9766 nvcfg1 = tr32(NVRAM_CFG1);
9767
9768 /* NVRAM protection for TPM */
9769 if (nvcfg1 & (1 << 27)) {
9770 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9771 protect = 1;
9772 }
9773
9774 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9775 switch (nvcfg1) {
9776 case FLASH_5761VENDOR_ATMEL_ADB021D:
9777 case FLASH_5761VENDOR_ATMEL_ADB041D:
9778 case FLASH_5761VENDOR_ATMEL_ADB081D:
9779 case FLASH_5761VENDOR_ATMEL_ADB161D:
9780 case FLASH_5761VENDOR_ATMEL_MDB021D:
9781 case FLASH_5761VENDOR_ATMEL_MDB041D:
9782 case FLASH_5761VENDOR_ATMEL_MDB081D:
9783 case FLASH_5761VENDOR_ATMEL_MDB161D:
9784 tp->nvram_jedecnum = JEDEC_ATMEL;
9785 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9786 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9787 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9788 tp->nvram_pagesize = 256;
9789 break;
9790 case FLASH_5761VENDOR_ST_A_M45PE20:
9791 case FLASH_5761VENDOR_ST_A_M45PE40:
9792 case FLASH_5761VENDOR_ST_A_M45PE80:
9793 case FLASH_5761VENDOR_ST_A_M45PE16:
9794 case FLASH_5761VENDOR_ST_M_M45PE20:
9795 case FLASH_5761VENDOR_ST_M_M45PE40:
9796 case FLASH_5761VENDOR_ST_M_M45PE80:
9797 case FLASH_5761VENDOR_ST_M_M45PE16:
9798 tp->nvram_jedecnum = JEDEC_ST;
9799 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9800 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9801 tp->nvram_pagesize = 256;
9802 break;
9803 }
9804
9805 if (protect) {
9806 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9807 } else {
9808 switch (nvcfg1) {
9809 case FLASH_5761VENDOR_ATMEL_ADB161D:
9810 case FLASH_5761VENDOR_ATMEL_MDB161D:
9811 case FLASH_5761VENDOR_ST_A_M45PE16:
9812 case FLASH_5761VENDOR_ST_M_M45PE16:
9813 tp->nvram_size = 0x100000;
9814 break;
9815 case FLASH_5761VENDOR_ATMEL_ADB081D:
9816 case FLASH_5761VENDOR_ATMEL_MDB081D:
9817 case FLASH_5761VENDOR_ST_A_M45PE80:
9818 case FLASH_5761VENDOR_ST_M_M45PE80:
9819 tp->nvram_size = 0x80000;
9820 break;
9821 case FLASH_5761VENDOR_ATMEL_ADB041D:
9822 case FLASH_5761VENDOR_ATMEL_MDB041D:
9823 case FLASH_5761VENDOR_ST_A_M45PE40:
9824 case FLASH_5761VENDOR_ST_M_M45PE40:
9825 tp->nvram_size = 0x40000;
9826 break;
9827 case FLASH_5761VENDOR_ATMEL_ADB021D:
9828 case FLASH_5761VENDOR_ATMEL_MDB021D:
9829 case FLASH_5761VENDOR_ST_A_M45PE20:
9830 case FLASH_5761VENDOR_ST_M_M45PE20:
9831 tp->nvram_size = 0x20000;
9832 break;
9833 }
9834 }
9835}
9836
b5d3772c
MC
9837static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9838{
9839 tp->nvram_jedecnum = JEDEC_ATMEL;
9840 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9841 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9842}
9843
1da177e4
LT
9844/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9845static void __devinit tg3_nvram_init(struct tg3 *tp)
9846{
1da177e4
LT
9847 tw32_f(GRC_EEPROM_ADDR,
9848 (EEPROM_ADDR_FSM_RESET |
9849 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9850 EEPROM_ADDR_CLKPERD_SHIFT)));
9851
9d57f01c 9852 msleep(1);
1da177e4
LT
9853
9854 /* Enable seeprom accesses. */
9855 tw32_f(GRC_LOCAL_CTRL,
9856 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9857 udelay(100);
9858
9859 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9860 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9861 tp->tg3_flags |= TG3_FLAG_NVRAM;
9862
ec41c7df
MC
9863 if (tg3_nvram_lock(tp)) {
9864 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9865 "tg3_nvram_init failed.\n", tp->dev->name);
9866 return;
9867 }
e6af301b 9868 tg3_enable_nvram_access(tp);
1da177e4 9869
989a9d23
MC
9870 tp->nvram_size = 0;
9871
361b4ac2
MC
9872 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9873 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
9874 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9875 tg3_get_5755_nvram_info(tp);
d30cdd28
MC
9876 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
1b27777a 9878 tg3_get_5787_nvram_info(tp);
6b91fa02
MC
9879 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9880 tg3_get_5761_nvram_info(tp);
b5d3772c
MC
9881 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9882 tg3_get_5906_nvram_info(tp);
361b4ac2
MC
9883 else
9884 tg3_get_nvram_info(tp);
9885
989a9d23
MC
9886 if (tp->nvram_size == 0)
9887 tg3_get_nvram_size(tp);
1da177e4 9888
e6af301b 9889 tg3_disable_nvram_access(tp);
381291b7 9890 tg3_nvram_unlock(tp);
1da177e4
LT
9891
9892 } else {
9893 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9894
9895 tg3_get_eeprom_size(tp);
9896 }
9897}
9898
9899static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9900 u32 offset, u32 *val)
9901{
9902 u32 tmp;
9903 int i;
9904
9905 if (offset > EEPROM_ADDR_ADDR_MASK ||
9906 (offset % 4) != 0)
9907 return -EINVAL;
9908
9909 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9910 EEPROM_ADDR_DEVID_MASK |
9911 EEPROM_ADDR_READ);
9912 tw32(GRC_EEPROM_ADDR,
9913 tmp |
9914 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9915 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9916 EEPROM_ADDR_ADDR_MASK) |
9917 EEPROM_ADDR_READ | EEPROM_ADDR_START);
9918
9d57f01c 9919 for (i = 0; i < 1000; i++) {
1da177e4
LT
9920 tmp = tr32(GRC_EEPROM_ADDR);
9921
9922 if (tmp & EEPROM_ADDR_COMPLETE)
9923 break;
9d57f01c 9924 msleep(1);
1da177e4
LT
9925 }
9926 if (!(tmp & EEPROM_ADDR_COMPLETE))
9927 return -EBUSY;
9928
9929 *val = tr32(GRC_EEPROM_DATA);
9930 return 0;
9931}
9932
9933#define NVRAM_CMD_TIMEOUT 10000
9934
9935static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9936{
9937 int i;
9938
9939 tw32(NVRAM_CMD, nvram_cmd);
9940 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9941 udelay(10);
9942 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9943 udelay(10);
9944 break;
9945 }
9946 }
9947 if (i == NVRAM_CMD_TIMEOUT) {
9948 return -EBUSY;
9949 }
9950 return 0;
9951}
9952
1820180b
MC
9953static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9954{
9955 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9956 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9957 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 9958 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
1820180b
MC
9959 (tp->nvram_jedecnum == JEDEC_ATMEL))
9960
9961 addr = ((addr / tp->nvram_pagesize) <<
9962 ATMEL_AT45DB0X1B_PAGE_POS) +
9963 (addr % tp->nvram_pagesize);
9964
9965 return addr;
9966}
9967
c4e6575c
MC
9968static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9969{
9970 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9971 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9972 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 9973 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
c4e6575c
MC
9974 (tp->nvram_jedecnum == JEDEC_ATMEL))
9975
9976 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9977 tp->nvram_pagesize) +
9978 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9979
9980 return addr;
9981}
9982
1da177e4
LT
9983static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9984{
9985 int ret;
9986
1da177e4
LT
9987 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9988 return tg3_nvram_read_using_eeprom(tp, offset, val);
9989
1820180b 9990 offset = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
9991
9992 if (offset > NVRAM_ADDR_MSK)
9993 return -EINVAL;
9994
ec41c7df
MC
9995 ret = tg3_nvram_lock(tp);
9996 if (ret)
9997 return ret;
1da177e4 9998
e6af301b 9999 tg3_enable_nvram_access(tp);
1da177e4
LT
10000
10001 tw32(NVRAM_ADDR, offset);
10002 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10003 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10004
10005 if (ret == 0)
10006 *val = swab32(tr32(NVRAM_RDDATA));
10007
e6af301b 10008 tg3_disable_nvram_access(tp);
1da177e4 10009
381291b7
MC
10010 tg3_nvram_unlock(tp);
10011
1da177e4
LT
10012 return ret;
10013}
10014
1820180b
MC
10015static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10016{
10017 int err;
10018 u32 tmp;
10019
10020 err = tg3_nvram_read(tp, offset, &tmp);
10021 *val = swab32(tmp);
10022 return err;
10023}
10024
1da177e4
LT
10025static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10026 u32 offset, u32 len, u8 *buf)
10027{
10028 int i, j, rc = 0;
10029 u32 val;
10030
10031 for (i = 0; i < len; i += 4) {
10032 u32 addr, data;
10033
10034 addr = offset + i;
10035
10036 memcpy(&data, buf + i, 4);
10037
10038 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
10039
10040 val = tr32(GRC_EEPROM_ADDR);
10041 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10042
10043 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10044 EEPROM_ADDR_READ);
10045 tw32(GRC_EEPROM_ADDR, val |
10046 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10047 (addr & EEPROM_ADDR_ADDR_MASK) |
10048 EEPROM_ADDR_START |
10049 EEPROM_ADDR_WRITE);
6aa20a22 10050
9d57f01c 10051 for (j = 0; j < 1000; j++) {
1da177e4
LT
10052 val = tr32(GRC_EEPROM_ADDR);
10053
10054 if (val & EEPROM_ADDR_COMPLETE)
10055 break;
9d57f01c 10056 msleep(1);
1da177e4
LT
10057 }
10058 if (!(val & EEPROM_ADDR_COMPLETE)) {
10059 rc = -EBUSY;
10060 break;
10061 }
10062 }
10063
10064 return rc;
10065}
10066
10067/* offset and length are dword aligned */
10068static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10069 u8 *buf)
10070{
10071 int ret = 0;
10072 u32 pagesize = tp->nvram_pagesize;
10073 u32 pagemask = pagesize - 1;
10074 u32 nvram_cmd;
10075 u8 *tmp;
10076
10077 tmp = kmalloc(pagesize, GFP_KERNEL);
10078 if (tmp == NULL)
10079 return -ENOMEM;
10080
10081 while (len) {
10082 int j;
e6af301b 10083 u32 phy_addr, page_off, size;
1da177e4
LT
10084
10085 phy_addr = offset & ~pagemask;
6aa20a22 10086
1da177e4
LT
10087 for (j = 0; j < pagesize; j += 4) {
10088 if ((ret = tg3_nvram_read(tp, phy_addr + j,
10089 (u32 *) (tmp + j))))
10090 break;
10091 }
10092 if (ret)
10093 break;
10094
10095 page_off = offset & pagemask;
10096 size = pagesize;
10097 if (len < size)
10098 size = len;
10099
10100 len -= size;
10101
10102 memcpy(tmp + page_off, buf, size);
10103
10104 offset = offset + (pagesize - page_off);
10105
e6af301b 10106 tg3_enable_nvram_access(tp);
1da177e4
LT
10107
10108 /*
10109 * Before we can erase the flash page, we need
10110 * to issue a special "write enable" command.
10111 */
10112 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10113
10114 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10115 break;
10116
10117 /* Erase the target page */
10118 tw32(NVRAM_ADDR, phy_addr);
10119
10120 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10121 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10122
10123 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10124 break;
10125
10126 /* Issue another write enable to start the write. */
10127 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10128
10129 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10130 break;
10131
10132 for (j = 0; j < pagesize; j += 4) {
10133 u32 data;
10134
10135 data = *((u32 *) (tmp + j));
10136 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10137
10138 tw32(NVRAM_ADDR, phy_addr + j);
10139
10140 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10141 NVRAM_CMD_WR;
10142
10143 if (j == 0)
10144 nvram_cmd |= NVRAM_CMD_FIRST;
10145 else if (j == (pagesize - 4))
10146 nvram_cmd |= NVRAM_CMD_LAST;
10147
10148 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10149 break;
10150 }
10151 if (ret)
10152 break;
10153 }
10154
10155 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10156 tg3_nvram_exec_cmd(tp, nvram_cmd);
10157
10158 kfree(tmp);
10159
10160 return ret;
10161}
10162
10163/* offset and length are dword aligned */
10164static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10165 u8 *buf)
10166{
10167 int i, ret = 0;
10168
10169 for (i = 0; i < len; i += 4, offset += 4) {
10170 u32 data, page_off, phy_addr, nvram_cmd;
10171
10172 memcpy(&data, buf + i, 4);
10173 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10174
10175 page_off = offset % tp->nvram_pagesize;
10176
1820180b 10177 phy_addr = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
10178
10179 tw32(NVRAM_ADDR, phy_addr);
10180
10181 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10182
10183 if ((page_off == 0) || (i == 0))
10184 nvram_cmd |= NVRAM_CMD_FIRST;
f6d9a256 10185 if (page_off == (tp->nvram_pagesize - 4))
1da177e4
LT
10186 nvram_cmd |= NVRAM_CMD_LAST;
10187
10188 if (i == (len - 4))
10189 nvram_cmd |= NVRAM_CMD_LAST;
10190
4c987487 10191 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
af36e6b6 10192 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
1b27777a 10193 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
d30cdd28 10194 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
4c987487
MC
10195 (tp->nvram_jedecnum == JEDEC_ST) &&
10196 (nvram_cmd & NVRAM_CMD_FIRST)) {
1da177e4
LT
10197
10198 if ((ret = tg3_nvram_exec_cmd(tp,
10199 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10200 NVRAM_CMD_DONE)))
10201
10202 break;
10203 }
10204 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10205 /* We always do complete word writes to eeprom. */
10206 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10207 }
10208
10209 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10210 break;
10211 }
10212 return ret;
10213}
10214
10215/* offset and length are dword aligned */
10216static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10217{
10218 int ret;
10219
1da177e4 10220 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
10221 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10222 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
10223 udelay(40);
10224 }
10225
10226 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10227 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10228 }
10229 else {
10230 u32 grc_mode;
10231
ec41c7df
MC
10232 ret = tg3_nvram_lock(tp);
10233 if (ret)
10234 return ret;
1da177e4 10235
e6af301b
MC
10236 tg3_enable_nvram_access(tp);
10237 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10238 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 10239 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
10240
10241 grc_mode = tr32(GRC_MODE);
10242 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10243
10244 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10245 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10246
10247 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10248 buf);
10249 }
10250 else {
10251 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10252 buf);
10253 }
10254
10255 grc_mode = tr32(GRC_MODE);
10256 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10257
e6af301b 10258 tg3_disable_nvram_access(tp);
1da177e4
LT
10259 tg3_nvram_unlock(tp);
10260 }
10261
10262 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 10263 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
10264 udelay(40);
10265 }
10266
10267 return ret;
10268}
10269
/* Maps PCI (subsystem vendor, subsystem device) pairs to the PHY ID
 * expected on that board.
 * NOTE(review): entries with phy_id == 0 appear to be boards without a
 * recognizable copper PHY (e.g. the fiber 3C996SX) -- confirm against
 * the code that consumes lookup_by_subsys().
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10312
10313static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10314{
10315 int i;
10316
10317 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10318 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10319 tp->pdev->subsystem_vendor) &&
10320 (subsys_id_to_phy_id[i].subsys_devid ==
10321 tp->pdev->subsystem_device))
10322 return &subsys_id_to_phy_id[i];
10323 }
10324 return NULL;
10325}
10326
7d0c41ef 10327static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 10328{
1da177e4 10329 u32 val;
caf636c7
MC
10330 u16 pmcsr;
10331
10332 /* On some early chips the SRAM cannot be accessed in D3hot state,
10333 * so need make sure we're in D0.
10334 */
10335 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10336 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10337 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10338 msleep(1);
7d0c41ef
MC
10339
10340 /* Make sure register accesses (indirect or otherwise)
10341 * will function correctly.
10342 */
10343 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10344 tp->misc_host_ctrl);
1da177e4 10345
f49639e6
DM
10346 /* The memory arbiter has to be enabled in order for SRAM accesses
10347 * to succeed. Normally on powerup the tg3 chip firmware will make
10348 * sure it is enabled, but other entities such as system netboot
10349 * code might disable it.
10350 */
10351 val = tr32(MEMARB_MODE);
10352 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10353
1da177e4 10354 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
10355 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10356
a85feb8c
GZ
10357 /* Assume an onboard device and WOL capable by default. */
10358 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
72b845e0 10359
b5d3772c 10360 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9d26e213 10361 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
b5d3772c 10362 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9d26e213
MC
10363 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10364 }
8ed5d97e
MC
10365 if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC)
10366 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
b5d3772c
MC
10367 return;
10368 }
10369
1da177e4
LT
10370 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10371 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10372 u32 nic_cfg, led_cfg;
7d0c41ef
MC
10373 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10374 int eeprom_phy_serdes = 0;
1da177e4
LT
10375
10376 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10377 tp->nic_sram_data_cfg = nic_cfg;
10378
10379 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10380 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10381 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10382 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10383 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10384 (ver > 0) && (ver < 0x100))
10385 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10386
1da177e4
LT
10387 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10388 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10389 eeprom_phy_serdes = 1;
10390
10391 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10392 if (nic_phy_id != 0) {
10393 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10394 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10395
10396 eeprom_phy_id = (id1 >> 16) << 10;
10397 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10398 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10399 } else
10400 eeprom_phy_id = 0;
10401
7d0c41ef 10402 tp->phy_id = eeprom_phy_id;
747e8f8b 10403 if (eeprom_phy_serdes) {
a4e2b347 10404 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
747e8f8b
MC
10405 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10406 else
10407 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10408 }
7d0c41ef 10409
cbf46853 10410 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
10411 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10412 SHASTA_EXT_LED_MODE_MASK);
cbf46853 10413 else
1da177e4
LT
10414 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10415
10416 switch (led_cfg) {
10417 default:
10418 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10419 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10420 break;
10421
10422 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10423 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10424 break;
10425
10426 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10427 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
10428
10429 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10430 * read on some older 5700/5701 bootcode.
10431 */
10432 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10433 ASIC_REV_5700 ||
10434 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10435 ASIC_REV_5701)
10436 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10437
1da177e4
LT
10438 break;
10439
10440 case SHASTA_EXT_LED_SHARED:
10441 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10442 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10443 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10444 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10445 LED_CTRL_MODE_PHY_2);
10446 break;
10447
10448 case SHASTA_EXT_LED_MAC:
10449 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10450 break;
10451
10452 case SHASTA_EXT_LED_COMBO:
10453 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10454 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10455 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10456 LED_CTRL_MODE_PHY_2);
10457 break;
10458
10459 };
10460
10461 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10462 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10463 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10464 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10465
9d26e213 10466 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
1da177e4 10467 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9d26e213
MC
10468 if ((tp->pdev->subsystem_vendor ==
10469 PCI_VENDOR_ID_ARIMA) &&
10470 (tp->pdev->subsystem_device == 0x205a ||
10471 tp->pdev->subsystem_device == 0x2063))
10472 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10473 } else {
f49639e6 10474 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9d26e213
MC
10475 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10476 }
1da177e4
LT
10477
10478 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10479 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 10480 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
10481 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10482 }
0d3031d9
MC
10483 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10484 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
a85feb8c
GZ
10485 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10486 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10487 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
1da177e4
LT
10488
10489 if (cfg2 & (1 << 17))
10490 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10491
10492 /* serdes signal pre-emphasis in register 0x590 set by */
10493 /* bootcode if bit 18 is set */
10494 if (cfg2 & (1 << 18))
10495 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8ed5d97e
MC
10496
10497 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10498 u32 cfg3;
10499
10500 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10501 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10502 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10503 }
1da177e4 10504 }
7d0c41ef
MC
10505}
10506
/* Identify the PHY attached to the MAC and record its ID in tp->phy_id.
 *
 * Probe order:
 *   1. If ASF or APE firmware owns the PHY, skip the hardware read
 *      entirely (PHY_ID_INVALID) to avoid conflicting MII accesses.
 *   2. Otherwise read MII_PHYSID1/2 and accept the ID if it is known.
 *   3. Fall back to the ID already loaded from EEPROM by
 *      tg3_get_eeprom_hw_cfg(), and failing that, to the hardcoded
 *      subsystem-ID table.
 * For copper PHYs not managed by firmware, also reset the PHY and
 * (re)program autonegotiation advertisement.
 *
 * Returns 0 on success or a negative errno (-ENODEV if no PHY could be
 * identified, or the error from tg3_phy_reset()/tg3_init_5401phy_dsp()).
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack OUI/model/revision into the driver's internal
		 * PHY ID layout (same packing as the EEPROM path in
		 * tg3_get_eeprom_hw_cfg()).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* BCM8002 is a SerDes device; everything else read from
		 * the MII registers here is copper.
		 */
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* phy_id == 0 in the table means "SerDes, no
			 * MII-visible PHY".
			 */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY fully owned by the driver: reset it and make sure
	 * autoneg advertises everything the chip supports.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR latches link-down; read twice so the second read
		 * reflects current link state.  If link is already up,
		 * don't disturb it with a reset.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 need to be forced into master mode
			 * for gigabit operation.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		/* Only restart autoneg if the current advertisement is
		 * narrower than the full mask.
		 */
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* Rewrite the advertisement registers unconditionally,
		 * even when autoneg was not restarted above.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this second DSP init looks redundant — if the
	 * block above ran, err is already 0 here and the same routine was
	 * just executed.  Possibly a deliberate retry; confirm against
	 * BCM5401 errata before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* SerDes links advertise gigabit/fibre only; 10/100-only parts
	 * must not advertise gigabit at all.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10634
/* Read the board part number string from the Vital Product Data (VPD)
 * area and copy it into tp->board_part_number.
 *
 * The 256-byte VPD image is fetched either directly from NVRAM (when
 * the EEPROM magic signature is present) or through the PCI VPD
 * capability registers.  The image is then scanned for a VPD-R
 * resource (tag 0x90) containing a "PN" keyword.
 *
 * On any failure a fallback string ("BCM95906" for 5906 parts, "none"
 * otherwise) is stored instead; the function never reports an error.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100; copy it out one
		 * 32-bit word at a time, little-endian byte order.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >> 0) & 0xff);
			vpd_data[i + 1] = ((tmp >> 8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		/* No direct NVRAM access: go through the PCI VPD
		 * capability instead.
		 */
		int vpd_cap;

		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			/* Write the address, then poll bit 15 of the
			 * address register (up to ~100ms) for read
			 * completion.
			 */
			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number.  VPD is a sequence of
	 * tagged resources: 0x82 = identifier string, 0x91 = VPD-W,
	 * 0x90 = VPD-R (where "PN" lives); each tag is followed by a
	 * 16-bit little-endian length.
	 */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		if (val == 0x82 || val == 0x91) {
			/* Skip over resources we don't care about. */
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk keyword entries: 2-byte keyword + 1-byte length
		 * + data, until the "PN" keyword is found.
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* Bound by both the destination buffer
				 * size and the VPD image itself.
				 */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10734
c4e6575c
MC
/* Read the bootcode firmware version string out of NVRAM into
 * tp->fw_ver (up to 16 bytes, copied 4 at a time).
 *
 * The function silently returns, leaving tp->fw_ver untouched, if the
 * EEPROM magic is absent, any NVRAM read fails, or the data at the
 * derived offset does not carry the expected 0x0c000000 signature.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc holds the version-block pointer, word 0x4 the image
	 * start address; both are needed to compute the final offset.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* Top byte 0x0c marks a valid version block. */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		/* Word at offset+4 must be zero for this format. */
		if (val != 0)
			return;

		/* ver_offset is relative to the image start. */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10774
/* Forward declaration -- defined later in this file.  Used by
 * tg3_get_invariants() to locate the sibling PCI function on
 * dual-port devices (tp->pdev_peer).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
1da177e4
LT
10777static int __devinit tg3_get_invariants(struct tg3 *tp)
10778{
10779 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
10780 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10781 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
c165b004
JL
10782 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10783 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
399de50b
MC
10784 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10785 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
10786 { },
10787 };
10788 u32 misc_ctrl_reg;
10789 u32 cacheline_sz_reg;
10790 u32 pci_state_reg, grc_misc_cfg;
10791 u32 val;
10792 u16 pci_cmd;
c7835a77 10793 int err, pcie_cap;
1da177e4 10794
1da177e4
LT
10795 /* Force memory write invalidate off. If we leave it on,
10796 * then on 5700_BX chips we have to enable a workaround.
10797 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10798 * to match the cacheline size. The Broadcom driver have this
10799 * workaround but turns MWI off all the times so never uses
10800 * it. This seems to suggest that the workaround is insufficient.
10801 */
10802 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10803 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10804 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10805
10806 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10807 * has the register indirect write enable bit set before
10808 * we try to access any of the MMIO registers. It is also
10809 * critical that the PCI-X hw workaround situation is decided
10810 * before that as well.
10811 */
10812 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10813 &misc_ctrl_reg);
10814
10815 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10816 MISC_HOST_CTRL_CHIPREV_SHIFT);
795d01c5
MC
10817 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10818 u32 prod_id_asic_rev;
10819
10820 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10821 &prod_id_asic_rev);
10822 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10823 }
1da177e4 10824
ff645bec
MC
10825 /* Wrong chip ID in 5752 A0. This code can be removed later
10826 * as A0 is not in production.
10827 */
10828 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10829 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10830
6892914f
MC
10831 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10832 * we need to disable memory and use config. cycles
10833 * only to access all registers. The 5702/03 chips
10834 * can mistakenly decode the special cycles from the
10835 * ICH chipsets as memory write cycles, causing corruption
10836 * of register and memory space. Only certain ICH bridges
10837 * will drive special cycles with non-zero data during the
10838 * address phase which can fall within the 5703's address
10839 * range. This is not an ICH bug as the PCI spec allows
10840 * non-zero address during special cycles. However, only
10841 * these ICH bridges are known to drive non-zero addresses
10842 * during special cycles.
10843 *
10844 * Since special cycles do not cross PCI bridges, we only
10845 * enable this workaround if the 5703 is on the secondary
10846 * bus of these ICH bridges.
10847 */
10848 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10849 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10850 static struct tg3_dev_id {
10851 u32 vendor;
10852 u32 device;
10853 u32 rev;
10854 } ich_chipsets[] = {
10855 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10856 PCI_ANY_ID },
10857 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10858 PCI_ANY_ID },
10859 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10860 0xa },
10861 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10862 PCI_ANY_ID },
10863 { },
10864 };
10865 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10866 struct pci_dev *bridge = NULL;
10867
10868 while (pci_id->vendor != 0) {
10869 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10870 bridge);
10871 if (!bridge) {
10872 pci_id++;
10873 continue;
10874 }
10875 if (pci_id->rev != PCI_ANY_ID) {
44c10138 10876 if (bridge->revision > pci_id->rev)
6892914f
MC
10877 continue;
10878 }
10879 if (bridge->subordinate &&
10880 (bridge->subordinate->number ==
10881 tp->pdev->bus->number)) {
10882
10883 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10884 pci_dev_put(bridge);
10885 break;
10886 }
10887 }
10888 }
10889
4a29cc2e
MC
10890 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10891 * DMA addresses > 40-bit. This bridge may have other additional
10892 * 57xx devices behind it in some 4-port NIC designs for example.
10893 * Any tg3 device found behind the bridge will also need the 40-bit
10894 * DMA workaround.
10895 */
a4e2b347
MC
10896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10898 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 10899 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 10900 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 10901 }
4a29cc2e
MC
10902 else {
10903 struct pci_dev *bridge = NULL;
10904
10905 do {
10906 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10907 PCI_DEVICE_ID_SERVERWORKS_EPB,
10908 bridge);
10909 if (bridge && bridge->subordinate &&
10910 (bridge->subordinate->number <=
10911 tp->pdev->bus->number) &&
10912 (bridge->subordinate->subordinate >=
10913 tp->pdev->bus->number)) {
10914 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10915 pci_dev_put(bridge);
10916 break;
10917 }
10918 } while (bridge);
10919 }
4cf78e4f 10920
1da177e4
LT
10921 /* Initialize misc host control in PCI block. */
10922 tp->misc_host_ctrl |= (misc_ctrl_reg &
10923 MISC_HOST_CTRL_CHIPREV);
10924 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10925 tp->misc_host_ctrl);
10926
10927 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10928 &cacheline_sz_reg);
10929
10930 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10931 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10932 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10933 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10934
7544b097
MC
10935 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10936 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10937 tp->pdev_peer = tg3_find_peer(tp);
10938
6708e5cc 10939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 10940 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 10941 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 10942 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 10943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
b5d3772c 10944 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
a4e2b347 10945 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
10946 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10947
1b440c56
JL
10948 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10949 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10950 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10951
5a6f3074 10952 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7544b097
MC
10953 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
10954 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
10955 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
10956 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
10957 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
10958 tp->pdev_peer == tp->pdev))
10959 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
10960
af36e6b6 10961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 10962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 10963 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
b5d3772c 10964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5a6f3074 10965 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
fcfa0a32 10966 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
52c0fd83 10967 } else {
7f62ad5d 10968 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
52c0fd83
MC
10969 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10970 ASIC_REV_5750 &&
10971 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
7f62ad5d 10972 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
52c0fd83 10973 }
5a6f3074 10974 }
1da177e4 10975
0f893dc6
MC
10976 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10977 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
d9ab5ad1 10978 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
af36e6b6 10979 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
b5d3772c 10980 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
d30cdd28 10981 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
b5d3772c 10982 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
0f893dc6
MC
10983 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10984
c7835a77
MC
10985 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
10986 if (pcie_cap != 0) {
1da177e4 10987 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
c7835a77
MC
10988 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10989 u16 lnkctl;
10990
10991 pci_read_config_word(tp->pdev,
10992 pcie_cap + PCI_EXP_LNKCTL,
10993 &lnkctl);
10994 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
10995 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
10996 }
10997 }
1da177e4 10998
399de50b
MC
10999 /* If we have an AMD 762 or VIA K8T800 chipset, write
11000 * reordering to the mailbox registers done by the host
11001 * controller can cause major troubles. We read back from
11002 * every mailbox register write to force the writes to be
11003 * posted to the chip in order.
11004 */
11005 if (pci_dev_present(write_reorder_chipsets) &&
11006 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11007 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11008
1da177e4
LT
11009 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11010 tp->pci_lat_timer < 64) {
11011 tp->pci_lat_timer = 64;
11012
11013 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11014 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11015 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11016 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11017
11018 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11019 cacheline_sz_reg);
11020 }
11021
9974a356
MC
11022 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11023 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11024 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11025 if (!tp->pcix_cap) {
11026 printk(KERN_ERR PFX "Cannot find PCI-X "
11027 "capability, aborting.\n");
11028 return -EIO;
11029 }
11030 }
11031
1da177e4
LT
11032 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11033 &pci_state_reg);
11034
9974a356 11035 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
1da177e4
LT
11036 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11037
11038 /* If this is a 5700 BX chipset, and we are in PCI-X
11039 * mode, enable register write workaround.
11040 *
11041 * The workaround is to use indirect register accesses
11042 * for all chip writes not to mailbox registers.
11043 */
11044 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11045 u32 pm_reg;
1da177e4
LT
11046
11047 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11048
11049 /* The chip can have it's power management PCI config
11050 * space registers clobbered due to this bug.
11051 * So explicitly force the chip into D0 here.
11052 */
9974a356
MC
11053 pci_read_config_dword(tp->pdev,
11054 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11055 &pm_reg);
11056 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11057 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
11058 pci_write_config_dword(tp->pdev,
11059 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11060 pm_reg);
11061
11062 /* Also, force SERR#/PERR# in PCI command. */
11063 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11064 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11065 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11066 }
11067 }
11068
087fe256
MC
11069 /* 5700 BX chips need to have their TX producer index mailboxes
11070 * written twice to workaround a bug.
11071 */
11072 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11073 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11074
1da177e4
LT
11075 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11076 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11077 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11078 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11079
11080 /* Chip-specific fixup from Broadcom driver */
11081 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11082 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11083 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11084 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11085 }
11086
1ee582d8 11087 /* Default fast path register access methods */
20094930 11088 tp->read32 = tg3_read32;
1ee582d8 11089 tp->write32 = tg3_write32;
09ee929c 11090 tp->read32_mbox = tg3_read32;
20094930 11091 tp->write32_mbox = tg3_write32;
1ee582d8
MC
11092 tp->write32_tx_mbox = tg3_write32;
11093 tp->write32_rx_mbox = tg3_write32;
11094
11095 /* Various workaround register access methods */
11096 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11097 tp->write32 = tg3_write_indirect_reg32;
98efd8a6
MC
11098 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11099 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11100 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11101 /*
11102 * Back to back register writes can cause problems on these
11103 * chips, the workaround is to read back all reg writes
11104 * except those to mailbox regs.
11105 *
11106 * See tg3_write_indirect_reg32().
11107 */
1ee582d8 11108 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
11109 }
11110
1ee582d8
MC
11111
11112 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11113 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11114 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11115 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11116 tp->write32_rx_mbox = tg3_write_flush_reg32;
11117 }
20094930 11118
6892914f
MC
11119 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11120 tp->read32 = tg3_read_indirect_reg32;
11121 tp->write32 = tg3_write_indirect_reg32;
11122 tp->read32_mbox = tg3_read_indirect_mbox;
11123 tp->write32_mbox = tg3_write_indirect_mbox;
11124 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11125 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11126
11127 iounmap(tp->regs);
22abe310 11128 tp->regs = NULL;
6892914f
MC
11129
11130 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11131 pci_cmd &= ~PCI_COMMAND_MEMORY;
11132 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11133 }
b5d3772c
MC
11134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11135 tp->read32_mbox = tg3_read32_mbox_5906;
11136 tp->write32_mbox = tg3_write32_mbox_5906;
11137 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11138 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11139 }
6892914f 11140
bbadf503
MC
11141 if (tp->write32 == tg3_write_indirect_reg32 ||
11142 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11143 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 11144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
bbadf503
MC
11145 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11146
7d0c41ef 11147 /* Get eeprom hw config before calling tg3_set_power_state().
9d26e213 11148 * In particular, the TG3_FLG2_IS_NIC flag must be
7d0c41ef
MC
11149 * determined before calling tg3_set_power_state() so that
11150 * we know whether or not to switch out of Vaux power.
11151 * When the flag is set, it means that GPIO1 is used for eeprom
11152 * write protect and also implies that it is a LOM where GPIOs
11153 * are not used to switch power.
6aa20a22 11154 */
7d0c41ef
MC
11155 tg3_get_eeprom_hw_cfg(tp);
11156
0d3031d9
MC
11157 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11158 /* Allow reads and writes to the
11159 * APE register and memory space.
11160 */
11161 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11162 PCISTATE_ALLOW_APE_SHMEM_WR;
11163 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11164 pci_state_reg);
11165 }
11166
d30cdd28
MC
11167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
11168 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11169
314fba34
MC
11170 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11171 * GPIO1 driven high will bring 5700's external PHY out of reset.
11172 * It is also used as eeprom write protect on LOMs.
11173 */
11174 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11175 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11176 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11177 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11178 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
11179 /* Unused GPIO3 must be driven as output on 5752 because there
11180 * are no pull-up resistors on unused GPIO pins.
11181 */
11182 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11183 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 11184
af36e6b6
MC
11185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11186 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11187
1da177e4 11188 /* Force the chip into D0. */
bc1c7567 11189 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
11190 if (err) {
11191 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11192 pci_name(tp->pdev));
11193 return err;
11194 }
11195
11196 /* 5700 B0 chips do not support checksumming correctly due
11197 * to hardware bugs.
11198 */
11199 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11200 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11201
1da177e4
LT
11202 /* Derive initial jumbo mode from MTU assigned in
11203 * ether_setup() via the alloc_etherdev() call
11204 */
0f893dc6 11205 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 11206 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 11207 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
11208
11209 /* Determine WakeOnLan speed to use. */
11210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11211 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11212 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11213 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11214 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11215 } else {
11216 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11217 }
11218
11219 /* A few boards don't want Ethernet@WireSpeed phy feature */
11220 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11221 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11222 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 11223 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
b5d3772c 11224 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
747e8f8b 11225 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
11226 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11227
11228 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11229 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11230 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11231 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11232 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11233
c424cb24
MC
11234 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11235 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28
MC
11236 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11237 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) {
d4011ada
MC
11238 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11239 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11240 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
c1d2a196
MC
11241 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11242 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11243 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
c424cb24
MC
11244 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11245 }
1da177e4 11246
1da177e4 11247 tp->coalesce_mode = 0;
1da177e4
LT
11248 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11249 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11250 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11251
11252 /* Initialize MAC MI mode, polling disabled. */
11253 tw32_f(MAC_MI_MODE, tp->mi_mode);
11254 udelay(80);
11255
11256 /* Initialize data/descriptor byte/word swapping. */
11257 val = tr32(GRC_MODE);
11258 val &= GRC_MODE_HOST_STACKUP;
11259 tw32(GRC_MODE, val | tp->grc_mode);
11260
11261 tg3_switch_clocks(tp);
11262
11263 /* Clear this out for sanity. */
11264 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11265
11266 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11267 &pci_state_reg);
11268 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11269 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11270 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11271
11272 if (chiprevid == CHIPREV_ID_5701_A0 ||
11273 chiprevid == CHIPREV_ID_5701_B0 ||
11274 chiprevid == CHIPREV_ID_5701_B2 ||
11275 chiprevid == CHIPREV_ID_5701_B5) {
11276 void __iomem *sram_base;
11277
11278 /* Write some dummy words into the SRAM status block
11279 * area, see if it reads back correctly. If the return
11280 * value is bad, force enable the PCIX workaround.
11281 */
11282 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11283
11284 writel(0x00000000, sram_base);
11285 writel(0x00000000, sram_base + 4);
11286 writel(0xffffffff, sram_base + 4);
11287 if (readl(sram_base) != 0x00000000)
11288 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11289 }
11290 }
11291
11292 udelay(50);
11293 tg3_nvram_init(tp);
11294
11295 grc_misc_cfg = tr32(GRC_MISC_CFG);
11296 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11297
1da177e4
LT
11298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11299 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11300 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11301 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11302
fac9b83e
DM
11303 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11304 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11305 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11306 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11307 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11308 HOSTCC_MODE_CLRTICK_TXBD);
11309
11310 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11311 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11312 tp->misc_host_ctrl);
11313 }
11314
1da177e4
LT
11315 /* these are limited to 10/100 only */
11316 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11317 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11318 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11319 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11320 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11321 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11322 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11323 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11324 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
676917d4
MC
11325 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11326 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
b5d3772c 11327 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1da177e4
LT
11328 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11329
11330 err = tg3_phy_probe(tp);
11331 if (err) {
11332 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11333 pci_name(tp->pdev), err);
11334 /* ... but do not return immediately ... */
11335 }
11336
11337 tg3_read_partno(tp);
c4e6575c 11338 tg3_read_fw_ver(tp);
1da177e4
LT
11339
11340 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11341 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11342 } else {
11343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11344 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11345 else
11346 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11347 }
11348
11349 /* 5700 {AX,BX} chips have a broken status block link
11350 * change bit implementation, so we must use the
11351 * status register in those cases.
11352 */
11353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11354 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11355 else
11356 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11357
11358 /* The led_ctrl is set during tg3_phy_probe, here we might
11359 * have to force the link status polling mechanism based
11360 * upon subsystem IDs.
11361 */
11362 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 11363 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
1da177e4
LT
11364 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11365 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11366 TG3_FLAG_USE_LINKCHG_REG);
11367 }
11368
11369 /* For all SERDES we poll the MAC status register. */
11370 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11371 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11372 else
11373 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11374
5a6f3074 11375 /* All chips before 5787 can get confused if TX buffers
1da177e4
LT
11376 * straddle the 4GB address boundary in some cases.
11377 */
af36e6b6 11378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 11379 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11380 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
b5d3772c 11381 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
5a6f3074
MC
11382 tp->dev->hard_start_xmit = tg3_start_xmit;
11383 else
11384 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
1da177e4
LT
11385
11386 tp->rx_offset = 2;
11387 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11388 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11389 tp->rx_offset = 0;
11390
f92905de
MC
11391 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11392
11393 /* Increment the rx prod index on the rx std ring by at most
11394 * 8 for these chips to workaround hw errata.
11395 */
11396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11398 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11399 tp->rx_std_max_post = 8;
11400
1da177e4
LT
11401 /* By default, disable wake-on-lan. User can change this
11402 * using ETHTOOL_SWOL.
11403 */
11404 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
11405
8ed5d97e
MC
11406 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11407 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11408 PCIE_PWR_MGMT_L1_THRESH_MSK;
11409
1da177e4
LT
11410 return err;
11411}
11412
49b6e95f 11413#ifdef CONFIG_SPARC
1da177e4
LT
11414static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11415{
11416 struct net_device *dev = tp->dev;
11417 struct pci_dev *pdev = tp->pdev;
49b6e95f 11418 struct device_node *dp = pci_device_to_OF_node(pdev);
374d4cac 11419 const unsigned char *addr;
49b6e95f
DM
11420 int len;
11421
11422 addr = of_get_property(dp, "local-mac-address", &len);
11423 if (addr && len == 6) {
11424 memcpy(dev->dev_addr, addr, 6);
11425 memcpy(dev->perm_addr, dev->dev_addr, 6);
11426 return 0;
1da177e4
LT
11427 }
11428 return -ENODEV;
11429}
11430
/* Last-resort MAC address source on SPARC: copy the system-wide
 * Ethernet address from the machine's IDPROM.  Always succeeds.
 */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
11439#endif
11440
11441static int __devinit tg3_get_device_address(struct tg3 *tp)
11442{
11443 struct net_device *dev = tp->dev;
11444 u32 hi, lo, mac_offset;
008652b3 11445 int addr_ok = 0;
1da177e4 11446
49b6e95f 11447#ifdef CONFIG_SPARC
1da177e4
LT
11448 if (!tg3_get_macaddr_sparc(tp))
11449 return 0;
11450#endif
11451
11452 mac_offset = 0x7c;
f49639e6 11453 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
a4e2b347 11454 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
11455 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11456 mac_offset = 0xcc;
11457 if (tg3_nvram_lock(tp))
11458 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11459 else
11460 tg3_nvram_unlock(tp);
11461 }
b5d3772c
MC
11462 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11463 mac_offset = 0x10;
1da177e4
LT
11464
11465 /* First try to get it from MAC address mailbox. */
11466 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11467 if ((hi >> 16) == 0x484b) {
11468 dev->dev_addr[0] = (hi >> 8) & 0xff;
11469 dev->dev_addr[1] = (hi >> 0) & 0xff;
11470
11471 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11472 dev->dev_addr[2] = (lo >> 24) & 0xff;
11473 dev->dev_addr[3] = (lo >> 16) & 0xff;
11474 dev->dev_addr[4] = (lo >> 8) & 0xff;
11475 dev->dev_addr[5] = (lo >> 0) & 0xff;
1da177e4 11476
008652b3
MC
11477 /* Some old bootcode may report a 0 MAC address in SRAM */
11478 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11479 }
11480 if (!addr_ok) {
11481 /* Next, try NVRAM. */
f49639e6 11482 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
008652b3
MC
11483 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11484 dev->dev_addr[0] = ((hi >> 16) & 0xff);
11485 dev->dev_addr[1] = ((hi >> 24) & 0xff);
11486 dev->dev_addr[2] = ((lo >> 0) & 0xff);
11487 dev->dev_addr[3] = ((lo >> 8) & 0xff);
11488 dev->dev_addr[4] = ((lo >> 16) & 0xff);
11489 dev->dev_addr[5] = ((lo >> 24) & 0xff);
11490 }
11491 /* Finally just fetch it out of the MAC control regs. */
11492 else {
11493 hi = tr32(MAC_ADDR_0_HIGH);
11494 lo = tr32(MAC_ADDR_0_LOW);
11495
11496 dev->dev_addr[5] = lo & 0xff;
11497 dev->dev_addr[4] = (lo >> 8) & 0xff;
11498 dev->dev_addr[3] = (lo >> 16) & 0xff;
11499 dev->dev_addr[2] = (lo >> 24) & 0xff;
11500 dev->dev_addr[1] = hi & 0xff;
11501 dev->dev_addr[0] = (hi >> 8) & 0xff;
11502 }
1da177e4
LT
11503 }
11504
11505 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11506#ifdef CONFIG_SPARC64
11507 if (!tg3_get_default_macaddr_sparc(tp))
11508 return 0;
11509#endif
11510 return -EINVAL;
11511 }
2ff43697 11512 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
11513 return 0;
11514}
11515
59e6b434
DM
11516#define BOUNDARY_SINGLE_CACHELINE 1
11517#define BOUNDARY_MULTI_CACHELINE 2
11518
11519static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11520{
11521 int cacheline_size;
11522 u8 byte;
11523 int goal;
11524
11525 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11526 if (byte == 0)
11527 cacheline_size = 1024;
11528 else
11529 cacheline_size = (int) byte * 4;
11530
11531 /* On 5703 and later chips, the boundary bits have no
11532 * effect.
11533 */
11534 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11535 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11536 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11537 goto out;
11538
11539#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11540 goal = BOUNDARY_MULTI_CACHELINE;
11541#else
11542#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11543 goal = BOUNDARY_SINGLE_CACHELINE;
11544#else
11545 goal = 0;
11546#endif
11547#endif
11548
11549 if (!goal)
11550 goto out;
11551
11552 /* PCI controllers on most RISC systems tend to disconnect
11553 * when a device tries to burst across a cache-line boundary.
11554 * Therefore, letting tg3 do so just wastes PCI bandwidth.
11555 *
11556 * Unfortunately, for PCI-E there are only limited
11557 * write-side controls for this, and thus for reads
11558 * we will still get the disconnects. We'll also waste
11559 * these PCI cycles for both read and write for chips
11560 * other than 5700 and 5701 which do not implement the
11561 * boundary bits.
11562 */
11563 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11564 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11565 switch (cacheline_size) {
11566 case 16:
11567 case 32:
11568 case 64:
11569 case 128:
11570 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11571 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11572 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11573 } else {
11574 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11575 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11576 }
11577 break;
11578
11579 case 256:
11580 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11581 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11582 break;
11583
11584 default:
11585 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11586 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11587 break;
11588 };
11589 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11590 switch (cacheline_size) {
11591 case 16:
11592 case 32:
11593 case 64:
11594 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11595 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11596 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11597 break;
11598 }
11599 /* fallthrough */
11600 case 128:
11601 default:
11602 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11603 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11604 break;
11605 };
11606 } else {
11607 switch (cacheline_size) {
11608 case 16:
11609 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11610 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11611 DMA_RWCTRL_WRITE_BNDRY_16);
11612 break;
11613 }
11614 /* fallthrough */
11615 case 32:
11616 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11617 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11618 DMA_RWCTRL_WRITE_BNDRY_32);
11619 break;
11620 }
11621 /* fallthrough */
11622 case 64:
11623 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11624 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11625 DMA_RWCTRL_WRITE_BNDRY_64);
11626 break;
11627 }
11628 /* fallthrough */
11629 case 128:
11630 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11631 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11632 DMA_RWCTRL_WRITE_BNDRY_128);
11633 break;
11634 }
11635 /* fallthrough */
11636 case 256:
11637 val |= (DMA_RWCTRL_READ_BNDRY_256 |
11638 DMA_RWCTRL_WRITE_BNDRY_256);
11639 break;
11640 case 512:
11641 val |= (DMA_RWCTRL_READ_BNDRY_512 |
11642 DMA_RWCTRL_WRITE_BNDRY_512);
11643 break;
11644 case 1024:
11645 default:
11646 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11647 DMA_RWCTRL_WRITE_BNDRY_1024);
11648 break;
11649 };
11650 }
11651
11652out:
11653 return val;
11654}
11655
1da177e4
LT
/* Run a single host<->NIC DMA transfer of @size bytes through the
 * chip's internal DMA descriptor mechanism and poll for completion.
 *
 * @tp:        device state
 * @buf:       host-side test buffer (CPU view; unused here, data is
 *             moved by the chip via @buf_dma)
 * @buf_dma:   DMA address of @buf
 * @size:      transfer length in bytes
 * @to_device: non-zero for host->NIC (read DMA engine), zero for
 *             NIC->host (write DMA engine)
 *
 * Returns 0 when the completion FIFO reports the descriptor back,
 * -ENODEV if it fails to complete within ~4ms of polling.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA engines and clear the completion FIFOs. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer to the chip.  0x2100 is the NIC-side
	 * SRAM offset used by the test (the disabled readback check in
	 * tg3_test_dma() reads the same offset).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll for completion: up to 40 iterations of 100us each. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11736
ded7340d 11737#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
11738
11739static int __devinit tg3_test_dma(struct tg3 *tp)
11740{
11741 dma_addr_t buf_dma;
59e6b434 11742 u32 *buf, saved_dma_rwctrl;
1da177e4
LT
11743 int ret;
11744
11745 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11746 if (!buf) {
11747 ret = -ENOMEM;
11748 goto out_nofree;
11749 }
11750
11751 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11752 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11753
59e6b434 11754 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
1da177e4
LT
11755
11756 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11757 /* DMA read watermark not used on PCIE */
11758 tp->dma_rwctrl |= 0x00180000;
11759 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
85e94ced
MC
11760 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11761 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
1da177e4
LT
11762 tp->dma_rwctrl |= 0x003f0000;
11763 else
11764 tp->dma_rwctrl |= 0x003f000f;
11765 } else {
11766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11768 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
49afdeb6 11769 u32 read_water = 0x7;
1da177e4 11770
4a29cc2e
MC
11771 /* If the 5704 is behind the EPB bridge, we can
11772 * do the less restrictive ONE_DMA workaround for
11773 * better performance.
11774 */
11775 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11776 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11777 tp->dma_rwctrl |= 0x8000;
11778 else if (ccval == 0x6 || ccval == 0x7)
1da177e4
LT
11779 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11780
49afdeb6
MC
11781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
11782 read_water = 4;
59e6b434 11783 /* Set bit 23 to enable PCIX hw bug fix */
49afdeb6
MC
11784 tp->dma_rwctrl |=
11785 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
11786 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
11787 (1 << 23);
4cf78e4f
MC
11788 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11789 /* 5780 always in PCIX mode */
11790 tp->dma_rwctrl |= 0x00144000;
a4e2b347
MC
11791 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11792 /* 5714 always in PCIX mode */
11793 tp->dma_rwctrl |= 0x00148000;
1da177e4
LT
11794 } else {
11795 tp->dma_rwctrl |= 0x001b000f;
11796 }
11797 }
11798
11799 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11800 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11801 tp->dma_rwctrl &= 0xfffffff0;
11802
11803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11804 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11805 /* Remove this if it causes problems for some boards. */
11806 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11807
11808 /* On 5700/5701 chips, we need to set this bit.
11809 * Otherwise the chip will issue cacheline transactions
11810 * to streamable DMA memory with not all the byte
11811 * enables turned on. This is an error on several
11812 * RISC PCI controllers, in particular sparc64.
11813 *
11814 * On 5703/5704 chips, this bit has been reassigned
11815 * a different meaning. In particular, it is used
11816 * on those chips to enable a PCI-X workaround.
11817 */
11818 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11819 }
11820
11821 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11822
11823#if 0
11824 /* Unneeded, already done by tg3_get_invariants. */
11825 tg3_switch_clocks(tp);
11826#endif
11827
11828 ret = 0;
11829 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11830 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11831 goto out;
11832
59e6b434
DM
11833 /* It is best to perform DMA test with maximum write burst size
11834 * to expose the 5700/5701 write DMA bug.
11835 */
11836 saved_dma_rwctrl = tp->dma_rwctrl;
11837 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11838 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11839
1da177e4
LT
11840 while (1) {
11841 u32 *p = buf, i;
11842
11843 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11844 p[i] = i;
11845
11846 /* Send the buffer to the chip. */
11847 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11848 if (ret) {
11849 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11850 break;
11851 }
11852
11853#if 0
11854 /* validate data reached card RAM correctly. */
11855 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11856 u32 val;
11857 tg3_read_mem(tp, 0x2100 + (i*4), &val);
11858 if (le32_to_cpu(val) != p[i]) {
11859 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
11860 /* ret = -ENODEV here? */
11861 }
11862 p[i] = 0;
11863 }
11864#endif
11865 /* Now read it back. */
11866 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11867 if (ret) {
11868 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11869
11870 break;
11871 }
11872
11873 /* Verify it. */
11874 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11875 if (p[i] == i)
11876 continue;
11877
59e6b434
DM
11878 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11879 DMA_RWCTRL_WRITE_BNDRY_16) {
11880 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
1da177e4
LT
11881 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11882 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11883 break;
11884 } else {
11885 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11886 ret = -ENODEV;
11887 goto out;
11888 }
11889 }
11890
11891 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11892 /* Success. */
11893 ret = 0;
11894 break;
11895 }
11896 }
59e6b434
DM
11897 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11898 DMA_RWCTRL_WRITE_BNDRY_16) {
6d1cfbab
MC
11899 static struct pci_device_id dma_wait_state_chipsets[] = {
11900 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11901 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11902 { },
11903 };
11904
59e6b434 11905 /* DMA test passed without adjusting DMA boundary,
6d1cfbab
MC
11906 * now look for chipsets that are known to expose the
11907 * DMA bug without failing the test.
59e6b434 11908 */
6d1cfbab
MC
11909 if (pci_dev_present(dma_wait_state_chipsets)) {
11910 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11911 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11912 }
11913 else
11914 /* Safe to use the calculated DMA boundary. */
11915 tp->dma_rwctrl = saved_dma_rwctrl;
11916
59e6b434
DM
11917 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11918 }
1da177e4
LT
11919
11920out:
11921 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11922out_nofree:
11923 return ret;
11924}
11925
11926static void __devinit tg3_init_link_config(struct tg3 *tp)
11927{
11928 tp->link_config.advertising =
11929 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11930 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11931 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11932 ADVERTISED_Autoneg | ADVERTISED_MII);
11933 tp->link_config.speed = SPEED_INVALID;
11934 tp->link_config.duplex = DUPLEX_INVALID;
11935 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
11936 tp->link_config.active_speed = SPEED_INVALID;
11937 tp->link_config.active_duplex = DUPLEX_INVALID;
11938 tp->link_config.phy_is_low_power = 0;
11939 tp->link_config.orig_speed = SPEED_INVALID;
11940 tp->link_config.orig_duplex = DUPLEX_INVALID;
11941 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11942}
11943
11944static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11945{
fdfec172
MC
11946 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11947 tp->bufmgr_config.mbuf_read_dma_low_water =
11948 DEFAULT_MB_RDMA_LOW_WATER_5705;
11949 tp->bufmgr_config.mbuf_mac_rx_low_water =
11950 DEFAULT_MB_MACRX_LOW_WATER_5705;
11951 tp->bufmgr_config.mbuf_high_water =
11952 DEFAULT_MB_HIGH_WATER_5705;
b5d3772c
MC
11953 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11954 tp->bufmgr_config.mbuf_mac_rx_low_water =
11955 DEFAULT_MB_MACRX_LOW_WATER_5906;
11956 tp->bufmgr_config.mbuf_high_water =
11957 DEFAULT_MB_HIGH_WATER_5906;
11958 }
fdfec172
MC
11959
11960 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11961 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11962 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11963 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11964 tp->bufmgr_config.mbuf_high_water_jumbo =
11965 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11966 } else {
11967 tp->bufmgr_config.mbuf_read_dma_low_water =
11968 DEFAULT_MB_RDMA_LOW_WATER;
11969 tp->bufmgr_config.mbuf_mac_rx_low_water =
11970 DEFAULT_MB_MACRX_LOW_WATER;
11971 tp->bufmgr_config.mbuf_high_water =
11972 DEFAULT_MB_HIGH_WATER;
11973
11974 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11975 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11976 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11977 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11978 tp->bufmgr_config.mbuf_high_water_jumbo =
11979 DEFAULT_MB_HIGH_WATER_JUMBO;
11980 }
1da177e4
LT
11981
11982 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11983 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11984}
11985
11986static char * __devinit tg3_phy_string(struct tg3 *tp)
11987{
11988 switch (tp->phy_id & PHY_ID_MASK) {
11989 case PHY_ID_BCM5400: return "5400";
11990 case PHY_ID_BCM5401: return "5401";
11991 case PHY_ID_BCM5411: return "5411";
11992 case PHY_ID_BCM5701: return "5701";
11993 case PHY_ID_BCM5703: return "5703";
11994 case PHY_ID_BCM5704: return "5704";
11995 case PHY_ID_BCM5705: return "5705";
11996 case PHY_ID_BCM5750: return "5750";
85e94ced 11997 case PHY_ID_BCM5752: return "5752";
a4e2b347 11998 case PHY_ID_BCM5714: return "5714";
4cf78e4f 11999 case PHY_ID_BCM5780: return "5780";
af36e6b6 12000 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 12001 case PHY_ID_BCM5787: return "5787";
d30cdd28 12002 case PHY_ID_BCM5784: return "5784";
126a3368 12003 case PHY_ID_BCM5756: return "5722/5756";
b5d3772c 12004 case PHY_ID_BCM5906: return "5906";
1da177e4
LT
12005 case PHY_ID_BCM8002: return "8002/serdes";
12006 case 0: return "serdes";
12007 default: return "unknown";
12008 };
12009}
12010
f9804ddb
MC
12011static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12012{
12013 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12014 strcpy(str, "PCI Express");
12015 return str;
12016 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12017 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12018
12019 strcpy(str, "PCIX:");
12020
12021 if ((clock_ctrl == 7) ||
12022 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12023 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12024 strcat(str, "133MHz");
12025 else if (clock_ctrl == 0)
12026 strcat(str, "33MHz");
12027 else if (clock_ctrl == 2)
12028 strcat(str, "50MHz");
12029 else if (clock_ctrl == 4)
12030 strcat(str, "66MHz");
12031 else if (clock_ctrl == 6)
12032 strcat(str, "100MHz");
f9804ddb
MC
12033 } else {
12034 strcpy(str, "PCI:");
12035 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12036 strcat(str, "66MHz");
12037 else
12038 strcat(str, "33MHz");
12039 }
12040 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12041 strcat(str, ":32-bit");
12042 else
12043 strcat(str, ":64-bit");
12044 return str;
12045}
12046
8c2dc7e1 12047static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
1da177e4
LT
12048{
12049 struct pci_dev *peer;
12050 unsigned int func, devnr = tp->pdev->devfn & ~7;
12051
12052 for (func = 0; func < 8; func++) {
12053 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12054 if (peer && peer != tp->pdev)
12055 break;
12056 pci_dev_put(peer);
12057 }
16fe9d74
MC
12058 /* 5704 can be configured in single-port mode, set peer to
12059 * tp->pdev in that case.
12060 */
12061 if (!peer) {
12062 peer = tp->pdev;
12063 return peer;
12064 }
1da177e4
LT
12065
12066 /*
12067 * We don't need to keep the refcount elevated; there's no way
12068 * to remove one half of this device without removing the other
12069 */
12070 pci_dev_put(peer);
12071
12072 return peer;
12073}
12074
15f9850d
DM
12075static void __devinit tg3_init_coal(struct tg3 *tp)
12076{
12077 struct ethtool_coalesce *ec = &tp->coal;
12078
12079 memset(ec, 0, sizeof(*ec));
12080 ec->cmd = ETHTOOL_GCOALESCE;
12081 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12082 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12083 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12084 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12085 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12086 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12087 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12088 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12089 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12090
12091 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12092 HOSTCC_MODE_CLRTICK_TXBD)) {
12093 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12094 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12095 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12096 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12097 }
d244c892
MC
12098
12099 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12100 ec->rx_coalesce_usecs_irq = 0;
12101 ec->tx_coalesce_usecs_irq = 0;
12102 ec->stats_block_coalesce_usecs = 0;
12103 }
15f9850d
DM
12104}
12105
1da177e4
LT
12106static int __devinit tg3_init_one(struct pci_dev *pdev,
12107 const struct pci_device_id *ent)
12108{
12109 static int tg3_version_printed = 0;
12110 unsigned long tg3reg_base, tg3reg_len;
12111 struct net_device *dev;
12112 struct tg3 *tp;
72f2afb8 12113 int i, err, pm_cap;
f9804ddb 12114 char str[40];
72f2afb8 12115 u64 dma_mask, persist_dma_mask;
1da177e4
LT
12116
12117 if (tg3_version_printed++ == 0)
12118 printk(KERN_INFO "%s", version);
12119
12120 err = pci_enable_device(pdev);
12121 if (err) {
12122 printk(KERN_ERR PFX "Cannot enable PCI device, "
12123 "aborting.\n");
12124 return err;
12125 }
12126
12127 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12128 printk(KERN_ERR PFX "Cannot find proper PCI device "
12129 "base address, aborting.\n");
12130 err = -ENODEV;
12131 goto err_out_disable_pdev;
12132 }
12133
12134 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12135 if (err) {
12136 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12137 "aborting.\n");
12138 goto err_out_disable_pdev;
12139 }
12140
12141 pci_set_master(pdev);
12142
12143 /* Find power-management capability. */
12144 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12145 if (pm_cap == 0) {
12146 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12147 "aborting.\n");
12148 err = -EIO;
12149 goto err_out_free_res;
12150 }
12151
1da177e4
LT
12152 tg3reg_base = pci_resource_start(pdev, 0);
12153 tg3reg_len = pci_resource_len(pdev, 0);
12154
12155 dev = alloc_etherdev(sizeof(*tp));
12156 if (!dev) {
12157 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12158 err = -ENOMEM;
12159 goto err_out_free_res;
12160 }
12161
1da177e4
LT
12162 SET_NETDEV_DEV(dev, &pdev->dev);
12163
1da177e4
LT
12164#if TG3_VLAN_TAG_USED
12165 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12166 dev->vlan_rx_register = tg3_vlan_rx_register;
1da177e4
LT
12167#endif
12168
12169 tp = netdev_priv(dev);
12170 tp->pdev = pdev;
12171 tp->dev = dev;
12172 tp->pm_cap = pm_cap;
12173 tp->mac_mode = TG3_DEF_MAC_MODE;
12174 tp->rx_mode = TG3_DEF_RX_MODE;
12175 tp->tx_mode = TG3_DEF_TX_MODE;
12176 tp->mi_mode = MAC_MI_MODE_BASE;
12177 if (tg3_debug > 0)
12178 tp->msg_enable = tg3_debug;
12179 else
12180 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12181
12182 /* The word/byte swap controls here control register access byte
12183 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12184 * setting below.
12185 */
12186 tp->misc_host_ctrl =
12187 MISC_HOST_CTRL_MASK_PCI_INT |
12188 MISC_HOST_CTRL_WORD_SWAP |
12189 MISC_HOST_CTRL_INDIR_ACCESS |
12190 MISC_HOST_CTRL_PCISTATE_RW;
12191
12192 /* The NONFRM (non-frame) byte/word swap controls take effect
12193 * on descriptor entries, anything which isn't packet data.
12194 *
12195 * The StrongARM chips on the board (one for tx, one for rx)
12196 * are running in big-endian mode.
12197 */
12198 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12199 GRC_MODE_WSWAP_NONFRM_DATA);
12200#ifdef __BIG_ENDIAN
12201 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12202#endif
12203 spin_lock_init(&tp->lock);
1da177e4 12204 spin_lock_init(&tp->indirect_lock);
c4028958 12205 INIT_WORK(&tp->reset_task, tg3_reset_task);
1da177e4
LT
12206
12207 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
ab0049b4 12208 if (!tp->regs) {
1da177e4
LT
12209 printk(KERN_ERR PFX "Cannot map device registers, "
12210 "aborting.\n");
12211 err = -ENOMEM;
12212 goto err_out_free_dev;
12213 }
12214
12215 tg3_init_link_config(tp);
12216
1da177e4
LT
12217 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12218 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12219 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12220
12221 dev->open = tg3_open;
12222 dev->stop = tg3_close;
12223 dev->get_stats = tg3_get_stats;
12224 dev->set_multicast_list = tg3_set_rx_mode;
12225 dev->set_mac_address = tg3_set_mac_addr;
12226 dev->do_ioctl = tg3_ioctl;
12227 dev->tx_timeout = tg3_tx_timeout;
bea3348e 12228 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
1da177e4 12229 dev->ethtool_ops = &tg3_ethtool_ops;
1da177e4
LT
12230 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12231 dev->change_mtu = tg3_change_mtu;
12232 dev->irq = pdev->irq;
12233#ifdef CONFIG_NET_POLL_CONTROLLER
12234 dev->poll_controller = tg3_poll_controller;
12235#endif
12236
12237 err = tg3_get_invariants(tp);
12238 if (err) {
12239 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12240 "aborting.\n");
12241 goto err_out_iounmap;
12242 }
12243
4a29cc2e
MC
12244 /* The EPB bridge inside 5714, 5715, and 5780 and any
12245 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
12246 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12247 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12248 * do DMA address check in tg3_start_xmit().
12249 */
4a29cc2e
MC
12250 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12251 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12252 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
72f2afb8
MC
12253 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12254#ifdef CONFIG_HIGHMEM
12255 dma_mask = DMA_64BIT_MASK;
12256#endif
4a29cc2e 12257 } else
72f2afb8
MC
12258 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12259
12260 /* Configure DMA attributes. */
12261 if (dma_mask > DMA_32BIT_MASK) {
12262 err = pci_set_dma_mask(pdev, dma_mask);
12263 if (!err) {
12264 dev->features |= NETIF_F_HIGHDMA;
12265 err = pci_set_consistent_dma_mask(pdev,
12266 persist_dma_mask);
12267 if (err < 0) {
12268 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12269 "DMA for consistent allocations\n");
12270 goto err_out_iounmap;
12271 }
12272 }
12273 }
12274 if (err || dma_mask == DMA_32BIT_MASK) {
12275 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12276 if (err) {
12277 printk(KERN_ERR PFX "No usable DMA configuration, "
12278 "aborting.\n");
12279 goto err_out_iounmap;
12280 }
12281 }
12282
fdfec172 12283 tg3_init_bufmgr_config(tp);
1da177e4 12284
1da177e4
LT
12285 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12286 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12287 }
12288 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12290 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
c7835a77 12291 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
1da177e4
LT
12292 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12293 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12294 } else {
7f62ad5d 12295 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
1da177e4
LT
12296 }
12297
4e3a7aaa
MC
12298 /* TSO is on by default on chips that support hardware TSO.
12299 * Firmware TSO on older chips gives lower performance, so it
12300 * is off by default, but can be enabled using ethtool.
12301 */
b0026624 12302 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
1da177e4 12303 dev->features |= NETIF_F_TSO;
b5d3772c
MC
12304 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12305 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
b0026624
MC
12306 dev->features |= NETIF_F_TSO6;
12307 }
1da177e4 12308
1da177e4
LT
12309
12310 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12311 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12312 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12313 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12314 tp->rx_pending = 63;
12315 }
12316
1da177e4
LT
12317 err = tg3_get_device_address(tp);
12318 if (err) {
12319 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12320 "aborting.\n");
12321 goto err_out_iounmap;
12322 }
12323
12324 /*
12325 * Reset chip in case UNDI or EFI driver did not shutdown
12326 * DMA self test will enable WDMAC and we'll see (spurious)
12327 * pending DMA on the PCI bus at that point.
12328 */
12329 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12330 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
1da177e4 12331 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 12332 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
12333 }
12334
12335 err = tg3_test_dma(tp);
12336 if (err) {
12337 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12338 goto err_out_iounmap;
12339 }
12340
12341 /* Tigon3 can do ipv4 only... and some chips have buggy
12342 * checksumming.
12343 */
12344 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
d212f87b 12345 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
af36e6b6 12346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28
MC
12347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
d212f87b
SH
12349 dev->features |= NETIF_F_IPV6_CSUM;
12350
1da177e4
LT
12351 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12352 } else
12353 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12354
1da177e4
LT
12355 /* flow control autonegotiation is default behavior */
12356 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12357
15f9850d
DM
12358 tg3_init_coal(tp);
12359
0d3031d9
MC
12360 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12361 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12362 printk(KERN_ERR PFX "Cannot find proper PCI device "
12363 "base address for APE, aborting.\n");
12364 err = -ENODEV;
12365 goto err_out_iounmap;
12366 }
12367
12368 tg3reg_base = pci_resource_start(pdev, 2);
12369 tg3reg_len = pci_resource_len(pdev, 2);
12370
12371 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12372 if (tp->aperegs == 0UL) {
12373 printk(KERN_ERR PFX "Cannot map APE registers, "
12374 "aborting.\n");
12375 err = -ENOMEM;
12376 goto err_out_iounmap;
12377 }
12378
12379 tg3_ape_lock_init(tp);
12380 }
12381
c49a1561
MC
12382 pci_set_drvdata(pdev, dev);
12383
1da177e4
LT
12384 err = register_netdev(dev);
12385 if (err) {
12386 printk(KERN_ERR PFX "Cannot register net device, "
12387 "aborting.\n");
0d3031d9 12388 goto err_out_apeunmap;
1da177e4
LT
12389 }
12390
cbb45d21 12391 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
1da177e4
LT
12392 dev->name,
12393 tp->board_part_number,
12394 tp->pci_chip_rev_id,
12395 tg3_phy_string(tp),
f9804ddb 12396 tg3_bus_string(tp, str),
cbb45d21
MC
12397 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12398 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12399 "10/100/1000Base-T")));
1da177e4
LT
12400
12401 for (i = 0; i < 6; i++)
12402 printk("%2.2x%c", dev->dev_addr[i],
12403 i == 5 ? '\n' : ':');
12404
12405 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
1c46ae05 12406 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
1da177e4
LT
12407 dev->name,
12408 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12409 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12410 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12411 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
1da177e4
LT
12412 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12413 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
4a29cc2e
MC
12414 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12415 dev->name, tp->dma_rwctrl,
12416 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12417 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
1da177e4
LT
12418
12419 return 0;
12420
0d3031d9
MC
12421err_out_apeunmap:
12422 if (tp->aperegs) {
12423 iounmap(tp->aperegs);
12424 tp->aperegs = NULL;
12425 }
12426
1da177e4 12427err_out_iounmap:
6892914f
MC
12428 if (tp->regs) {
12429 iounmap(tp->regs);
22abe310 12430 tp->regs = NULL;
6892914f 12431 }
1da177e4
LT
12432
12433err_out_free_dev:
12434 free_netdev(dev);
12435
12436err_out_free_res:
12437 pci_release_regions(pdev);
12438
12439err_out_disable_pdev:
12440 pci_disable_device(pdev);
12441 pci_set_drvdata(pdev, NULL);
12442 return err;
12443}
12444
12445static void __devexit tg3_remove_one(struct pci_dev *pdev)
12446{
12447 struct net_device *dev = pci_get_drvdata(pdev);
12448
12449 if (dev) {
12450 struct tg3 *tp = netdev_priv(dev);
12451
7faa006f 12452 flush_scheduled_work();
1da177e4 12453 unregister_netdev(dev);
0d3031d9
MC
12454 if (tp->aperegs) {
12455 iounmap(tp->aperegs);
12456 tp->aperegs = NULL;
12457 }
6892914f
MC
12458 if (tp->regs) {
12459 iounmap(tp->regs);
22abe310 12460 tp->regs = NULL;
6892914f 12461 }
1da177e4
LT
12462 free_netdev(dev);
12463 pci_release_regions(pdev);
12464 pci_disable_device(pdev);
12465 pci_set_drvdata(pdev, NULL);
12466 }
12467}
12468
/* Legacy PCI power-management suspend hook.
 *
 * Saves PCI config space, quiesces the interface (stops the netif,
 * kills the timer, masks interrupts, halts the chip under the full
 * lock) and then asks the chip to enter the chosen low-power state.
 * If the power transition fails, the hardware is restarted so the
 * interface keeps working, and the original error is returned.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	/* If the interface is down there is nothing further to quiesce. */
	if (!netif_running(dev))
		return 0;

	/* Drain any queued tg3_reset_task before touching the hardware. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark it uninitialized before powering down. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power-down failed: bring the interface back up so the
		 * device remains usable, but still report the error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12520
/* Legacy PCI power-management resume hook.
 *
 * Restores PCI config space, brings the chip back to full power (D0),
 * re-attaches the netdev and restarts the hardware and the driver
 * timer under the full lock.  Returns 0 on success or a negative
 * errno from the power transition / hardware restart.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Undo the pci_save_state() done in tg3_suspend(). */
	pci_restore_state(tp->pdev);

	/* Interface was down at suspend time; nothing to restart. */
	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	/* Hardware bug - MSI won't work if INTX disabled. */
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		pci_intx(tp->pdev, 1);

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer stopped in tg3_suspend(). */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12560
/* PCI driver glue: ID table plus probe/remove and the legacy
 * suspend/resume entry points registered with the PCI core.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12569
12570static int __init tg3_init(void)
12571{
29917620 12572 return pci_register_driver(&tg3_driver);
1da177e4
LT
12573}
12574
12575static void __exit tg3_cleanup(void)
12576{
12577 pci_unregister_driver(&tg3_driver);
12578}
12579
/* Wire the module load/unload hooks into the kernel module machinery. */
module_init(tg3_init);
module_exit(tg3_cleanup);