]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Add 5709 PHY support.
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
14c85021 26#include <linux/in.h>
1da177e4
LT
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
f9a5f7d3 40#include <linux/dma-mapping.h>
1da177e4
LT
41
42#include <net/checksum.h>
43
44#include <asm/system.h>
45#include <asm/io.h>
46#include <asm/byteorder.h>
47#include <asm/uaccess.h>
48
49#ifdef CONFIG_SPARC64
50#include <asm/idprom.h>
51#include <asm/oplib.h>
52#include <asm/pbm.h>
53#endif
54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56#define TG3_VLAN_TAG_USED 1
57#else
58#define TG3_VLAN_TAG_USED 0
59#endif
60
61#ifdef NETIF_F_TSO
62#define TG3_TSO_SUPPORT 1
63#else
64#define TG3_TSO_SUPPORT 0
65#endif
66
67#include "tg3.h"
68
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.65"
#define DRV_MODULE_RELDATE	"August 07, 2006"

/* Reset values for the MAC/RX/TX mode registers. */
#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif_msg categories when the tg3_debug module param is -1. */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts. */
#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* Advance a TX index with power-of-two wraparound. */
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: payload plus the chip's RX offset plus 64 bytes
 * of slack.  Note these reference a local 'tp' at the expansion site.
 */
#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH	(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

/* number of entries in the ethtool self-test result array */
#define TG3_NUM_TEST		6
138
1da177e4
LT
/* Banner printed once at probe time to identify the driver build. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* categories to enable. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
/* PCI IDs of every Tigon3 variant this driver binds to, including the
 * SysKonnect, Altima and Apple rebrands.  Terminated by an empty entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
215
/* Names reported for ETHTOOL_GSTRINGS; order must match the u64 layout
 * of struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
296
/* Names for the TG3_NUM_TEST ethtool self-test results, in the order
 * the tests are executed.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
307
b401e9e2
MC
308static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
309{
310 writel(val, tp->regs + off);
311}
312
313static u32 tg3_read32(struct tg3 *tp, u32 off)
314{
6aa20a22 315 return (readl(tp->regs + off));
b401e9e2
MC
316}
317
1da177e4
LT
/* Write register @off with @val through the PCI config-space indirect
 * window (REG_BASE_ADDR selects the register, REG_DATA carries the
 * value).  The window is a shared resource, so the two config writes
 * are serialized with indirect_lock and IRQs disabled.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
327
/* MMIO write followed by an immediate read-back of the same register,
 * forcing the posted write out to the chip before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
333
/* Read register @off through the PCI config-space indirect window,
 * under indirect_lock (the window is shared with the write paths).
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
345
/* Write a mailbox register via PCI config space.  The RX return-ring
 * consumer and standard-ring producer mailboxes have dedicated
 * config-space aliases and bypass the lock; all other mailboxes go
 * through the shared indirect window (offset +0x5600) under
 * indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
375
/* Read a mailbox register via the shared PCI config-space indirect
 * window (offset +0x5600), serialized with indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
387
b401e9e2
MC
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 is the indirect
		 * config-space path here, so no flush read is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: write, optionally delay, then read back
		 * to force the write out to the chip.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
412
09ee929c
MC
/* Write a mailbox register and flush it with a read-back, unless the
 * chipset reorders mailbox writes (read-back would not help) or the
 * ICH workaround forbids the read.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
420
/* Write a TX mailbox.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips that reorder mailbox writes need a
 * read-back to flush the write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
430
b5d3772c
MC
431static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
432{
433 return (readl(tp->regs + off + GRCMBOX_BASE));
434}
435
436static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
437{
438 writel(val, tp->regs + off + GRCMBOX_BASE);
439}
440
/* Convenience accessors that dispatch through the per-chip read/write
 * method pointers selected at probe time.  All of them expect a local
 * 'tp' in scope at the expansion site.
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
1da177e4
LT
451
/* Write @val into NIC on-board SRAM at offset @off through the memory
 * window.  On 5906, writes into the stats block region are silently
 * dropped.  Depending on TG3_FLAG_SRAM_USE_CONFIG the window is driven
 * via PCI config space or via MMIO; either way the base address is
 * restored to zero afterwards and the sequence is serialized with
 * indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: the stats block SRAM range is not writable this way. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
476
1da177e4
LT
/* Read NIC on-board SRAM at offset @off into *@val through the memory
 * window, mirroring tg3_write_mem().  On 5906 the stats block region
 * is not readable this way and *val is forced to zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
503
/* Mask PCI interrupts at the chip level and write 1 to the interrupt
 * mailbox to deassert the line.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
510
/* Force an interrupt if status work is already pending.  Without tagged
 * status and with SD_STATUS_UPDATED set, assert the line directly via
 * GRC local control; otherwise kick the coalescing engine so the
 * status block is regenerated now.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
520
/* Re-enable chip interrupts: clear irq_sync (with a barrier so other
 * CPUs see it before the unmask), unmask at MISC_HOST_CTRL, and write
 * the last seen status tag into the interrupt mailbox.  1-shot MSI
 * chips need the mailbox written twice.  Finally force an interrupt if
 * work was already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
535
04237ddd
MC
536static inline unsigned int tg3_has_work(struct tg3 *tp)
537{
538 struct tg3_hw_status *sblk = tp->hw_status;
539 unsigned int work_exists = 0;
540
541 /* check for phy events */
542 if (!(tp->tg3_flags &
543 (TG3_FLAG_USE_LINKCHG_REG |
544 TG3_FLAG_POLL_SERDES))) {
545 if (sblk->status & SD_STATUS_LINK_CHG)
546 work_exists = 1;
547 }
548 /* check for RX/TX work to do */
549 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
550 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
551 work_exists = 1;
552
553 return work_exists;
554}
555
/* tg3_restart_ints
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Acknowledge up to the last processed tag; no flush read here. */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
576
/* Stop NAPI polling and the TX queue.  Refreshing trans_start first
 * keeps the watchdog from firing a spurious tx timeout while the
 * queue is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
583
/* Restart the TX queue and NAPI polling, then force SD_STATUS_UPDATED
 * and re-enable interrupts so any work that accumulated while stopped
 * is picked up immediately.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
595
/* Switch the chip core clock back to its normal source.  No-op on
 * 5780-class chips.  5705+ parts that are running from the 625 MHz
 * core clock keep it selected; older parts on the 44 MHz core go
 * through an intermediate ALTCLK step, as the clock source must not
 * be changed in a single write.  Each write uses a 40 usec settle
 * delay (tw32_wait_f) because reading CLOCK_CTRL back too early is
 * unsafe.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
626
/* Maximum number of 10 usec polls of MI_COM_BUSY per PHY transaction. */
#define PHY_BUSY_LOOPS	5000

/* Read PHY register @reg into *@val over the MI (MDIO) interface.
 * Auto-polling is temporarily disabled so we own the MI bus, a read
 * frame is launched via MAC_MI_COM, and MI_COM_BUSY is polled until
 * the transaction completes.  Returns 0 on success, -EBUSY on
 * timeout (in which case *val is left at 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle; the data field is
			 * only valid once BUSY has cleared.
			 */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
677
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * On 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * ignored (reported as success).  Otherwise mirrors tg3_readphy():
 * auto-polling is suspended, a write frame is launched via MAC_MI_COM,
 * and MI_COM_BUSY is polled.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
726
/* Enable the PHY "ethernet@wirespeed" feature (auxiliary control
 * shadow register, bits 15 and 4), unless the chip is flagged as not
 * supporting it.  Best-effort: failures are ignored.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	/* Select the shadow register with 0x7007, then read-modify-write. */
	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
739
740static int tg3_bmcr_reset(struct tg3 *tp)
741{
742 u32 phy_control;
743 int limit, err;
744
745 /* OK, reset it, and poll the BMCR_RESET bit until it
746 * clears or we time out.
747 */
748 phy_control = BMCR_RESET;
749 err = tg3_writephy(tp, MII_BMCR, phy_control);
750 if (err != 0)
751 return -EBUSY;
752
753 limit = 5000;
754 while (limit--) {
755 err = tg3_readphy(tp, MII_BMCR, &phy_control);
756 if (err != 0)
757 return -EBUSY;
758
759 if ((phy_control & BMCR_RESET) == 0) {
760 udelay(40);
761 break;
762 }
763 udelay(10);
764 }
765 if (limit <= 0)
766 return -EBUSY;
767
768 return 0;
769}
770
771static int tg3_wait_macro_done(struct tg3 *tp)
772{
773 int limit = 100;
774
775 while (limit--) {
776 u32 tmp32;
777
778 if (!tg3_readphy(tp, 0x16, &tmp32)) {
779 if ((tmp32 & 0x1000) == 0)
780 break;
781 }
782 }
783 if (limit <= 0)
784 return -EBUSY;
785
786 return 0;
787}
788
/* Write a fixed DSP test pattern into all four PHY channels and read
 * it back to verify.  On any mismatch or macro timeout, *resetp is set
 * so the caller retries after another PHY reset.  Returns 0 when every
 * channel verifies, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Point the DSP at this channel's block and write the
		 * six-word pattern.
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to read-back mode for the same block. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back low/high word pairs and compare (low is
		 * 15 bits, high is 4 bits).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the DSP recovery
				 * sequence and report failure without
				 * requesting another reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
854
/* Zero the DSP test pattern in all four PHY channels, waiting for the
 * macro to finish after each channel.  Returns 0 on success, -EBUSY on
 * macro timeout.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
874
/* Extended PHY reset workaround for 5703/5704/5705: repeatedly reset
 * the PHY, force 1000/full master mode, and verify the DSP with a test
 * pattern until it checks out (up to 10 retries), then clear the
 * pattern and restore the original register state.
 *
 * NOTE(review): if the first MII_TG3_EXT_CTRL or MII_TG3_CTRL read
 * fails on every retry, reg32/phy9_orig are used below without having
 * been set — presumably reads are expected to succeed after a good
 * reset; confirm against later upstream revisions of this function.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the original 1000BASE-T control register. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
950
c8e1e82b
MC
951static void tg3_link_report(struct tg3 *);
952
1da177e4
LT
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Returns 0 on success, -EBUSY if the PHY does not respond to the
 * initial MII_BMSR reads, or the error from the chip-specific reset
 * helper.  After the reset, applies per-chip PHY bug workarounds and
 * re-enables jumbo-frame and wirespeed settings.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR is read twice because link-status bits are latched;
	 * the second read reflects current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* Resetting the PHY drops the link; report it immediately. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need an extended reset sequence. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Per-chip DSP workarounds, keyed off flags set at probe time. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 phy_reg;

		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);

		/* Access auto-MDIX bit via the shadow register set,
		 * restoring the test register when done.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
			u32 phy_reg2;

			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     phy_reg | MII_TG3_EPHY_SHADOW_EN);
			/* Enable auto-MDIX */
			if (!tg3_readphy(tp, 0x10, &phy_reg2))
				tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
			tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
		}
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1059
/* Configure the GRC local control GPIOs that route auxiliary (Vaux)
 * power, coordinating with the peer port on dual-port chips (5704/5714)
 * so that neither function cuts power the other still needs.
 *
 * NOTE(review): the exact GPIO meanings are board-specific; the
 * sequences below follow flags read from NIC SRAM config at probe.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Keep aux power up if either port needs WOL or ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* The peer (if initialized) already owns the
			 * GPIO programming; don't disturb it.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step sequence: assert, add OUTPUT0,
			 * then drop OUTPUT2 (when usable), with a
			 * settle delay between each write.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1: drive high, release, drive high
			 * again, to switch aux power off.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1155
1156static int tg3_setup_phy(struct tg3 *, int);
1157
1158#define RESET_KIND_SHUTDOWN 0
1159#define RESET_KIND_INIT 1
1160#define RESET_KIND_SUSPEND 2
1161
1162static void tg3_write_sig_post_reset(struct tg3 *, int);
1163static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1164static int tg3_nvram_lock(struct tg3 *);
1165static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1166
15c3b696
MC
/* Power down the copper PHY prior to suspend.  Does nothing for
 * SERDES attachments, and skips the final BMCR power-down on chips
 * where powering the PHY down is known to cause problems.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
		/* Force the LEDs off and drop AUX_CTRL to its
		 * low-power setting before the power-down.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1188
bc1c7567 1189static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1190{
1191 u32 misc_host_ctrl;
1192 u16 power_control, power_caps;
1193 int pm = tp->pm_cap;
1194
1195 /* Make sure register accesses (indirect or otherwise)
1196 * will function correctly.
1197 */
1198 pci_write_config_dword(tp->pdev,
1199 TG3PCI_MISC_HOST_CTRL,
1200 tp->misc_host_ctrl);
1201
1202 pci_read_config_word(tp->pdev,
1203 pm + PCI_PM_CTRL,
1204 &power_control);
1205 power_control |= PCI_PM_CTRL_PME_STATUS;
1206 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1207 switch (state) {
bc1c7567 1208 case PCI_D0:
1da177e4
LT
1209 power_control |= 0;
1210 pci_write_config_word(tp->pdev,
1211 pm + PCI_PM_CTRL,
1212 power_control);
8c6bda1a
MC
1213 udelay(100); /* Delay after power state change */
1214
1215 /* Switch out of Vaux if it is not a LOM */
b401e9e2
MC
1216 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1217 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1218
1219 return 0;
1220
bc1c7567 1221 case PCI_D1:
1da177e4
LT
1222 power_control |= 1;
1223 break;
1224
bc1c7567 1225 case PCI_D2:
1da177e4
LT
1226 power_control |= 2;
1227 break;
1228
bc1c7567 1229 case PCI_D3hot:
1da177e4
LT
1230 power_control |= 3;
1231 break;
1232
1233 default:
1234 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1235 "requested.\n",
1236 tp->dev->name, state);
1237 return -EINVAL;
1238 };
1239
1240 power_control |= PCI_PM_CTRL_PME_ENABLE;
1241
1242 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1243 tw32(TG3PCI_MISC_HOST_CTRL,
1244 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1245
1246 if (tp->link_config.phy_is_low_power == 0) {
1247 tp->link_config.phy_is_low_power = 1;
1248 tp->link_config.orig_speed = tp->link_config.speed;
1249 tp->link_config.orig_duplex = tp->link_config.duplex;
1250 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1251 }
1252
747e8f8b 1253 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1254 tp->link_config.speed = SPEED_10;
1255 tp->link_config.duplex = DUPLEX_HALF;
1256 tp->link_config.autoneg = AUTONEG_ENABLE;
1257 tg3_setup_phy(tp, 0);
1258 }
1259
b5d3772c
MC
1260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1261 u32 val;
1262
1263 val = tr32(GRC_VCPU_EXT_CTRL);
1264 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1265 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
6921d201
MC
1266 int i;
1267 u32 val;
1268
1269 for (i = 0; i < 200; i++) {
1270 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1271 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1272 break;
1273 msleep(1);
1274 }
1275 }
1276 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1277 WOL_DRV_STATE_SHUTDOWN |
1278 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1279
1da177e4
LT
1280 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1281
1282 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1283 u32 mac_mode;
1284
1285 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1286 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1287 udelay(40);
1288
3f7045c1
MC
1289 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1290 mac_mode = MAC_MODE_PORT_MODE_GMII;
1291 else
1292 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4
LT
1293
1294 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1295 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1296 mac_mode |= MAC_MODE_LINK_POLARITY;
1297 } else {
1298 mac_mode = MAC_MODE_PORT_MODE_TBI;
1299 }
1300
cbf46853 1301 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1302 tw32(MAC_LED_CTRL, tp->led_ctrl);
1303
1304 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1305 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1306 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1307
1308 tw32_f(MAC_MODE, mac_mode);
1309 udelay(100);
1310
1311 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1312 udelay(10);
1313 }
1314
1315 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1316 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1317 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1318 u32 base_val;
1319
1320 base_val = tp->pci_clock_ctrl;
1321 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1322 CLOCK_CTRL_TXCLK_DISABLE);
1323
b401e9e2
MC
1324 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1325 CLOCK_CTRL_PWRDOWN_PLL133, 40);
a4e2b347 1326 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4cf78e4f 1327 /* do nothing */
85e94ced 1328 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1329 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1330 u32 newbits1, newbits2;
1331
1332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1334 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1335 CLOCK_CTRL_TXCLK_DISABLE |
1336 CLOCK_CTRL_ALTCLK);
1337 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1338 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1339 newbits1 = CLOCK_CTRL_625_CORE;
1340 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1341 } else {
1342 newbits1 = CLOCK_CTRL_ALTCLK;
1343 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1344 }
1345
b401e9e2
MC
1346 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1347 40);
1da177e4 1348
b401e9e2
MC
1349 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1350 40);
1da177e4
LT
1351
1352 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1353 u32 newbits3;
1354
1355 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1356 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1357 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1358 CLOCK_CTRL_TXCLK_DISABLE |
1359 CLOCK_CTRL_44MHZ_CORE);
1360 } else {
1361 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1362 }
1363
b401e9e2
MC
1364 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1365 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1366 }
1367 }
1368
6921d201 1369 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
3f7045c1
MC
1370 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1371 tg3_power_down_phy(tp);
6921d201 1372
1da177e4
LT
1373 tg3_frob_aux_power(tp);
1374
1375 /* Workaround for unstable PLL clock */
1376 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1377 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1378 u32 val = tr32(0x7d00);
1379
1380 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1381 tw32(0x7d00, val);
6921d201 1382 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1383 int err;
1384
1385 err = tg3_nvram_lock(tp);
1da177e4 1386 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1387 if (!err)
1388 tg3_nvram_unlock(tp);
6921d201 1389 }
1da177e4
LT
1390 }
1391
bbadf503
MC
1392 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1393
1da177e4
LT
1394 /* Finally, set the new power state. */
1395 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1396 udelay(100); /* Delay after power state change */
1da177e4 1397
1da177e4
LT
1398 return 0;
1399}
1400
1401static void tg3_link_report(struct tg3 *tp)
1402{
1403 if (!netif_carrier_ok(tp->dev)) {
1404 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1405 } else {
1406 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1407 tp->dev->name,
1408 (tp->link_config.active_speed == SPEED_1000 ?
1409 1000 :
1410 (tp->link_config.active_speed == SPEED_100 ?
1411 100 : 10)),
1412 (tp->link_config.active_duplex == DUPLEX_FULL ?
1413 "full" : "half"));
1414
1415 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1416 "%s for RX.\n",
1417 tp->dev->name,
1418 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1419 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1420 }
1421}
1422
/* Resolve pause (flow-control) configuration from the local and
 * link-partner advertisement registers and program MAC_RX_MODE /
 * MAC_TX_MODE accordingly.  The resolution table follows the
 * PAUSE/ASYM_PAUSE bit combinations of IEEE 802.3 autonegotiation.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Standard pause resolution: symmetric pause enables
		 * both directions; asymmetric combinations enable only
		 * the direction both ends agree on.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			    (remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Pause autoneg disabled: keep the configured flags. */
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Touch the hardware only when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1494
1495static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1496{
1497 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1498 case MII_TG3_AUX_STAT_10HALF:
1499 *speed = SPEED_10;
1500 *duplex = DUPLEX_HALF;
1501 break;
1502
1503 case MII_TG3_AUX_STAT_10FULL:
1504 *speed = SPEED_10;
1505 *duplex = DUPLEX_FULL;
1506 break;
1507
1508 case MII_TG3_AUX_STAT_100HALF:
1509 *speed = SPEED_100;
1510 *duplex = DUPLEX_HALF;
1511 break;
1512
1513 case MII_TG3_AUX_STAT_100FULL:
1514 *speed = SPEED_100;
1515 *duplex = DUPLEX_FULL;
1516 break;
1517
1518 case MII_TG3_AUX_STAT_1000HALF:
1519 *speed = SPEED_1000;
1520 *duplex = DUPLEX_HALF;
1521 break;
1522
1523 case MII_TG3_AUX_STAT_1000FULL:
1524 *speed = SPEED_1000;
1525 *duplex = DUPLEX_FULL;
1526 break;
1527
1528 default:
715116a1
MC
1529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1530 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1531 SPEED_10;
1532 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1533 DUPLEX_HALF;
1534 break;
1535 }
1da177e4
LT
1536 *speed = SPEED_INVALID;
1537 *duplex = DUPLEX_INVALID;
1538 break;
1539 };
1540}
1541
1542static void tg3_phy_copper_begin(struct tg3 *tp)
1543{
1544 u32 new_adv;
1545 int i;
1546
1547 if (tp->link_config.phy_is_low_power) {
1548 /* Entering low power mode. Disable gigabit and
1549 * 100baseT advertisements.
1550 */
1551 tg3_writephy(tp, MII_TG3_CTRL, 0);
1552
1553 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1554 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1555 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1556 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1557
1558 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1559 } else if (tp->link_config.speed == SPEED_INVALID) {
1560 tp->link_config.advertising =
1561 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1562 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1563 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1564 ADVERTISED_Autoneg | ADVERTISED_MII);
1565
1566 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1567 tp->link_config.advertising &=
1568 ~(ADVERTISED_1000baseT_Half |
1569 ADVERTISED_1000baseT_Full);
1570
1571 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1572 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1573 new_adv |= ADVERTISE_10HALF;
1574 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1575 new_adv |= ADVERTISE_10FULL;
1576 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1577 new_adv |= ADVERTISE_100HALF;
1578 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1579 new_adv |= ADVERTISE_100FULL;
1580 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1581
1582 if (tp->link_config.advertising &
1583 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1584 new_adv = 0;
1585 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1586 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1587 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1588 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1589 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1590 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1591 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1592 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1593 MII_TG3_CTRL_ENABLE_AS_MASTER);
1594 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1595 } else {
1596 tg3_writephy(tp, MII_TG3_CTRL, 0);
1597 }
1598 } else {
1599 /* Asking for a specific link mode. */
1600 if (tp->link_config.speed == SPEED_1000) {
1601 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1602 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1603
1604 if (tp->link_config.duplex == DUPLEX_FULL)
1605 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1606 else
1607 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1608 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1609 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1610 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1611 MII_TG3_CTRL_ENABLE_AS_MASTER);
1612 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1613 } else {
1614 tg3_writephy(tp, MII_TG3_CTRL, 0);
1615
1616 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1617 if (tp->link_config.speed == SPEED_100) {
1618 if (tp->link_config.duplex == DUPLEX_FULL)
1619 new_adv |= ADVERTISE_100FULL;
1620 else
1621 new_adv |= ADVERTISE_100HALF;
1622 } else {
1623 if (tp->link_config.duplex == DUPLEX_FULL)
1624 new_adv |= ADVERTISE_10FULL;
1625 else
1626 new_adv |= ADVERTISE_10HALF;
1627 }
1628 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1629 }
1630 }
1631
1632 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1633 tp->link_config.speed != SPEED_INVALID) {
1634 u32 bmcr, orig_bmcr;
1635
1636 tp->link_config.active_speed = tp->link_config.speed;
1637 tp->link_config.active_duplex = tp->link_config.duplex;
1638
1639 bmcr = 0;
1640 switch (tp->link_config.speed) {
1641 default:
1642 case SPEED_10:
1643 break;
1644
1645 case SPEED_100:
1646 bmcr |= BMCR_SPEED100;
1647 break;
1648
1649 case SPEED_1000:
1650 bmcr |= TG3_BMCR_SPEED1000;
1651 break;
1652 };
1653
1654 if (tp->link_config.duplex == DUPLEX_FULL)
1655 bmcr |= BMCR_FULLDPLX;
1656
1657 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1658 (bmcr != orig_bmcr)) {
1659 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1660 for (i = 0; i < 1500; i++) {
1661 u32 tmp;
1662
1663 udelay(10);
1664 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1665 tg3_readphy(tp, MII_BMSR, &tmp))
1666 continue;
1667 if (!(tmp & BMSR_LSTATUS)) {
1668 udelay(40);
1669 break;
1670 }
1671 }
1672 tg3_writephy(tp, MII_BMCR, bmcr);
1673 udelay(40);
1674 }
1675 } else {
1676 tg3_writephy(tp, MII_BMCR,
1677 BMCR_ANENABLE | BMCR_ANRESTART);
1678 }
1679}
1680
1681static int tg3_init_5401phy_dsp(struct tg3 *tp)
1682{
1683 int err;
1684
1685 /* Turn off tap power management. */
1686 /* Set Extended packet length bit */
1687 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1688
1689 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1690 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1691
1692 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1693 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1694
1695 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1696 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1697
1698 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1699 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1700
1701 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1702 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1703
1704 udelay(40);
1705
1706 return err;
1707}
1708
1709static int tg3_copper_is_advertising_all(struct tg3 *tp)
1710{
1711 u32 adv_reg, all_mask;
1712
1713 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1714 return 0;
1715
1716 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1717 ADVERTISE_100HALF | ADVERTISE_100FULL);
1718 if ((adv_reg & all_mask) != all_mask)
1719 return 0;
1720 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1721 u32 tg3_ctrl;
1722
1723 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1724 return 0;
1725
1726 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1727 MII_TG3_CTRL_ADV_1000_FULL);
1728 if ((tg3_ctrl & all_mask) != all_mask)
1729 return 0;
1730 }
1731 return 1;
1732}
1733
/* Bring up (or re-validate) a copper PHY link.  Resets the PHY when
 * forced or when known-buggy third-party PHYs lost link, applies
 * 5401/5701 workarounds, polls BMSR/AUX_STAT for the negotiated
 * speed/duplex, resolves flow control, programs MAC_MODE to match,
 * and updates the netdev carrier state.  Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any latched MAC status before probing the link. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Re-run 5401 DSP init, then wait up to ~10ms
			 * for the link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a second full
			 * reset + DSP init round.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Enable SM_DSP clock (bit 10) in AUX_CTRL shadow 0x4007;
		 * if it had to be set, skip straight to relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link, BMSR read twice per pass (latched bits). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for BMCR to read back sane (non-zero, not the
		 * all-ones-ish 0x7fff transient).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: the link only counts if it matches
			 * the requested speed/duplex with autoneg off.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC to match the negotiated link parameters. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Tell the firmware about a gigabit link on 5700 in
	 * PCI-X / high-speed-PCI mode.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2012
/* Software auto-negotiation state for fiber (TBI) links, driven by
 * tg3_fiber_aneg_smachine().  Holds the current state-machine state,
 * management-register-style status flags, timing, and the raw
 * transmitted/received configuration words.
 */
struct tg3_fiber_aneginfo {
	/* Current state of the autoneg state machine. */
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	/* MR_* control/status flags (local settings and what the link
	 * partner advertised).
	 */
	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters used for state-settle timing. */
	unsigned long link_time, cur_time;

	/* Last config word seen and how many ticks it has been stable. */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match latches for ability/idle/ack detection. */
	char ability_match, idle_match, ack_match;

	/* Raw transmitted and received configuration words. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};

/* Return codes for the fiber autoneg state machine. */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a config word must remain stable before it is acted upon. */
#define ANEG_STATE_SETTLE_TIME	10000
2077static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2078 struct tg3_fiber_aneginfo *ap)
2079{
2080 unsigned long delta;
2081 u32 rx_cfg_reg;
2082 int ret;
2083
2084 if (ap->state == ANEG_STATE_UNKNOWN) {
2085 ap->rxconfig = 0;
2086 ap->link_time = 0;
2087 ap->cur_time = 0;
2088 ap->ability_match_cfg = 0;
2089 ap->ability_match_count = 0;
2090 ap->ability_match = 0;
2091 ap->idle_match = 0;
2092 ap->ack_match = 0;
2093 }
2094 ap->cur_time++;
2095
2096 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2097 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2098
2099 if (rx_cfg_reg != ap->ability_match_cfg) {
2100 ap->ability_match_cfg = rx_cfg_reg;
2101 ap->ability_match = 0;
2102 ap->ability_match_count = 0;
2103 } else {
2104 if (++ap->ability_match_count > 1) {
2105 ap->ability_match = 1;
2106 ap->ability_match_cfg = rx_cfg_reg;
2107 }
2108 }
2109 if (rx_cfg_reg & ANEG_CFG_ACK)
2110 ap->ack_match = 1;
2111 else
2112 ap->ack_match = 0;
2113
2114 ap->idle_match = 0;
2115 } else {
2116 ap->idle_match = 1;
2117 ap->ability_match_cfg = 0;
2118 ap->ability_match_count = 0;
2119 ap->ability_match = 0;
2120 ap->ack_match = 0;
2121
2122 rx_cfg_reg = 0;
2123 }
2124
2125 ap->rxconfig = rx_cfg_reg;
2126 ret = ANEG_OK;
2127
2128 switch(ap->state) {
2129 case ANEG_STATE_UNKNOWN:
2130 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2131 ap->state = ANEG_STATE_AN_ENABLE;
2132
2133 /* fallthru */
2134 case ANEG_STATE_AN_ENABLE:
2135 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2136 if (ap->flags & MR_AN_ENABLE) {
2137 ap->link_time = 0;
2138 ap->cur_time = 0;
2139 ap->ability_match_cfg = 0;
2140 ap->ability_match_count = 0;
2141 ap->ability_match = 0;
2142 ap->idle_match = 0;
2143 ap->ack_match = 0;
2144
2145 ap->state = ANEG_STATE_RESTART_INIT;
2146 } else {
2147 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2148 }
2149 break;
2150
2151 case ANEG_STATE_RESTART_INIT:
2152 ap->link_time = ap->cur_time;
2153 ap->flags &= ~(MR_NP_LOADED);
2154 ap->txconfig = 0;
2155 tw32(MAC_TX_AUTO_NEG, 0);
2156 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2157 tw32_f(MAC_MODE, tp->mac_mode);
2158 udelay(40);
2159
2160 ret = ANEG_TIMER_ENAB;
2161 ap->state = ANEG_STATE_RESTART;
2162
2163 /* fallthru */
2164 case ANEG_STATE_RESTART:
2165 delta = ap->cur_time - ap->link_time;
2166 if (delta > ANEG_STATE_SETTLE_TIME) {
2167 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2168 } else {
2169 ret = ANEG_TIMER_ENAB;
2170 }
2171 break;
2172
2173 case ANEG_STATE_DISABLE_LINK_OK:
2174 ret = ANEG_DONE;
2175 break;
2176
2177 case ANEG_STATE_ABILITY_DETECT_INIT:
2178 ap->flags &= ~(MR_TOGGLE_TX);
2179 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2180 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2181 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2182 tw32_f(MAC_MODE, tp->mac_mode);
2183 udelay(40);
2184
2185 ap->state = ANEG_STATE_ABILITY_DETECT;
2186 break;
2187
2188 case ANEG_STATE_ABILITY_DETECT:
2189 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2190 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2191 }
2192 break;
2193
2194 case ANEG_STATE_ACK_DETECT_INIT:
2195 ap->txconfig |= ANEG_CFG_ACK;
2196 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2197 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2198 tw32_f(MAC_MODE, tp->mac_mode);
2199 udelay(40);
2200
2201 ap->state = ANEG_STATE_ACK_DETECT;
2202
2203 /* fallthru */
2204 case ANEG_STATE_ACK_DETECT:
2205 if (ap->ack_match != 0) {
2206 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2207 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2208 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2209 } else {
2210 ap->state = ANEG_STATE_AN_ENABLE;
2211 }
2212 } else if (ap->ability_match != 0 &&
2213 ap->rxconfig == 0) {
2214 ap->state = ANEG_STATE_AN_ENABLE;
2215 }
2216 break;
2217
2218 case ANEG_STATE_COMPLETE_ACK_INIT:
2219 if (ap->rxconfig & ANEG_CFG_INVAL) {
2220 ret = ANEG_FAILED;
2221 break;
2222 }
2223 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2224 MR_LP_ADV_HALF_DUPLEX |
2225 MR_LP_ADV_SYM_PAUSE |
2226 MR_LP_ADV_ASYM_PAUSE |
2227 MR_LP_ADV_REMOTE_FAULT1 |
2228 MR_LP_ADV_REMOTE_FAULT2 |
2229 MR_LP_ADV_NEXT_PAGE |
2230 MR_TOGGLE_RX |
2231 MR_NP_RX);
2232 if (ap->rxconfig & ANEG_CFG_FD)
2233 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2234 if (ap->rxconfig & ANEG_CFG_HD)
2235 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2236 if (ap->rxconfig & ANEG_CFG_PS1)
2237 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2238 if (ap->rxconfig & ANEG_CFG_PS2)
2239 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2240 if (ap->rxconfig & ANEG_CFG_RF1)
2241 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2242 if (ap->rxconfig & ANEG_CFG_RF2)
2243 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2244 if (ap->rxconfig & ANEG_CFG_NP)
2245 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2246
2247 ap->link_time = ap->cur_time;
2248
2249 ap->flags ^= (MR_TOGGLE_TX);
2250 if (ap->rxconfig & 0x0008)
2251 ap->flags |= MR_TOGGLE_RX;
2252 if (ap->rxconfig & ANEG_CFG_NP)
2253 ap->flags |= MR_NP_RX;
2254 ap->flags |= MR_PAGE_RX;
2255
2256 ap->state = ANEG_STATE_COMPLETE_ACK;
2257 ret = ANEG_TIMER_ENAB;
2258 break;
2259
2260 case ANEG_STATE_COMPLETE_ACK:
2261 if (ap->ability_match != 0 &&
2262 ap->rxconfig == 0) {
2263 ap->state = ANEG_STATE_AN_ENABLE;
2264 break;
2265 }
2266 delta = ap->cur_time - ap->link_time;
2267 if (delta > ANEG_STATE_SETTLE_TIME) {
2268 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2269 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2270 } else {
2271 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2272 !(ap->flags & MR_NP_RX)) {
2273 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2274 } else {
2275 ret = ANEG_FAILED;
2276 }
2277 }
2278 }
2279 break;
2280
2281 case ANEG_STATE_IDLE_DETECT_INIT:
2282 ap->link_time = ap->cur_time;
2283 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2284 tw32_f(MAC_MODE, tp->mac_mode);
2285 udelay(40);
2286
2287 ap->state = ANEG_STATE_IDLE_DETECT;
2288 ret = ANEG_TIMER_ENAB;
2289 break;
2290
2291 case ANEG_STATE_IDLE_DETECT:
2292 if (ap->ability_match != 0 &&
2293 ap->rxconfig == 0) {
2294 ap->state = ANEG_STATE_AN_ENABLE;
2295 break;
2296 }
2297 delta = ap->cur_time - ap->link_time;
2298 if (delta > ANEG_STATE_SETTLE_TIME) {
2299 /* XXX another gem from the Broadcom driver :( */
2300 ap->state = ANEG_STATE_LINK_OK;
2301 }
2302 break;
2303
2304 case ANEG_STATE_LINK_OK:
2305 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2306 ret = ANEG_DONE;
2307 break;
2308
2309 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2310 /* ??? unimplemented */
2311 break;
2312
2313 case ANEG_STATE_NEXT_PAGE_WAIT:
2314 /* ??? unimplemented */
2315 break;
2316
2317 default:
2318 ret = ANEG_FAILED;
2319 break;
2320 };
2321
2322 return ret;
2323}
2324
2325static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2326{
2327 int res = 0;
2328 struct tg3_fiber_aneginfo aninfo;
2329 int status = ANEG_FAILED;
2330 unsigned int tick;
2331 u32 tmp;
2332
2333 tw32_f(MAC_TX_AUTO_NEG, 0);
2334
2335 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2336 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2337 udelay(40);
2338
2339 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2340 udelay(40);
2341
2342 memset(&aninfo, 0, sizeof(aninfo));
2343 aninfo.flags |= MR_AN_ENABLE;
2344 aninfo.state = ANEG_STATE_UNKNOWN;
2345 aninfo.cur_time = 0;
2346 tick = 0;
2347 while (++tick < 195000) {
2348 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2349 if (status == ANEG_DONE || status == ANEG_FAILED)
2350 break;
2351
2352 udelay(1);
2353 }
2354
2355 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2356 tw32_f(MAC_MODE, tp->mac_mode);
2357 udelay(40);
2358
2359 *flags = aninfo.flags;
2360
2361 if (status == ANEG_DONE &&
2362 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2363 MR_LP_ADV_FULL_DUPLEX)))
2364 res = 1;
2365
2366 return res;
2367}
2368
/* Bring the BCM8002 fiber PHY out of reset and into a usable state.
 *
 * Skipped when init has already completed and there is no PCS sync
 * (i.e. no link), so a live link is not disturbed needlessly.
 *
 * The register numbers and values below are raw BCM8002-specific
 * MDIO writes; the inline comments reflect the original driver's
 * annotations. The write ordering is significant — do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2418
/* Configure the fiber link using the on-chip SG_DIG autoneg engine.
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Returns 1 when the link is up, 0 otherwise. Handles three cases:
 * forced mode (autoneg disabled), (re)starting hardware autoneg when
 * SG_DIG_CTRL does not yet hold the expected value, and processing
 * the autoneg outcome — including a parallel-detection fallback when
 * autoneg times out (serdes_counter reaches zero).
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* Chips other than 5704 A0/A1 need the SERDES_CFG workaround
	 * writes below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable the autoneg engine if it is on
		 * (bit 31 of SG_DIG_CTRL), then report link from PCS sync.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* A parallel-detected link stays up while PCS is synced
		 * and no config words arrive, until the counter expires.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse bit 30 (autoneg restart - TODO confirm) then
		 * program the final control value.
		 */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* SG_DIG_STATUS bit 1 == autoneg complete (per usage
		 * below); bits 19/20 carry the partner's pause bits.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete: count down, then try
			 * parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2555
2556static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2557{
2558 int current_link_up = 0;
2559
2560 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2561 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2562 goto out;
2563 }
2564
2565 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2566 u32 flags;
2567 int i;
6aa20a22 2568
1da177e4
LT
2569 if (fiber_autoneg(tp, &flags)) {
2570 u32 local_adv, remote_adv;
2571
2572 local_adv = ADVERTISE_PAUSE_CAP;
2573 remote_adv = 0;
2574 if (flags & MR_LP_ADV_SYM_PAUSE)
2575 remote_adv |= LPA_PAUSE_CAP;
2576 if (flags & MR_LP_ADV_ASYM_PAUSE)
2577 remote_adv |= LPA_PAUSE_ASYM;
2578
2579 tg3_setup_flow_control(tp, local_adv, remote_adv);
2580
2581 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2582 current_link_up = 1;
2583 }
2584 for (i = 0; i < 30; i++) {
2585 udelay(20);
2586 tw32_f(MAC_STATUS,
2587 (MAC_STATUS_SYNC_CHANGED |
2588 MAC_STATUS_CFG_CHANGED));
2589 udelay(40);
2590 if ((tr32(MAC_STATUS) &
2591 (MAC_STATUS_SYNC_CHANGED |
2592 MAC_STATUS_CFG_CHANGED)) == 0)
2593 break;
2594 }
2595
2596 mac_status = tr32(MAC_STATUS);
2597 if (current_link_up == 0 &&
2598 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2599 !(mac_status & MAC_STATUS_RCVD_CFG))
2600 current_link_up = 1;
2601 } else {
2602 /* Forcing 1000FD link up. */
2603 current_link_up = 1;
2604 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2605
2606 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2607 udelay(40);
2608 }
2609
2610out:
2611 return current_link_up;
2612}
2613
/* Top-level link setup for fiber (TBI) devices.
 *
 * @tp:          device state
 * @force_reset: unused here (kept for signature parity with the
 *               copper/MII variants called from tg3_setup_phy)
 *
 * Selects hardware (SG_DIG) or by-hand autoneg, updates MAC mode,
 * LED state, link_config and the netif carrier, and reports link
 * changes. Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember pause/speed/duplex so we can report a change even
	 * when the carrier state itself did not flip.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: link already healthy and stable — just ack the
	 * change bits and leave everything alone.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI (fiber) port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the link-change bit in the status block ourselves. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack change events until they stay clear (bounded retries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Briefly pulse SEND_CONFIGS to nudge the
			 * partner (autoneg timed out).
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber link is always 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged: still report if pause, speed or
		 * duplex changed.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2729
/* Link setup for serdes devices managed through an MII register set
 * (TG3_FLG2_MII_SERDES), e.g. 5714-class parts per the special
 * casing below.
 *
 * @tp:          device state
 * @force_reset: reset the PHY before configuring when non-zero
 *
 * Returns the OR of the tg3_readphy() error results accumulated
 * along the way (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending link-related events. */
	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice — link status is latched-low. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: trust the MAC's TX status for link, not BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement word. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed or autoneg off:
			 * (re)start autoneg and wait for it.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Two reads: BMSR link status is latched-low. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				/* No common ability: not a valid link. */
				current_link_up = 0;
		}
	}

	/* NOTE(review): this tests the *previous* value of
	 * link_config.active_duplex; the freshly determined
	 * current_duplex is only stored a few lines below — confirm
	 * this ordering is intentional.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2896
2897static void tg3_serdes_parallel_detect(struct tg3 *tp)
2898{
3d3ebe74 2899 if (tp->serdes_counter) {
747e8f8b 2900 /* Give autoneg time to complete. */
3d3ebe74 2901 tp->serdes_counter--;
747e8f8b
MC
2902 return;
2903 }
2904 if (!netif_carrier_ok(tp->dev) &&
2905 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2906 u32 bmcr;
2907
2908 tg3_readphy(tp, MII_BMCR, &bmcr);
2909 if (bmcr & BMCR_ANENABLE) {
2910 u32 phy1, phy2;
2911
2912 /* Select shadow register 0x1f */
2913 tg3_writephy(tp, 0x1c, 0x7c00);
2914 tg3_readphy(tp, 0x1c, &phy1);
2915
2916 /* Select expansion interrupt status register */
2917 tg3_writephy(tp, 0x17, 0x0f01);
2918 tg3_readphy(tp, 0x15, &phy2);
2919 tg3_readphy(tp, 0x15, &phy2);
2920
2921 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2922 /* We have signal detect and not receiving
2923 * config code words, link is up by parallel
2924 * detection.
2925 */
2926
2927 bmcr &= ~BMCR_ANENABLE;
2928 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2929 tg3_writephy(tp, MII_BMCR, bmcr);
2930 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2931 }
2932 }
2933 }
2934 else if (netif_carrier_ok(tp->dev) &&
2935 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2936 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2937 u32 phy2;
2938
2939 /* Select expansion interrupt status register */
2940 tg3_writephy(tp, 0x17, 0x0f01);
2941 tg3_readphy(tp, 0x15, &phy2);
2942 if (phy2 & 0x20) {
2943 u32 bmcr;
2944
2945 /* Config code words received, turn on autoneg. */
2946 tg3_readphy(tp, MII_BMCR, &bmcr);
2947 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2948
2949 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2950
2951 }
2952 }
2953}
2954
1da177e4
LT
2955static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2956{
2957 int err;
2958
2959 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2960 err = tg3_setup_fiber_phy(tp, force_reset);
747e8f8b
MC
2961 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2962 err = tg3_setup_fiber_mii_phy(tp, force_reset);
1da177e4
LT
2963 } else {
2964 err = tg3_setup_copper_phy(tp, force_reset);
2965 }
2966
2967 if (tp->link_config.active_speed == SPEED_1000 &&
2968 tp->link_config.active_duplex == DUPLEX_HALF)
2969 tw32(MAC_TX_LENGTHS,
2970 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2971 (6 << TX_LENGTHS_IPG_SHIFT) |
2972 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2973 else
2974 tw32(MAC_TX_LENGTHS,
2975 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2976 (6 << TX_LENGTHS_IPG_SHIFT) |
2977 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2978
2979 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2980 if (netif_carrier_ok(tp->dev)) {
2981 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 2982 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
2983 } else {
2984 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2985 }
2986 }
2987
2988 return err;
2989}
2990
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox.  The symptom
 * is bogus tx completions.  We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder flag is already set (or indirect mailbox
	 * writes are in use), this path should be impossible.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery; the actual chip reset happens
	 * later (see comment above).
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3011
/* Number of free TX descriptors (ring occupancy is computed from the
 * producer/consumer indices modulo the ring size).
 */
static inline u32 tg3_tx_avail(struct tg3 *tp)
{
	/* Pairs with the smp_mb() in tg3_tx() so a fresh tx_cons is
	 * observed before any queue-stopped check (see the comment
	 * there).
	 */
	smp_mb();
	return (tp->tx_pending -
		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
}
3018
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reap every descriptor the hardware has consumed: unmap the
	 * head and all fragments, then free the skb.
	 */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means bogus completions
		 * (MMIO reordering) — go into recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Fragment slots must be empty and must not run
			 * past the hardware index; otherwise flag the
			 * reordering bug (recover after unmapping).
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing a concurrent
	 * queue stop in tg3_start_xmit().
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3086
3087/* Returns size of skb allocated or < 0 on error.
3088 *
3089 * We only need to fill in the address because the other members
3090 * of the RX descriptor are invariant, see tg3_init_rings.
3091 *
3092 * Note the purposeful assymetry of cpu vs. chip accesses. For
3093 * posting buffers we only dirty the first cache line of the RX
3094 * descriptor (containing the address). Whereas for the RX status
3095 * buffers the cpu only reads the last cacheline of the RX descriptor
3096 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3097 */
/* tg3_alloc_rx_skb() - allocate and DMA-map a fresh receive buffer.
 *
 * Installs a new skb into slot dest_idx_unmasked (masked to the ring
 * size) of either the standard or jumbo producer ring, chosen by
 * opaque_key.  If src_idx >= 0, that source slot's skb pointer is
 * cleared on success so the old mapping is no longer owned twice.
 *
 * Returns the buffer size on success, -EINVAL for an unknown ring key,
 * -ENOMEM if skb allocation fails.  On failure NOTHING is modified;
 * callers (tg3_rx) depend on that.
 */
3098static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 3099 int src_idx, u32 dest_idx_unmasked)
 3100{
 3101 struct tg3_rx_buffer_desc *desc;
 3102 struct ring_info *map, *src_map;
 3103 struct sk_buff *skb;
 3104 dma_addr_t mapping;
 3105 int skb_size, dest_idx;
 3106
 3107 src_map = NULL;
 3108 switch (opaque_key) {
 3109 case RXD_OPAQUE_RING_STD:
 3110 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
 3111 desc = &tp->rx_std[dest_idx];
 3112 map = &tp->rx_std_buffers[dest_idx];
 3113 if (src_idx >= 0)
 3114 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 3115 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
 3116 break;
 3117
 3118 case RXD_OPAQUE_RING_JUMBO:
 3119 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
 3120 desc = &tp->rx_jumbo[dest_idx];
 3121 map = &tp->rx_jumbo_buffers[dest_idx];
 3122 if (src_idx >= 0)
 3123 src_map = &tp->rx_jumbo_buffers[src_idx];
 3124 skb_size = RX_JUMBO_PKT_BUF_SZ;
 3125 break;
 3126
 3127 default:
 3128 return -EINVAL;
 3129 };
 3130
 3131 /* Do not overwrite any of the map or rp information
 3132 * until we are sure we can commit to a new buffer.
 3133 *
 3134 * Callers depend upon this behavior and assume that
 3135 * we leave everything unchanged if we fail.
 3136 */
a20e9c62 3137 skb = netdev_alloc_skb(tp->dev, skb_size);
1da177e4
LT
 3138 if (skb == NULL)
 3139 return -ENOMEM;
 3140
1da177e4
LT
 /* rx_offset shifts the payload start; the mapped length below is
  * shrunk by the same amount so the mapping stays inside the buffer. */
 3141 skb_reserve(skb, tp->rx_offset);
 3142
 3143 mapping = pci_map_single(tp->pdev, skb->data,
 3144 skb_size - tp->rx_offset,
 3145 PCI_DMA_FROMDEVICE);
 3146
 3147 map->skb = skb;
 3148 pci_unmap_addr_set(map, mapping, mapping);
 3149
 3150 if (src_map != NULL)
 3151 src_map->skb = NULL;
 3152
 /* Only the address words are rewritten; the other descriptor
  * fields are invariant (see the comment above this function). */
 3153 desc->addr_hi = ((u64)mapping >> 32);
 3154 desc->addr_lo = ((u64)mapping & 0xffffffff);
 3155
 3156 return skb_size;
 3157}
3158
3159/* We only need to move over in the address because the other
3160 * members of the RX descriptor are invariant. See notes above
3161 * tg3_alloc_rx_skb for full details.
3162 */
/* tg3_recycle_rx() - put an already-mapped RX buffer back on a producer
 * ring without reallocating.  Copies the skb pointer, DMA unmap cookie
 * and descriptor address words from slot src_idx into the destination
 * slot, then clears the source slot's skb.  Silently does nothing for
 * an unknown opaque_key.
 */
3163static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
 3164 int src_idx, u32 dest_idx_unmasked)
 3165{
 3166 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 3167 struct ring_info *src_map, *dest_map;
 3168 int dest_idx;
 3169
 3170 switch (opaque_key) {
 3171 case RXD_OPAQUE_RING_STD:
 3172 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
 3173 dest_desc = &tp->rx_std[dest_idx];
 3174 dest_map = &tp->rx_std_buffers[dest_idx];
 3175 src_desc = &tp->rx_std[src_idx];
 3176 src_map = &tp->rx_std_buffers[src_idx];
 3177 break;
 3178
 3179 case RXD_OPAQUE_RING_JUMBO:
 3180 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
 3181 dest_desc = &tp->rx_jumbo[dest_idx];
 3182 dest_map = &tp->rx_jumbo_buffers[dest_idx];
 3183 src_desc = &tp->rx_jumbo[src_idx];
 3184 src_map = &tp->rx_jumbo_buffers[src_idx];
 3185 break;
 3186
 3187 default:
 3188 return;
 3189 };
 3190
 /* Transfer ownership of the buffer: skb pointer, unmap cookie and
  * the two DMA address words the chip reads. */
 3191 dest_map->skb = src_map->skb;
 3192 pci_unmap_addr_set(dest_map, mapping,
 3193 pci_unmap_addr(src_map, mapping));
 3194 dest_desc->addr_hi = src_desc->addr_hi;
 3195 dest_desc->addr_lo = src_desc->addr_lo;
 3196
 3197 src_map->skb = NULL;
 3198}
3199
3200#if TG3_VLAN_TAG_USED
/* Hand a received skb to the stack via the VLAN-acceleration path,
 * attaching the hardware-extracted tag.  Returns the value of
 * vlan_hwaccel_receive_skb(). */
 3201static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
 3202{
 3203 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
 3204}
 3205#endif
3206
3207/* The RX ring scheme is composed of multiple rings which post fresh
3208 * buffers to the chip, and one special ring the chip uses to report
3209 * status back to the host.
3210 *
3211 * The special ring reports the status of received packets to the
3212 * host. The chip does not write into the original descriptor the
3213 * RX buffer was obtained from. The chip simply takes the original
3214 * descriptor as provided by the host, updates the status and length
3215 * field, then writes this into the next status ring entry.
3216 *
3217 * Each ring the host uses to post buffers to the chip is described
3218 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3219 * it is first placed into the on-chip ram. When the packet's length
3220 * is known, it walks down the TG3_BDINFO entries to select the ring.
3221 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3222 * which is within the range of the new packet's length is chosen.
3223 *
3224 * The "separate ring for rx status" scheme may sound queer, but it makes
3225 * sense from a cache coherency perspective. If only the host writes
3226 * to the buffer post rings, and only the chip writes to the rx status
3227 * rings, then cache lines never move beyond shared-modified state.
3228 * If both the host and chip were to write into the same ring, cache line
3229 * eviction could occur since both entities want it in an exclusive state.
3230 */
/* tg3_rx() - drain up to 'budget' packets from the RX return (status)
 * ring.  For each completed descriptor: large frames get a fresh
 * replacement buffer posted and the original skb is passed up; small
 * frames (<= RX_COPY_THRESHOLD) are memcpy'd into a new skb and the
 * original buffer is recycled in place.  Producer mailboxes are updated
 * at the end (and mid-loop once rx_std_max_post buffers accumulate).
 * Returns the number of packets delivered to the stack.
 */
3231static int tg3_rx(struct tg3 *tp, int budget)
 3232{
f92905de 3233 u32 work_mask, rx_std_posted = 0;
483ba50b
MC
 3234 u32 sw_idx = tp->rx_rcb_ptr;
 3235 u16 hw_idx;
1da177e4
LT
 3236 int received;
 3237
 3238 hw_idx = tp->hw_status->idx[0].rx_producer;
 3239 /*
 3240 * We need to order the read of hw_idx and the read of
 3241 * the opaque cookie.
 3242 */
 3243 rmb();
1da177e4
LT
 3244 work_mask = 0;
 3245 received = 0;
 3246 while (sw_idx != hw_idx && budget > 0) {
 3247 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
 3248 unsigned int len;
 3249 struct sk_buff *skb;
 3250 dma_addr_t dma_addr;
 3251 u32 opaque_key, desc_idx, *post_ptr;
 3252
 /* The opaque cookie identifies which producer ring (and slot)
  * this completion came from. */
 3253 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 3254 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 3255 if (opaque_key == RXD_OPAQUE_RING_STD) {
 3256 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
 3257 mapping);
 3258 skb = tp->rx_std_buffers[desc_idx].skb;
 3259 post_ptr = &tp->rx_std_ptr;
f92905de 3260 rx_std_posted++;
1da177e4
LT
 3261 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 3262 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
 3263 mapping);
 3264 skb = tp->rx_jumbo_buffers[desc_idx].skb;
 3265 post_ptr = &tp->rx_jumbo_ptr;
 3266 }
 3267 else {
 3268 goto next_pkt_nopost;
 3269 }
 3270
 3271 work_mask |= opaque_key;
 3272
 3273 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 3274 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
 3275 drop_it:
 3276 tg3_recycle_rx(tp, opaque_key,
 3277 desc_idx, *post_ptr);
 3278 drop_it_no_recycle:
 3279 /* Other statistics kept track of by card. */
 3280 tp->net_stats.rx_dropped++;
 3281 goto next_pkt;
 3282 }
 3283
 3284 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
 3285
6aa20a22 3286 if (len > RX_COPY_THRESHOLD
1da177e4
LT
 3287 && tp->rx_offset == 2
 3288 /* rx_offset != 2 iff this is a 5701 card running
 3289 * in PCI-X mode [see tg3_get_invariants()] */
 3290 ) {
 3291 int skb_size;
 3292
 /* Post a replacement buffer first; only if that succeeds do
  * we unmap and take the original skb. */
 3293 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
 3294 desc_idx, *post_ptr);
 3295 if (skb_size < 0)
 3296 goto drop_it;
 3297
 3298 pci_unmap_single(tp->pdev, dma_addr,
 3299 skb_size - tp->rx_offset,
 3300 PCI_DMA_FROMDEVICE);
 3301
 3302 skb_put(skb, len);
 3303 } else {
 3304 struct sk_buff *copy_skb;
 3305
 /* Copybreak: recycle the mapped buffer and hand the stack a
  * small private copy instead. */
 3306 tg3_recycle_rx(tp, opaque_key,
 3307 desc_idx, *post_ptr);
 3308
a20e9c62 3309 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
1da177e4
LT
 3310 if (copy_skb == NULL)
 3311 goto drop_it_no_recycle;
 3312
1da177e4
LT
 3313 skb_reserve(copy_skb, 2);
 3314 skb_put(copy_skb, len);
 3315 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 3316 memcpy(copy_skb->data, skb->data, len);
 3317 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 3318
 3319 /* We'll reuse the original ring buffer. */
 3320 skb = copy_skb;
 3321 }
 3322
 /* Trust the hardware checksum only when the chip flagged it and
  * it verified to 0xffff. */
 3323 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
 3324 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
 3325 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
 3326 >> RXD_TCPCSUM_SHIFT) == 0xffff))
 3327 skb->ip_summed = CHECKSUM_UNNECESSARY;
 3328 else
 3329 skb->ip_summed = CHECKSUM_NONE;
 3330
 3331 skb->protocol = eth_type_trans(skb, tp->dev);
 3332#if TG3_VLAN_TAG_USED
 3333 if (tp->vlgrp != NULL &&
 3334 desc->type_flags & RXD_FLAG_VLAN) {
 3335 tg3_vlan_rx(tp, skb,
 3336 desc->err_vlan & RXD_VLAN_MASK);
 3337 } else
 3338#endif
 3339 netif_receive_skb(skb);
 3340
 3341 tp->dev->last_rx = jiffies;
 3342 received++;
 3343 budget--;
 3344
 3345next_pkt:
 3346 (*post_ptr)++;
f92905de
MC
 3347
 /* Some chips limit how many std buffers may be posted before the
  * mailbox is rung; flush early when we hit that limit. */
 3348 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
 3349 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
 3350
 3351 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
 3352 TG3_64BIT_REG_LOW, idx);
 3353 work_mask &= ~RXD_OPAQUE_RING_STD;
 3354 rx_std_posted = 0;
 3355 }
1da177e4 3356next_pkt_nopost:
483ba50b
MC
 3357 sw_idx++;
 3358 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
52f6d697
MC
 3359
 3360 /* Refresh hw_idx to see if there is new work */
 3361 if (sw_idx == hw_idx) {
 3362 hw_idx = tp->hw_status->idx[0].rx_producer;
 3363 rmb();
 3364 }
1da177e4
LT
 3365 }
 3366
 3367 /* ACK the status ring. */
483ba50b
MC
 3368 tp->rx_rcb_ptr = sw_idx;
 3369 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
1da177e4
LT
 3370
 3371 /* Refill RX ring(s). */
 3372 if (work_mask & RXD_OPAQUE_RING_STD) {
 3373 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
 3374 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
 3375 sw_idx);
 3376 }
 3377 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
 3378 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
 3379 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
 3380 sw_idx);
 3381 }
 /* Order the mailbox writes before any subsequent MMIO (mmiowb
  * semantics per the kernel memory-barriers docs). */
 3382 mmiowb();
 3383
 3384 return received;
 3385}
3386
/* tg3_poll() - NAPI dev->poll callback.
 *
 * Handles (in order): PHY/link-change events signalled in the status
 * block, TX completions, then RX processing bounded by *budget and the
 * device quota.  When no work remains, completes NAPI and re-enables
 * chip interrupts via tg3_restart_ints().  Returns 0 when done, 1 when
 * more work remains (old-style NAPI contract).
 */
3387static int tg3_poll(struct net_device *netdev, int *budget)
 3388{
 3389 struct tg3 *tp = netdev_priv(netdev);
 3390 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
 3391 int done;
 3392
1da177e4
LT
 3393 /* handle link change and other phy events */
 3394 if (!(tp->tg3_flags &
 3395 (TG3_FLAG_USE_LINKCHG_REG |
 3396 TG3_FLAG_POLL_SERDES))) {
 3397 if (sblk->status & SD_STATUS_LINK_CHG) {
 3398 sblk->status = SD_STATUS_UPDATED |
 3399 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 3400 spin_lock(&tp->lock);
1da177e4 3401 tg3_setup_phy(tp, 0);
f47c11ee 3402 spin_unlock(&tp->lock);
1da177e4
LT
 3403 }
 3404 }
 3405
 3406 /* run TX completion thread */
 3407 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
1da177e4 3408 tg3_tx(tp);
df3e6548
MC
 /* TX recovery pending: bail out of NAPI and let the reset
  * task rebuild the device. */
 3409 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
 3410 netif_rx_complete(netdev);
 3411 schedule_work(&tp->reset_task);
 3412 return 0;
 3413 }
1da177e4
LT
 3414 }
 3415
1da177e4
LT
 3416 /* run RX thread, within the bounds set by NAPI.
 3417 * All RX "locking" is done by ensuring outside
 3418 * code synchronizes with dev->poll()
 3419 */
1da177e4
LT
 3420 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
 3421 int orig_budget = *budget;
 3422 int work_done;
 3423
 3424 if (orig_budget > netdev->quota)
 3425 orig_budget = netdev->quota;
 3426
 3427 work_done = tg3_rx(tp, orig_budget);
 3428
 3429 *budget -= work_done;
 3430 netdev->quota -= work_done;
1da177e4
LT
 3431 }
 3432
 /* Record the status tag (tagged mode) or clear the UPDATED bit so
  * tg3_has_work() below sees a consistent view; rmb() orders the
  * tag read against the later status-block reads. */
38f3843e 3433 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
f7383c22 3434 tp->last_tag = sblk->status_tag;
38f3843e
MC
 3435 rmb();
 3436 } else
 3437 sblk->status &= ~SD_STATUS_UPDATED;
f7383c22 3438
1da177e4 3439 /* if no more work, tell net stack and NIC we're done */
f7383c22 3440 done = !tg3_has_work(tp);
1da177e4 3441 if (done) {
f47c11ee 3442 netif_rx_complete(netdev);
1da177e4 3443 tg3_restart_ints(tp);
1da177e4
LT
 3444 }
 3445
 3446 return (done ? 0 : 1);
 3447}
3448
f47c11ee
DM
/* tg3_irq_quiesce() - mark the IRQ handler as "synced" and wait for any
 * in-flight handler invocation to finish.  After this returns, the
 * handlers see tp->irq_sync != 0 (checked via tg3_irq_sync()) and do no
 * work.  Must not be called when irq_sync is already set (BUG_ON).
 * The smp_mb() orders the irq_sync store before synchronize_irq().
 */
3449static void tg3_irq_quiesce(struct tg3 *tp)
 3450{
 3451 BUG_ON(tp->irq_sync);
 3452
 3453 tp->irq_sync = 1;
 3454 smp_mb();
 3455
 3456 synchronize_irq(tp->pdev->irq);
 3457}
3458
/* Non-zero while the driver is quiesced (see tg3_irq_quiesce); IRQ
 * handlers test this and skip scheduling NAPI when set. */
3459static inline int tg3_irq_sync(struct tg3 *tp)
 3460{
 3461 return tp->irq_sync;
 3462}
3463
3464/* Fully shutdown all tg3 driver activity elsewhere in the system.
 3465 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 3466 * with as well. Most of the time, this is not necessary except when
 3467 * shutting down the device.
 3468 */
 3469static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
 3470{
 /* Quiesce BEFORE taking the lock: synchronize_irq() may sleep-spin
  * waiting for a handler that would need the lock's context. */
 3471 if (irq_sync)
 3472 tg3_irq_quiesce(tp);
 3473 spin_lock_bh(&tp->lock);
f47c11ee
DM
 3474}
3475
/* Counterpart of tg3_full_lock(); note it does NOT clear irq_sync —
 * callers that quiesced reset tp->irq_sync separately. */
3476static inline void tg3_full_unlock(struct tg3 *tp)
 3477{
f47c11ee
DM
 3478 spin_unlock_bh(&tp->lock);
 3479}
3480
fcfa0a32
MC
3481/* One-shot MSI handler - Chip automatically disables interrupt
 3482 * after sending MSI so driver doesn't have to do it.
 3483 * Prefetches the status block and next return-ring entry, then kicks
 3484 * NAPI unless the driver is quiesced.  Always reports IRQ_HANDLED.
 3485 */
 3484static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
 3485{
 3486 struct net_device *dev = dev_id;
 3487 struct tg3 *tp = netdev_priv(dev);
 3488
 3489 prefetch(tp->hw_status);
 3490 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
 3491
 3492 if (likely(!tg3_irq_sync(tp)))
 3493 netif_rx_schedule(dev); /* schedule NAPI poll */
 3494
 3495 return IRQ_HANDLED;
 3496}
3497
88b06bc2
MC
3498/* MSI ISR - No need to check for interrupt sharing and no need to
 3499 * flush status block and interrupt mailbox. PCI ordering rules
 3500 * guarantee that MSI will arrive after the status block.
 3501 * Unlike the one-shot variant, this must write the interrupt mailbox
 3502 * itself to ack INTA# and suppress further irqs until NAPI finishes.
 3503 */
 3502static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 3503{
 3504 struct net_device *dev = dev_id;
 3505 struct tg3 *tp = netdev_priv(dev);
88b06bc2 3506
61487480
MC
 3507 prefetch(tp->hw_status);
 3508 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
88b06bc2 3509 /*
fac9b83e 3510 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 3511 * chip-internal interrupt pending events.
fac9b83e 3512 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
 3513 * NIC to stop sending us irqs, engaging "in-intr-handler"
 3514 * event coalescing.
 3515 */
 3516 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
61487480 3517 if (likely(!tg3_irq_sync(tp)))
88b06bc2 3518 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3519
88b06bc2
MC
 3520 return IRQ_RETVAL(1);
 3521}
3522
1da177e4
LT
/* tg3_interrupt() - legacy (possibly shared) INTx handler.
 *
 * Claims the interrupt if the status block shows an update or the PCI
 * state register says INTA# is asserted; otherwise reports unhandled so
 * a sharing device can run.  On claim: acks/masks via the interrupt
 * mailbox, then schedules NAPI if there is work, or re-enables
 * interrupts immediately when there is none.
 */
3523static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 3524{
 3525 struct net_device *dev = dev_id;
 3526 struct tg3 *tp = netdev_priv(dev);
 3527 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
 3528 unsigned int handled = 1;
 3529
1da177e4
LT
 3530 /* In INTx mode, it is possible for the interrupt to arrive at
 3531 * the CPU before the status block posted prior to the interrupt.
 3532 * Reading the PCI State register will confirm whether the
 3533 * interrupt is ours and will flush the status block.
 3534 */
 3535 if ((sblk->status & SD_STATUS_UPDATED) ||
 3536 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 3537 /*
fac9b83e 3538 * Writing any value to intr-mbox-0 clears PCI INTA# and
1da177e4 3539 * chip-internal interrupt pending events.
fac9b83e 3540 * Writing non-zero to intr-mbox-0 additional tells the
1da177e4
LT
 3541 * NIC to stop sending us irqs, engaging "in-intr-handler"
 3542 * event coalescing.
 3543 */
 3544 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 3545 0x00000001);
f47c11ee
DM
 3546 if (tg3_irq_sync(tp))
 3547 goto out;
fac9b83e 3548 sblk->status &= ~SD_STATUS_UPDATED;
61487480
MC
 3549 if (likely(tg3_has_work(tp))) {
 3550 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
fac9b83e 3551 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3552 } else {
fac9b83e
DM
 3553 /* No work, shared interrupt perhaps? re-enable
 3554 * interrupts, and flush that PCI write
 3555 */
09ee929c 3556 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
fac9b83e 3557 0x00000000);
fac9b83e
DM
 3558 }
 3559 } else { /* shared interrupt */
 3560 handled = 0;
 3561 }
f47c11ee 3562out:
fac9b83e
DM
 3563 return IRQ_RETVAL(handled);
 3564}
3565
/* tg3_interrupt_tagged() - INTx handler for chips using tagged status
 * mode: new work is detected by comparing the status block's tag with
 * the last tag the driver acknowledged, instead of the UPDATED bit.
 * Otherwise mirrors tg3_interrupt(); see the last_tag race note below.
 */
3566static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
 3567{
 3568 struct net_device *dev = dev_id;
 3569 struct tg3 *tp = netdev_priv(dev);
 3570 struct tg3_hw_status *sblk = tp->hw_status;
fac9b83e
DM
 3571 unsigned int handled = 1;
 3572
fac9b83e
DM
 3573 /* In INTx mode, it is possible for the interrupt to arrive at
 3574 * the CPU before the status block posted prior to the interrupt.
 3575 * Reading the PCI State register will confirm whether the
 3576 * interrupt is ours and will flush the status block.
 3577 */
38f3843e 3578 if ((sblk->status_tag != tp->last_tag) ||
fac9b83e 3579 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1da177e4 3580 /*
fac9b83e
DM
 3581 * writing any value to intr-mbox-0 clears PCI INTA# and
 3582 * chip-internal interrupt pending events.
 3583 * writing non-zero to intr-mbox-0 additional tells the
 3584 * NIC to stop sending us irqs, engaging "in-intr-handler"
 3585 * event coalescing.
1da177e4 3586 */
fac9b83e
DM
 3587 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 3588 0x00000001);
f47c11ee
DM
 3589 if (tg3_irq_sync(tp))
 3590 goto out;
38f3843e 3591 if (netif_rx_schedule_prep(dev)) {
61487480 3592 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
38f3843e
MC
 3593 /* Update last_tag to mark that this status has been
 3594 * seen. Because interrupt may be shared, we may be
 3595 * racing with tg3_poll(), so only update last_tag
 3596 * if tg3_poll() is not scheduled.
1da177e4 3597 */
38f3843e
MC
 3598 tp->last_tag = sblk->status_tag;
 3599 __netif_rx_schedule(dev);
1da177e4
LT
 3600 }
 3601 } else { /* shared interrupt */
 3602 handled = 0;
 3603 }
f47c11ee 3604out:
1da177e4
LT
 3605 return IRQ_RETVAL(handled);
 3606}
3607
7938109f
MC
3608/* ISR for interrupt test */
/* Minimal handler used only by the self-test path: if the interrupt is
 * ours (status updated or INTA# asserted) it acks the mailbox and
 * claims the irq; it never schedules NAPI. */
 3609static irqreturn_t tg3_test_isr(int irq, void *dev_id,
 3610 struct pt_regs *regs)
 3611{
 3612 struct net_device *dev = dev_id;
 3613 struct tg3 *tp = netdev_priv(dev);
 3614 struct tg3_hw_status *sblk = tp->hw_status;
 3615
f9804ddb
MC
 3616 if ((sblk->status & SD_STATUS_UPDATED) ||
 3617 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7938109f
MC
 3618 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 3619 0x00000001);
 3620 return IRQ_RETVAL(1);
 3621 }
 3622 return IRQ_RETVAL(0);
 3623}
3624
8e7a22e3 3625static int tg3_init_hw(struct tg3 *, int);
944d980e 3626static int tg3_halt(struct tg3 *, int, int);
1da177e4 3627
b9ec6c1b
MC
3628/* Restart hardware after configuration changes, self-test, etc.
 3629 * Invoked with tp->lock held.
 3630 * Returns 0 on success, or the tg3_init_hw() error after halting the
 3631 * chip and closing the device.  Note the error path drops and then
 3632 * re-takes the full lock around dev_close(), which may sleep.
 3633 */
 3631static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
 3632{
 3633 int err;
 3634
 3635 err = tg3_init_hw(tp, reset_phy);
 3636 if (err) {
 3637 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
 3638 "aborting.\n", tp->dev->name);
 3639 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 3640 tg3_full_unlock(tp);
 3641 del_timer_sync(&tp->timer);
 3642 tp->irq_sync = 0;
 3643 netif_poll_enable(tp->dev);
 3644 dev_close(tp->dev);
 3645 tg3_full_lock(tp, 0);
 3646 }
 3647 return err;
 3648}
3649
1da177e4
LT
3650#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: fake an interrupt so netconsole/netpoll can make
 * progress with IRQs disabled.  NOTE(review): this always calls
 * tg3_interrupt(), even when the device was registered with the MSI or
 * tagged-status handler — presumably safe because the handler only
 * inspects status and mailboxes, but worth confirming. */
 3651static void tg3_poll_controller(struct net_device *dev)
 3652{
88b06bc2
MC
 3653 struct tg3 *tp = netdev_priv(dev);
 3654
 3655 tg3_interrupt(tp->pdev->irq, dev, NULL);
1da177e4
LT
 3656}
 3657#endif
3658
/* tg3_reset_task() - workqueue handler that fully resets the chip after
 * an error (TX timeout, TX recovery).  Marks IN_RESET_TASK for the
 * duration, bails out if the device is already down, then halts and
 * re-initializes the hardware under the full lock (with IRQ quiesce).
 */
3659static void tg3_reset_task(void *_data)
 3660{
 3661 struct tg3 *tp = _data;
 3662 unsigned int restart_timer;
 3663
7faa006f
MC
 3664 tg3_full_lock(tp, 0);
 3665 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
 3666
 3667 if (!netif_running(tp->dev)) {
 3668 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
 3669 tg3_full_unlock(tp);
 3670 return;
 3671 }
 3672
 /* Drop the lock: tg3_netif_stop() -> tg3_full_lock(tp, 1) below
  * re-takes it with IRQ synchronization. */
 3673 tg3_full_unlock(tp);
 3674
1da177e4
LT
 3675 tg3_netif_stop(tp);
 3676
f47c11ee 3677 tg3_full_lock(tp, 1);
1da177e4
LT
 3678
 3679 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 3680 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 3681
df3e6548
MC
 /* TX recovery: fall back to flushed mailbox writes to work around
  * write-reordering, then clear the pending flag. */
 3682 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
 3683 tp->write32_tx_mbox = tg3_write32_tx_mbox;
 3684 tp->write32_rx_mbox = tg3_write_flush_reg32;
 3685 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
 3686 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
 3687 }
 3688
944d980e 3689 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
b9ec6c1b
MC
 3690 if (tg3_init_hw(tp, 1))
 3691 goto out;
1da177e4
LT
 3692
 3693 tg3_netif_start(tp);
 3694
1da177e4
LT
 3695 if (restart_timer)
 3696 mod_timer(&tp->timer, jiffies + 1);
7faa006f 3697
b9ec6c1b 3698out:
7faa006f
MC
 3699 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
 3700
 3701 tg3_full_unlock(tp);
1da177e4
LT
 3702}
3703
/* Watchdog callback for a stalled TX queue: log it and defer the heavy
 * reset work to tg3_reset_task via the workqueue (we are in a context
 * where a full reset cannot be done synchronously). */
3704static void tg3_tx_timeout(struct net_device *dev)
 3705{
 3706 struct tg3 *tp = netdev_priv(dev);
 3707
 3708 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
 3709 dev->name);
 3710
 3711 schedule_work(&tp->reset_task);
 3712}
3713
c58ec932
MC
3714/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
/* Returns non-zero when [base, base + len + 8) wraps the low 32 bits,
 * i.e. the buffer straddles a 4GB address boundary the chip cannot DMA
 * across.  The 0xffffdcc0 pre-check skips the wrap test unless base is
 * within ~9KB of a boundary — presumably an upper bound on frame size
 * plus the extra 8 bytes; TODO confirm against the chip erratum. */
 3715static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 3716{
 3717 u32 base = (u32) mapping & 0xffffffff;
 3718
 3719 return ((base > 0xffffdcc0) &&
 3720 (base + len + 8 < base));
 3721}
3722
72f2afb8
MC
3723/* Test for DMA addresses > 40-bit */
/* Only meaningful on 64-bit kernels with highmem; returns non-zero when
 * the buffer's end crosses the 40-bit limit on chips flagged with
 * TG3_FLAG_40BIT_DMA_BUG.  Everywhere else it compiles to 0. */
 3724static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 3725 int len)
 3726{
 3727#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6728a8e2 3728 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
72f2afb8
MC
 3729 return (((u64) mapping + len) > DMA_40BIT_MASK);
 3730 return 0;
 3731#else
 3732 return 0;
 3733#endif
 3734}
3735
1da177e4
LT
3736static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3737
72f2afb8
MC
3738/* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Linearizes the offending skb into a freshly-mapped copy, re-posts it
 * as a single descriptor at *start, then unmaps and clears the original
 * sw ring entries in [*start, last_plus_one).  On success *start is
 * advanced past the new descriptor.  Returns 0 on success, -1 when the
 * copy could not be allocated or itself crosses a 4GB boundary (packet
 * dropped).  The original skb is always freed. */
 3739static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
c58ec932
MC
 3740 u32 last_plus_one, u32 *start,
 3741 u32 base_flags, u32 mss)
1da177e4
LT
 3742{
 3743 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
c58ec932 3744 dma_addr_t new_addr = 0;
1da177e4 3745 u32 entry = *start;
c58ec932 3746 int i, ret = 0;
1da177e4
LT
 3747
 3748 if (!new_skb) {
c58ec932
MC
 3749 ret = -1;
 3750 } else {
 3751 /* New SKB is guaranteed to be linear. */
 3752 entry = *start;
 3753 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
 3754 PCI_DMA_TODEVICE);
 3755 /* Make sure new skb does not cross any 4G boundaries.
 3756 * Drop the packet if it does.
 3757 */
 3758 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
 3759 ret = -1;
 3760 dev_kfree_skb(new_skb);
 3761 new_skb = NULL;
 3762 } else {
 3763 tg3_set_txd(tp, entry, new_addr, new_skb->len,
 3764 base_flags, 1 | (mss << 1));
 3765 *start = NEXT_TX(entry);
 3766 }
1da177e4
LT
 3767 }
 3768
1da177e4
LT
 3769 /* Now clean up the sw ring entries. */
 3770 i = 0;
 3771 while (entry != last_plus_one) {
 3772 int len;
 3773
 /* Entry 0 is the linear head; subsequent entries were frags. */
 3774 if (i == 0)
 3775 len = skb_headlen(skb);
 3776 else
 3777 len = skb_shinfo(skb)->frags[i-1].size;
 3778 pci_unmap_single(tp->pdev,
 3779 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
 3780 len, PCI_DMA_TODEVICE);
 3781 if (i == 0) {
 3782 tp->tx_buffers[entry].skb = new_skb;
 3783 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
 3784 } else {
 3785 tp->tx_buffers[entry].skb = NULL;
 3786 }
 3787 entry = NEXT_TX(entry);
 3788 i++;
 3789 }
 3790
 3791 dev_kfree_skb(skb);
 3792
c58ec932 3793 return ret;
1da177e4
LT
 3794}
3795
3796static void tg3_set_txd(struct tg3 *tp, int entry,
3797 dma_addr_t mapping, int len, u32 flags,
3798 u32 mss_and_is_end)
3799{
3800 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3801 int is_end = (mss_and_is_end & 0x1);
3802 u32 mss = (mss_and_is_end >> 1);
3803 u32 vlan_tag = 0;
3804
3805 if (is_end)
3806 flags |= TXD_FLAG_END;
3807 if (flags & TXD_FLAG_VLAN) {
3808 vlan_tag = flags >> 16;
3809 flags &= 0xffff;
3810 }
3811 vlan_tag |= (mss << TXD_MSS_SHIFT);
3812
3813 txd->addr_hi = ((u64) mapping >> 32);
3814 txd->addr_lo = ((u64) mapping & 0xffffffff);
3815 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3816 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3817}
3818
5a6f3074
MC
3819/* hard_start_xmit for devices that don't have any bugs and
3820 * support TG3_FLG2_HW_TSO_2 only.
3821 */
/* tg3_start_xmit() - fast-path hard_start_xmit for bug-free chips with
 * TG3_FLG2_HW_TSO_2 (see the comment above).  Maps the head and every
 * frag, writes the TX descriptors, rings the producer mailbox, and
 * stops the queue when the ring gets too full for a max-frag packet.
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY when the ring is full.
 */
1da177e4 3822static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5a6f3074
MC
 3823{
 3824 struct tg3 *tp = netdev_priv(dev);
 3825 dma_addr_t mapping;
 3826 u32 len, entry, base_flags, mss;
 3827
 3828 len = skb_headlen(skb);
 3829
00b70504
MC
 3830 /* We are running in BH disabled context with netif_tx_lock
 3831 * and TX reclaim runs via tp->poll inside of a software
5a6f3074
MC
 3832 * interrupt. Furthermore, IRQ processing runs lockless so we have
 3833 * no IRQ context deadlocks to worry about either. Rejoice!
 3834 */
 3835 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5a6f3074
MC
 3836 if (!netif_queue_stopped(dev)) {
 3837 netif_stop_queue(dev);
 3838
 3839 /* This is a hard error, log it. */
 3840 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 3841 "queue awake!\n", dev->name);
 3842 }
5a6f3074
MC
 3843 return NETDEV_TX_BUSY;
 3844 }
 3845
 3846 entry = tp->tx_prod;
 3847 base_flags = 0;
 3848#if TG3_TSO_SUPPORT != 0
 3849 mss = 0;
 /* TSO: the hardware segments; we prime IP/TCP headers and fold the
  * header length into the upper mss bits as the chip expects. */
 3850 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
7967168c 3851 (mss = skb_shinfo(skb)->gso_size) != 0) {
5a6f3074
MC
 3852 int tcp_opt_len, ip_tcp_len;
 3853
 3854 if (skb_header_cloned(skb) &&
 3855 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
 3856 dev_kfree_skb(skb);
 3857 goto out_unlock;
 3858 }
 3859
b0026624
MC
 3860 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
 3861 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
 3862 else {
 3863 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
 3864 ip_tcp_len = (skb->nh.iph->ihl * 4) +
 3865 sizeof(struct tcphdr);
 3866
 3867 skb->nh.iph->check = 0;
 3868 skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
 3869 tcp_opt_len);
 3870 mss |= (ip_tcp_len + tcp_opt_len) << 9;
 3871 }
5a6f3074
MC
 3872
 3873 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
 3874 TXD_FLAG_CPU_POST_DMA);
 3875
5a6f3074
MC
 3876 skb->h.th->check = 0;
 3877
5a6f3074 3878 }
84fa7933 3879 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5a6f3074
MC
 3880 base_flags |= TXD_FLAG_TCPUDP_CSUM;
 3881#else
 3882 mss = 0;
84fa7933 3883 if (skb->ip_summed == CHECKSUM_PARTIAL)
5a6f3074
MC
 3884 base_flags |= TXD_FLAG_TCPUDP_CSUM;
 3885#endif
 3886#if TG3_VLAN_TAG_USED
 3887 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
 3888 base_flags |= (TXD_FLAG_VLAN |
 3889 (vlan_tx_tag_get(skb) << 16));
 3890#endif
 3891
 3892 /* Queue skb data, a.k.a. the main skb fragment. */
 3893 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 3894
 3895 tp->tx_buffers[entry].skb = skb;
 3896 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 3897
 3898 tg3_set_txd(tp, entry, mapping, len, base_flags,
 3899 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 3900
 3901 entry = NEXT_TX(entry);
 3902
 3903 /* Now loop through additional data fragments, and queue them. */
 3904 if (skb_shinfo(skb)->nr_frags > 0) {
 3905 unsigned int i, last;
 3906
 3907 last = skb_shinfo(skb)->nr_frags - 1;
 3908 for (i = 0; i <= last; i++) {
 3909 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 3910
 3911 len = frag->size;
 3912 mapping = pci_map_page(tp->pdev,
 3913 frag->page,
 3914 frag->page_offset,
 3915 len, PCI_DMA_TODEVICE);
 3916
 /* Only the first entry keeps the skb pointer for reclaim. */
 3917 tp->tx_buffers[entry].skb = NULL;
 3918 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 3919
 3920 tg3_set_txd(tp, entry, mapping, len,
 3921 base_flags, (i == last) | (mss << 1));
 3922
 3923 entry = NEXT_TX(entry);
 3924 }
 3925 }
 3926
 3927 /* Packets are ready, update Tx producer idx local and on card. */
 3928 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 3929
 3930 tp->tx_prod = entry;
1b2a7205 3931 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5a6f3074 3932 netif_stop_queue(dev);
1b2a7205 3933 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
5a6f3074
MC
 3934 netif_wake_queue(tp->dev);
 3935 }
 3936
 3937out_unlock:
 3938 mmiowb();
5a6f3074
MC
 3939
 3940 dev->trans_start = jiffies;
 3941
 3942 return NETDEV_TX_OK;
 3943}
3944
52c0fd83
MC
3945#if TG3_TSO_SUPPORT != 0
3946static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3947
3948/* Use GSO to workaround a rare TSO bug that may be triggered when the
3949 * TSO header is greater than 80 bytes.
3950 */
/* tg3_tso_bug() - software fallback for the >80-byte-TSO-header bug
 * (see the comment above): segments the skb with GSO and transmits each
 * resulting packet individually through tg3_start_xmit_dma_bug().
 * Returns NETDEV_TX_BUSY if the ring can't hold the worst-case segment
 * count, otherwise NETDEV_TX_OK; the original skb is always freed.
 */
3951static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 3952{
 3953 struct sk_buff *segs, *nskb;
 3954
 3955 /* Estimate the number of fragments in the worst case */
1b2a7205 3956 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
52c0fd83
MC
 3957 netif_stop_queue(tp->dev);
 3958 return NETDEV_TX_BUSY;
 3959 }
 3960
 /* Mask out NETIF_F_TSO so GSO actually splits the skb. */
 3961 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
 3962 if (unlikely(IS_ERR(segs)))
 3963 goto tg3_tso_bug_end;
 3964
 3965 do {
 3966 nskb = segs;
 3967 segs = segs->next;
 3968 nskb->next = NULL;
 3969 tg3_start_xmit_dma_bug(nskb, tp->dev);
 3970 } while (segs);
 3971
 3972tg3_tso_bug_end:
 3973 dev_kfree_skb(skb);
 3974
 3975 return NETDEV_TX_OK;
 3976}
3977#endif
3978
5a6f3074
MC
3979/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3980 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3981 */
/* tg3_start_xmit_dma_bug() - hard_start_xmit for chips with the 4GB
 * and/or 40-bit DMA bugs and TG3_FLG2_HW_TSO_1 or firmware TSO.
 *
 * Like tg3_start_xmit(), but every mapping is checked with
 * tg3_4g_overflow_test()/tg3_40bit_overflow_test(); if any descriptor
 * would trip a hardware bug, the whole packet is re-queued through
 * tigon3_dma_hwbug_workaround().  Oversized TSO headers are diverted to
 * tg3_tso_bug().  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
3982static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
 3983{
 3984 struct tg3 *tp = netdev_priv(dev);
 3985 dma_addr_t mapping;
1da177e4
LT
 3986 u32 len, entry, base_flags, mss;
 3987 int would_hit_hwbug;
1da177e4
LT
 3988
 3989 len = skb_headlen(skb);
 3990
00b70504
MC
 3991 /* We are running in BH disabled context with netif_tx_lock
 3992 * and TX reclaim runs via tp->poll inside of a software
f47c11ee
DM
 3993 * interrupt. Furthermore, IRQ processing runs lockless so we have
 3994 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 3995 */
1b2a7205 3996 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1f064a87
SH
 3997 if (!netif_queue_stopped(dev)) {
 3998 netif_stop_queue(dev);
 3999
 4000 /* This is a hard error, log it. */
 4001 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 4002 "queue awake!\n", dev->name);
 4003 }
1da177e4
LT
 4004 return NETDEV_TX_BUSY;
 4005 }
 4006
 4007 entry = tp->tx_prod;
 4008 base_flags = 0;
84fa7933 4009 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4
LT
 4010 base_flags |= TXD_FLAG_TCPUDP_CSUM;
 4011#if TG3_TSO_SUPPORT != 0
 4012 mss = 0;
 4013 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
7967168c 4014 (mss = skb_shinfo(skb)->gso_size) != 0) {
52c0fd83 4015 int tcp_opt_len, ip_tcp_len, hdr_len;
1da177e4
LT
 4016
 4017 if (skb_header_cloned(skb) &&
 4018 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
 4019 dev_kfree_skb(skb);
 4020 goto out_unlock;
 4021 }
 4022
 4023 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
 4024 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
 4025
52c0fd83
MC
 /* Headers over 80 bytes trip the HW_TSO_1 bug; use GSO. */
 4026 hdr_len = ip_tcp_len + tcp_opt_len;
 4027 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
 4028 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
 4029 return (tg3_tso_bug(tp, skb));
 4030
1da177e4
LT
 4031 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
 4032 TXD_FLAG_CPU_POST_DMA);
 4033
 4034 skb->nh.iph->check = 0;
52c0fd83 4035 skb->nh.iph->tot_len = htons(mss + hdr_len);
1da177e4
LT
 /* Hardware TSO computes the TCP checksum itself; firmware TSO
  * needs the pseudo-header checksum pre-seeded. */
 4036 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
 4037 skb->h.th->check = 0;
 4038 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
 4039 }
 4040 else {
 4041 skb->h.th->check =
 4042 ~csum_tcpudp_magic(skb->nh.iph->saddr,
 4043 skb->nh.iph->daddr,
 4044 0, IPPROTO_TCP, 0);
 4045 }
 4046
 4047 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
 4048 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
 4049 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
 4050 int tsflags;
 4051
 4052 tsflags = ((skb->nh.iph->ihl - 5) +
 4053 (tcp_opt_len >> 2));
 4054 mss |= (tsflags << 11);
 4055 }
 4056 } else {
 4057 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
 4058 int tsflags;
 4059
 4060 tsflags = ((skb->nh.iph->ihl - 5) +
 4061 (tcp_opt_len >> 2));
 4062 base_flags |= tsflags << 12;
 4063 }
 4064 }
 4065 }
 4066#else
 4067 mss = 0;
 4068#endif
 4069#if TG3_VLAN_TAG_USED
 4070 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
 4071 base_flags |= (TXD_FLAG_VLAN |
 4072 (vlan_tx_tag_get(skb) << 16));
 4073#endif
 4074
 4075 /* Queue skb data, a.k.a. the main skb fragment. */
 4076 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 4077
 4078 tp->tx_buffers[entry].skb = skb;
 4079 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 4080
 4081 would_hit_hwbug = 0;
 4082
 4083 if (tg3_4g_overflow_test(mapping, len))
c58ec932 4084 would_hit_hwbug = 1;
1da177e4
LT
 4085
 4086 tg3_set_txd(tp, entry, mapping, len, base_flags,
 4087 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 4088
 4089 entry = NEXT_TX(entry);
 4090
 4091 /* Now loop through additional data fragments, and queue them. */
 4092 if (skb_shinfo(skb)->nr_frags > 0) {
 4093 unsigned int i, last;
 4094
 4095 last = skb_shinfo(skb)->nr_frags - 1;
 4096 for (i = 0; i <= last; i++) {
 4097 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 4098
 4099 len = frag->size;
 4100 mapping = pci_map_page(tp->pdev,
 4101 frag->page,
 4102 frag->page_offset,
 4103 len, PCI_DMA_TODEVICE);
 4104
 4105 tp->tx_buffers[entry].skb = NULL;
 4106 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 4107
c58ec932
MC
 4108 if (tg3_4g_overflow_test(mapping, len))
 4109 would_hit_hwbug = 1;
1da177e4 4110
72f2afb8
MC
 4111 if (tg3_40bit_overflow_test(tp, mapping, len))
 4112 would_hit_hwbug = 1;
 4113
1da177e4
LT
 4114 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 4115 tg3_set_txd(tp, entry, mapping, len,
 4116 base_flags, (i == last)|(mss << 1));
 4117 else
 4118 tg3_set_txd(tp, entry, mapping, len,
 4119 base_flags, (i == last));
 4120
 4121 entry = NEXT_TX(entry);
 4122 }
 4123 }
 4124
 4125 if (would_hit_hwbug) {
 4126 u32 last_plus_one = entry;
 4127 u32 start;
1da177e4 4128
c58ec932
MC
 /* Rewind to the first descriptor of this packet. */
 4129 start = entry - 1 - skb_shinfo(skb)->nr_frags;
 4130 start &= (TG3_TX_RING_SIZE - 1);
1da177e4
LT
 4131
 4132 /* If the workaround fails due to memory/mapping
 4133 * failure, silently drop this packet.
 4134 */
72f2afb8 4135 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
c58ec932 4136 &start, base_flags, mss))
1da177e4
LT
 4137 goto out_unlock;
 4138
 4139 entry = start;
 4140 }
 4141
 4142 /* Packets are ready, update Tx producer idx local and on card. */
 4143 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 4144
 4145 tp->tx_prod = entry;
1b2a7205 4146 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
1da177e4 4147 netif_stop_queue(dev);
1b2a7205 4148 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
51b91468
MC
 4149 netif_wake_queue(tp->dev);
 4150 }
1da177e4
LT
 4151
 4152out_unlock:
 4153 mmiowb();
1da177e4
LT
 4154
 4155 dev->trans_start = jiffies;
 4156
 4157 return NETDEV_TX_OK;
 4158}
4159
4160static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4161 int new_mtu)
4162{
4163 dev->mtu = new_mtu;
4164
ef7f5ec0 4165 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4166 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4167 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4168 ethtool_op_set_tso(dev, 0);
4169 }
4170 else
4171 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4172 } else {
a4e2b347 4173 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4174 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4175 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4176 }
1da177e4
LT
4177}
4178
4179static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4180{
4181 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 4182 int err;
1da177e4
LT
4183
4184 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4185 return -EINVAL;
4186
4187 if (!netif_running(dev)) {
4188 /* We'll just catch it later when the
4189 * device is up'd.
4190 */
4191 tg3_set_mtu(dev, tp, new_mtu);
4192 return 0;
4193 }
4194
4195 tg3_netif_stop(tp);
f47c11ee
DM
4196
4197 tg3_full_lock(tp, 1);
1da177e4 4198
944d980e 4199 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
4200
4201 tg3_set_mtu(dev, tp, new_mtu);
4202
b9ec6c1b 4203 err = tg3_restart_hw(tp, 0);
1da177e4 4204
b9ec6c1b
MC
4205 if (!err)
4206 tg3_netif_start(tp);
1da177e4 4207
f47c11ee 4208 tg3_full_unlock(tp);
1da177e4 4209
b9ec6c1b 4210 return err;
1da177e4
LT
4211}
4212
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard rx ring: unmap and free every posted skb. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo rx ring: same, but with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Tx ring: one skb may span several descriptors (head + one per
	 * page fragment), so the index is advanced manually inside the
	 * loop rather than by the for-statement.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Head descriptor maps the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Following descriptors map the paged fragments; the
		 * index mask handles wrap-around at the end of the ring.
		 */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4284
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even a single rx buffer
 * could be allocated.  On partial allocation the pending counts are
 * shrunk to what was actually obtained.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips have no jumbo ring; large MTUs are handled
	 * by enlarging the standard ring's buffers instead.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			/* Zero buffers is fatal; otherwise run with a
			 * shrunk ring.
			 */
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				/* Unlike the standard ring above, free
				 * the already-allocated buffers on total
				 * jumbo failure before bailing out.
				 */
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4374
4375/*
4376 * Must not be invoked with interrupt sources disabled and
4377 * the hardware shutdown down.
4378 */
4379static void tg3_free_consistent(struct tg3 *tp)
4380{
b4558ea9
JJ
4381 kfree(tp->rx_std_buffers);
4382 tp->rx_std_buffers = NULL;
1da177e4
LT
4383 if (tp->rx_std) {
4384 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4385 tp->rx_std, tp->rx_std_mapping);
4386 tp->rx_std = NULL;
4387 }
4388 if (tp->rx_jumbo) {
4389 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4390 tp->rx_jumbo, tp->rx_jumbo_mapping);
4391 tp->rx_jumbo = NULL;
4392 }
4393 if (tp->rx_rcb) {
4394 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4395 tp->rx_rcb, tp->rx_rcb_mapping);
4396 tp->rx_rcb = NULL;
4397 }
4398 if (tp->tx_ring) {
4399 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4400 tp->tx_ring, tp->tx_desc_mapping);
4401 tp->tx_ring = NULL;
4402 }
4403 if (tp->hw_status) {
4404 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4405 tp->hw_status, tp->status_mapping);
4406 tp->hw_status = NULL;
4407 }
4408 if (tp->hw_stats) {
4409 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4410 tp->hw_stats, tp->stats_mapping);
4411 tp->hw_stats = NULL;
4412 }
4413}
4414
4415/*
4416 * Must not be invoked with interrupt sources disabled and
4417 * the hardware shutdown down. Can sleep.
4418 */
4419static int tg3_alloc_consistent(struct tg3 *tp)
4420{
4421 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4422 (TG3_RX_RING_SIZE +
4423 TG3_RX_JUMBO_RING_SIZE)) +
4424 (sizeof(struct tx_ring_info) *
4425 TG3_TX_RING_SIZE),
4426 GFP_KERNEL);
4427 if (!tp->rx_std_buffers)
4428 return -ENOMEM;
4429
4430 memset(tp->rx_std_buffers, 0,
4431 (sizeof(struct ring_info) *
4432 (TG3_RX_RING_SIZE +
4433 TG3_RX_JUMBO_RING_SIZE)) +
4434 (sizeof(struct tx_ring_info) *
4435 TG3_TX_RING_SIZE));
4436
4437 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4438 tp->tx_buffers = (struct tx_ring_info *)
4439 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4440
4441 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4442 &tp->rx_std_mapping);
4443 if (!tp->rx_std)
4444 goto err_out;
4445
4446 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4447 &tp->rx_jumbo_mapping);
4448
4449 if (!tp->rx_jumbo)
4450 goto err_out;
4451
4452 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4453 &tp->rx_rcb_mapping);
4454 if (!tp->rx_rcb)
4455 goto err_out;
4456
4457 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4458 &tp->tx_desc_mapping);
4459 if (!tp->tx_ring)
4460 goto err_out;
4461
4462 tp->hw_status = pci_alloc_consistent(tp->pdev,
4463 TG3_HW_STATUS_SIZE,
4464 &tp->status_mapping);
4465 if (!tp->hw_status)
4466 goto err_out;
4467
4468 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4469 sizeof(struct tg3_hw_stats),
4470 &tp->stats_mapping);
4471 if (!tp->hw_stats)
4472 goto err_out;
4473
4474 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4475 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4476
4477 return 0;
4478
4479err_out:
4480 tg3_free_consistent(tp);
4481 return -ENOMEM;
4482}
4483
4484#define MAX_WAIT_CNT 1000
4485
4486/* To stop a block, clear the enable bit and poll till it
4487 * clears. tp->lock is held.
4488 */
b3b7d6be 4489static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4490{
4491 unsigned int i;
4492 u32 val;
4493
4494 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4495 switch (ofs) {
4496 case RCVLSC_MODE:
4497 case DMAC_MODE:
4498 case MBFREE_MODE:
4499 case BUFMGR_MODE:
4500 case MEMARB_MODE:
4501 /* We can't enable/disable these bits of the
4502 * 5705/5750, just say success.
4503 */
4504 return 0;
4505
4506 default:
4507 break;
4508 };
4509 }
4510
4511 val = tr32(ofs);
4512 val &= ~enable_bit;
4513 tw32_f(ofs, val);
4514
4515 for (i = 0; i < MAX_WAIT_CNT; i++) {
4516 udelay(100);
4517 val = tr32(ofs);
4518 if ((val & enable_bit) == 0)
4519 break;
4520 }
4521
b3b7d6be 4522 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4523 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4524 "ofs=%lx enable_bit=%x\n",
4525 ofs, enable_bit);
4526 return -ENODEV;
4527 }
4528
4529 return 0;
4530}
4531
/* Shut down all DMA/MAC engines in the order the hardware requires:
 * receive path first, then transmit, then host coalescing, DMA and
 * memory arbiter.  Errors from individual blocks are OR-ed together;
 * with silent != 0 the per-block timeouts are not logged.
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new rx traffic before tearing the path down. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* MAC_TX_MODE has no tg3_stop_block helper; poll it directly. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset to flush the internal queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the status/stats blocks so stale data is not consumed
	 * after the next bring-up.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4594
4595/* tp->lock is held. */
4596static int tg3_nvram_lock(struct tg3 *tp)
4597{
4598 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4599 int i;
4600
ec41c7df
MC
4601 if (tp->nvram_lock_cnt == 0) {
4602 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4603 for (i = 0; i < 8000; i++) {
4604 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4605 break;
4606 udelay(20);
4607 }
4608 if (i == 8000) {
4609 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4610 return -ENODEV;
4611 }
1da177e4 4612 }
ec41c7df 4613 tp->nvram_lock_cnt++;
1da177e4
LT
4614 }
4615 return 0;
4616}
4617
4618/* tp->lock is held. */
4619static void tg3_nvram_unlock(struct tg3 *tp)
4620{
ec41c7df
MC
4621 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4622 if (tp->nvram_lock_cnt > 0)
4623 tp->nvram_lock_cnt--;
4624 if (tp->nvram_lock_cnt == 0)
4625 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4626 }
1da177e4
LT
4627}
4628
e6af301b
MC
4629/* tp->lock is held. */
4630static void tg3_enable_nvram_access(struct tg3 *tp)
4631{
4632 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4633 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4634 u32 nvaccess = tr32(NVRAM_ACCESS);
4635
4636 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4637 }
4638}
4639
4640/* tp->lock is held. */
4641static void tg3_disable_nvram_access(struct tg3 *tp)
4642{
4643 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4644 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4645 u32 nvaccess = tr32(NVRAM_ACCESS);
4646
4647 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4648 }
4649}
4650
1da177e4
LT
4651/* tp->lock is held. */
4652static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4653{
f49639e6
DM
4654 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4655 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4
LT
4656
4657 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4658 switch (kind) {
4659 case RESET_KIND_INIT:
4660 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4661 DRV_STATE_START);
4662 break;
4663
4664 case RESET_KIND_SHUTDOWN:
4665 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4666 DRV_STATE_UNLOAD);
4667 break;
4668
4669 case RESET_KIND_SUSPEND:
4670 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4671 DRV_STATE_SUSPEND);
4672 break;
4673
4674 default:
4675 break;
4676 };
4677 }
4678}
4679
4680/* tp->lock is held. */
4681static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4682{
4683 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4684 switch (kind) {
4685 case RESET_KIND_INIT:
4686 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4687 DRV_STATE_START_DONE);
4688 break;
4689
4690 case RESET_KIND_SHUTDOWN:
4691 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4692 DRV_STATE_UNLOAD_DONE);
4693 break;
4694
4695 default:
4696 break;
4697 };
4698 }
4699}
4700
4701/* tp->lock is held. */
4702static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4703{
4704 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4705 switch (kind) {
4706 case RESET_KIND_INIT:
4707 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4708 DRV_STATE_START);
4709 break;
4710
4711 case RESET_KIND_SHUTDOWN:
4712 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4713 DRV_STATE_UNLOAD);
4714 break;
4715
4716 case RESET_KIND_SUSPEND:
4717 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4718 DRV_STATE_SUSPEND);
4719 break;
4720
4721 default:
4722 break;
4723 };
4724 }
4725}
4726
/* Wait for on-chip firmware to finish initializing after a reset.
 * The 5906's VCPU exposes an explicit init-done status bit; other
 * chips are polled via the firmware mailbox for the complemented
 * magic value.  Returns 0, or -ENODEV only on a 5906 VCPU timeout.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Poll up to 400 * 10us for the VCPU to report ready. */
		for (i = 0; i < 400; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(10);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
4764
1da177e4
LT
4765static void tg3_stop_fw(struct tg3 *);
4766
/* Perform a full core-clock chip reset and bring the device back to a
 * state where registers and NVRAM are accessible again: issue the
 * GRC reset, restore PCI config space, MSI and memory arbiter state,
 * wait for firmware, and reprobe the ASF configuration.  The exact
 * ordering of the register writes below is dictated by hardware
 * errata -- do not reorder.  tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;	/* NOTE: intentionally shadows outer val */

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode appropriate for the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state. */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4966
4967/* tp->lock is held. */
4968static void tg3_stop_fw(struct tg3 *tp)
4969{
4970 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4971 u32 val;
4972 int i;
4973
4974 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4975 val = tr32(GRC_RX_CPU_EVENT);
4976 val |= (1 << 14);
4977 tw32(GRC_RX_CPU_EVENT, val);
4978
4979 /* Wait for RX cpu to ACK the event. */
4980 for (i = 0; i < 100; i++) {
4981 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4982 break;
4983 udelay(1);
4984 }
4985 }
4986}
4987
/* Fully halt the chip: pause firmware, signal the pre-reset state,
 * abort all hardware blocks (silently or not, per @silent), reset the
 * chip, then signal the post-reset state for both handshake styles.
 * Returns the tg3_chip_reset() result.  tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int reset_err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	reset_err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* Only the chip-reset result is propagated; abort errors were
	 * already reported (unless silent) and are not fatal here.
	 */
	return reset_err;
}
5008
/* Memory layout of the on-chip firmware image loaded into the RX CPU:
 * version triple, load address, and the address/length of each ELF-like
 * section (text/rodata/data/sbss/bss) within NIC SRAM.
 * NOTE: "RELASE" in TG3_FW_RELASE_MINOR is a historical typo, kept
 * as-is since the macro name may be referenced elsewhere in the file.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN	0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN	0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN	0xc
#define TG3_FW_BSS_ADDR	0x08000a70
#define TG3_FW_BSS_LEN	0x10
5023
50da859d 5024static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5025 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5026 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5027 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5028 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5029 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5030 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5031 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5032 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5033 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5034 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5035 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5036 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5037 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5038 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5039 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5040 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5041 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5042 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5043 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5044 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5045 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5046 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5047 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5048 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5049 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5050 0, 0, 0, 0, 0, 0,
5051 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5052 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5053 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5054 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5055 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5056 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5057 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5058 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5059 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5060 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5061 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5062 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5063 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5064 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5065 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5066 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5067 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5068 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5069 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5070 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5071 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5072 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5073 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5074 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5075 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5076 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5077 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5078 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5079 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5080 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5081 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5082 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5083 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5084 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5085 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5086 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5087 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5088 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5089 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5090 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5091 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5092 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5093 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5094 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5095 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5096 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5097 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5098 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5099 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5100 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5101 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5102 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5103 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5104 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5105 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5106 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5107 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5108 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5109 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5110 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5111 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5112 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5113 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5114 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5115 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5116};
5117
50da859d 5118static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5119 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5120 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5121 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5122 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5123 0x00000000
5124};
5125
5126#if 0 /* All zeros, don't eat up space with it. */
5127u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5128 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5129 0x00000000, 0x00000000, 0x00000000, 0x00000000
5130};
5131#endif
5132
5133#define RX_CPU_SCRATCH_BASE 0x30000
5134#define RX_CPU_SCRATCH_SIZE 0x04000
5135#define TX_CPU_SCRATCH_BASE 0x34000
5136#define TX_CPU_SCRATCH_SIZE 0x04000
5137
5138/* tp->lock is held. */
5139static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5140{
5141 int i;
5142
5d9428de
ES
5143 BUG_ON(offset == TX_CPU_BASE &&
5144 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
1da177e4 5145
b5d3772c
MC
5146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5147 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5148
5149 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5150 return 0;
5151 }
1da177e4
LT
5152 if (offset == RX_CPU_BASE) {
5153 for (i = 0; i < 10000; i++) {
5154 tw32(offset + CPU_STATE, 0xffffffff);
5155 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5156 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5157 break;
5158 }
5159
5160 tw32(offset + CPU_STATE, 0xffffffff);
5161 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5162 udelay(10);
5163 } else {
5164 for (i = 0; i < 10000; i++) {
5165 tw32(offset + CPU_STATE, 0xffffffff);
5166 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5167 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5168 break;
5169 }
5170 }
5171
5172 if (i >= 10000) {
5173 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5174 "and %s CPU\n",
5175 tp->dev->name,
5176 (offset == RX_CPU_BASE ? "RX" : "TX"));
5177 return -ENODEV;
5178 }
ec41c7df
MC
5179
5180 /* Clear firmware's nvram arbitration. */
5181 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5182 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
1da177e4
LT
5183 return 0;
5184}
5185
5186struct fw_info {
5187 unsigned int text_base;
5188 unsigned int text_len;
50da859d 5189 const u32 *text_data;
1da177e4
LT
5190 unsigned int rodata_base;
5191 unsigned int rodata_len;
50da859d 5192 const u32 *rodata_data;
1da177e4
LT
5193 unsigned int data_base;
5194 unsigned int data_len;
50da859d 5195 const u32 *data_data;
1da177e4
LT
5196};
5197
5198/* tp->lock is held. */
5199static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5200 int cpu_scratch_size, struct fw_info *info)
5201{
ec41c7df 5202 int err, lock_err, i;
1da177e4
LT
5203 void (*write_op)(struct tg3 *, u32, u32);
5204
5205 if (cpu_base == TX_CPU_BASE &&
5206 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5207 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5208 "TX cpu firmware on %s which is 5705.\n",
5209 tp->dev->name);
5210 return -EINVAL;
5211 }
5212
5213 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5214 write_op = tg3_write_mem;
5215 else
5216 write_op = tg3_write_indirect_reg32;
5217
1b628151
MC
5218 /* It is possible that bootcode is still loading at this point.
5219 * Get the nvram lock first before halting the cpu.
5220 */
ec41c7df 5221 lock_err = tg3_nvram_lock(tp);
1da177e4 5222 err = tg3_halt_cpu(tp, cpu_base);
ec41c7df
MC
5223 if (!lock_err)
5224 tg3_nvram_unlock(tp);
1da177e4
LT
5225 if (err)
5226 goto out;
5227
5228 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5229 write_op(tp, cpu_scratch_base + i, 0);
5230 tw32(cpu_base + CPU_STATE, 0xffffffff);
5231 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5232 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5233 write_op(tp, (cpu_scratch_base +
5234 (info->text_base & 0xffff) +
5235 (i * sizeof(u32))),
5236 (info->text_data ?
5237 info->text_data[i] : 0));
5238 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5239 write_op(tp, (cpu_scratch_base +
5240 (info->rodata_base & 0xffff) +
5241 (i * sizeof(u32))),
5242 (info->rodata_data ?
5243 info->rodata_data[i] : 0));
5244 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5245 write_op(tp, (cpu_scratch_base +
5246 (info->data_base & 0xffff) +
5247 (i * sizeof(u32))),
5248 (info->data_data ?
5249 info->data_data[i] : 0));
5250
5251 err = 0;
5252
5253out:
1da177e4
LT
5254 return err;
5255}
5256
5257/* tp->lock is held. */
5258static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5259{
5260 struct fw_info info;
5261 int err, i;
5262
5263 info.text_base = TG3_FW_TEXT_ADDR;
5264 info.text_len = TG3_FW_TEXT_LEN;
5265 info.text_data = &tg3FwText[0];
5266 info.rodata_base = TG3_FW_RODATA_ADDR;
5267 info.rodata_len = TG3_FW_RODATA_LEN;
5268 info.rodata_data = &tg3FwRodata[0];
5269 info.data_base = TG3_FW_DATA_ADDR;
5270 info.data_len = TG3_FW_DATA_LEN;
5271 info.data_data = NULL;
5272
5273 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5274 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5275 &info);
5276 if (err)
5277 return err;
5278
5279 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5280 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5281 &info);
5282 if (err)
5283 return err;
5284
5285 /* Now startup only the RX cpu. */
5286 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5287 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5288
5289 for (i = 0; i < 5; i++) {
5290 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5291 break;
5292 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5293 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5294 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5295 udelay(1000);
5296 }
5297 if (i >= 5) {
5298 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5299 "to set RX CPU PC, is %08x should be %08x\n",
5300 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5301 TG3_FW_TEXT_ADDR);
5302 return -ENODEV;
5303 }
5304 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5305 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5306
5307 return 0;
5308}
5309
5310#if TG3_TSO_SUPPORT != 0
5311
5312#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5313#define TG3_TSO_FW_RELASE_MINOR 0x6
5314#define TG3_TSO_FW_RELEASE_FIX 0x0
5315#define TG3_TSO_FW_START_ADDR 0x08000000
5316#define TG3_TSO_FW_TEXT_ADDR 0x08000000
5317#define TG3_TSO_FW_TEXT_LEN 0x1aa0
5318#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5319#define TG3_TSO_FW_RODATA_LEN 0x60
5320#define TG3_TSO_FW_DATA_ADDR 0x08001b20
5321#define TG3_TSO_FW_DATA_LEN 0x30
5322#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5323#define TG3_TSO_FW_SBSS_LEN 0x2c
5324#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5325#define TG3_TSO_FW_BSS_LEN 0x894
5326
50da859d 5327static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5328 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5329 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5330 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5331 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5332 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5333 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5334 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5335 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5336 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5337 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5338 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5339 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5340 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5341 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5342 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5343 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5344 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5345 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5346 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5347 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5348 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5349 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5350 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5351 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5352 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5353 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5354 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5355 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5356 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5357 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5358 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5359 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5360 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5361 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5362 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5363 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5364 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5365 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5366 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5367 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5368 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5369 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5370 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5371 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5372 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5373 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5374 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5375 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5376 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5377 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5378 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5379 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5380 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5381 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5382 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5383 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5384 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5385 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5386 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5387 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5388 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5389 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5390 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5391 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5392 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5393 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5394 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5395 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5396 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5397 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5398 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5399 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5400 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5401 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5402 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5403 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5404 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5405 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5406 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5407 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5408 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5409 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5410 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5411 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5412 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5413 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5414 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5415 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5416 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5417 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5418 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5419 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5420 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5421 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5422 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5423 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5424 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5425 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5426 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5427 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5428 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5429 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5430 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5431 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5432 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5433 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5434 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5435 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5436 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5437 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5438 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5439 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5440 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5441 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5442 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5443 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5444 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5445 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5446 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5447 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5448 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5449 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5450 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5451 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5452 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5453 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5454 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5455 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5456 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5457 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5458 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5459 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5460 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5461 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5462 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5463 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5464 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5465 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5466 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5467 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5468 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5469 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5470 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5471 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5472 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5473 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5474 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5475 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5476 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5477 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5478 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5479 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5480 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5481 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5482 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5483 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5484 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5485 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5486 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5487 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5488 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5489 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5490 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5491 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5492 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5493 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5494 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5495 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5496 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5497 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5498 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5499 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5500 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5501 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5502 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5503 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5504 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5505 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5506 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5507 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5508 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5509 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5510 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5511 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5512 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5513 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5514 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5515 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5516 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5517 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5518 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5519 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5520 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5521 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5522 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5523 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5524 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5525 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5526 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5527 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5528 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5529 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5530 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5531 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5532 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5533 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5534 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5535 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5536 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5537 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5538 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5539 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5540 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5541 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5542 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5543 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5544 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5545 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5546 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5547 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5548 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5549 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5550 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5551 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5552 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5553 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5554 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5555 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5556 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5557 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5558 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5559 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5560 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5561 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5562 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5563 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5564 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5565 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5566 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5567 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5568 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5569 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5570 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5571 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5572 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5573 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5574 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5575 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5576 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5577 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5578 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5579 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5580 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5581 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5582 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5583 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5584 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5585 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5586 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5587 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5588 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5589 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5590 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5591 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5592 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5593 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5594 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5595 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5596 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5597 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5598 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5599 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5600 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5601 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5602 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5603 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5604 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5605 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5606 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5607 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5608 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5609 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5610 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5611 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5612};
5613
50da859d 5614static const u32 tg3TsoFwRodata[] = {
1da177e4
LT
5615 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5616 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5617 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5618 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5619 0x00000000,
5620};
5621
50da859d 5622static const u32 tg3TsoFwData[] = {
1da177e4
LT
5623 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5624 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5625 0x00000000,
5626};
5627
5628/* 5705 needs a special version of the TSO firmware. */
5629#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5630#define TG3_TSO5_FW_RELASE_MINOR 0x2
5631#define TG3_TSO5_FW_RELEASE_FIX 0x0
5632#define TG3_TSO5_FW_START_ADDR 0x00010000
5633#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5634#define TG3_TSO5_FW_TEXT_LEN 0xe90
5635#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5636#define TG3_TSO5_FW_RODATA_LEN 0x50
5637#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5638#define TG3_TSO5_FW_DATA_LEN 0x20
5639#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5640#define TG3_TSO5_FW_SBSS_LEN 0x28
5641#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5642#define TG3_TSO5_FW_BSS_LEN 0x88
5643
/* Instruction (.text) image of the 5705 TSO firmware, downloaded to the
 * RX CPU by tg3_load_tso_firmware().  Opaque generated data — do not
 * edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
5802
/* Read-only data (.rodata) section of the 5705 TSO firmware image;
 * the words are ASCII tags used by the firmware (e.g. "MainCpuB").
 * Opaque generated data — do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5809
/* Initialized data (.data) section of the 5705 TSO firmware image
 * (contains an ASCII version tag).  Opaque generated data — do not
 * edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5814
5815/* tp->lock is held. */
5816static int tg3_load_tso_firmware(struct tg3 *tp)
5817{
5818 struct fw_info info;
5819 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5820 int err, i;
5821
5822 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5823 return 0;
5824
5825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5826 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5827 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5828 info.text_data = &tg3Tso5FwText[0];
5829 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5830 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5831 info.rodata_data = &tg3Tso5FwRodata[0];
5832 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5833 info.data_len = TG3_TSO5_FW_DATA_LEN;
5834 info.data_data = &tg3Tso5FwData[0];
5835 cpu_base = RX_CPU_BASE;
5836 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5837 cpu_scratch_size = (info.text_len +
5838 info.rodata_len +
5839 info.data_len +
5840 TG3_TSO5_FW_SBSS_LEN +
5841 TG3_TSO5_FW_BSS_LEN);
5842 } else {
5843 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5844 info.text_len = TG3_TSO_FW_TEXT_LEN;
5845 info.text_data = &tg3TsoFwText[0];
5846 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5847 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5848 info.rodata_data = &tg3TsoFwRodata[0];
5849 info.data_base = TG3_TSO_FW_DATA_ADDR;
5850 info.data_len = TG3_TSO_FW_DATA_LEN;
5851 info.data_data = &tg3TsoFwData[0];
5852 cpu_base = TX_CPU_BASE;
5853 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5854 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5855 }
5856
5857 err = tg3_load_firmware_cpu(tp, cpu_base,
5858 cpu_scratch_base, cpu_scratch_size,
5859 &info);
5860 if (err)
5861 return err;
5862
5863 /* Now startup the cpu. */
5864 tw32(cpu_base + CPU_STATE, 0xffffffff);
5865 tw32_f(cpu_base + CPU_PC, info.text_base);
5866
5867 for (i = 0; i < 5; i++) {
5868 if (tr32(cpu_base + CPU_PC) == info.text_base)
5869 break;
5870 tw32(cpu_base + CPU_STATE, 0xffffffff);
5871 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5872 tw32_f(cpu_base + CPU_PC, info.text_base);
5873 udelay(1000);
5874 }
5875 if (i >= 5) {
5876 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5877 "to set CPU PC, is %08x should be %08x\n",
5878 tp->dev->name, tr32(cpu_base + CPU_PC),
5879 info.text_base);
5880 return -ENODEV;
5881 }
5882 tw32(cpu_base + CPU_STATE, 0xffffffff);
5883 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5884 return 0;
5885}
5886
5887#endif /* TG3_TSO_SUPPORT != 0 */
5888
5889/* tp->lock is held. */
5890static void __tg3_set_mac_addr(struct tg3 *tp)
5891{
5892 u32 addr_high, addr_low;
5893 int i;
5894
5895 addr_high = ((tp->dev->dev_addr[0] << 8) |
5896 tp->dev->dev_addr[1]);
5897 addr_low = ((tp->dev->dev_addr[2] << 24) |
5898 (tp->dev->dev_addr[3] << 16) |
5899 (tp->dev->dev_addr[4] << 8) |
5900 (tp->dev->dev_addr[5] << 0));
5901 for (i = 0; i < 4; i++) {
5902 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5903 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5904 }
5905
5906 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5908 for (i = 0; i < 12; i++) {
5909 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5910 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5911 }
5912 }
5913
5914 addr_high = (tp->dev->dev_addr[0] +
5915 tp->dev->dev_addr[1] +
5916 tp->dev->dev_addr[2] +
5917 tp->dev->dev_addr[3] +
5918 tp->dev->dev_addr[4] +
5919 tp->dev->dev_addr[5]) &
5920 TX_BACKOFF_SEED_MASK;
5921 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5922}
5923
/* net_device MAC-address change handler.  Validates the new address,
 * stores it in dev->dev_addr, and — if the interface is up — pushes it
 * into the hardware.  Returns 0 on success or a negative errno.
 */
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;	/* caller passes a struct sockaddr */
	int err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* Interface down: the address is applied later when the device
	 * is brought up, nothing to program now.
	 */
	if (!netif_running(dev))
		return 0;

	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		/* Reset chip so that ASF can re-init any MAC addresses it
		 * needs.
		 */
		tg3_netif_stop(tp);
		tg3_full_lock(tp, 1);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 0);
		if (!err)
			tg3_netif_start(tp);
		tg3_full_unlock(tp);
	} else {
		/* No ASF: just reprogram the MAC registers under tp->lock. */
		spin_lock_bh(&tp->lock);
		__tg3_set_mac_addr(tp);
		spin_unlock_bh(&tp->lock);
	}

	return err;
}
5958
5959/* tp->lock is held. */
5960static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5961 dma_addr_t mapping, u32 maxlen_flags,
5962 u32 nic_addr)
5963{
5964 tg3_write_mem(tp,
5965 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5966 ((u64) mapping >> 32));
5967 tg3_write_mem(tp,
5968 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5969 ((u64) mapping & 0xffffffff));
5970 tg3_write_mem(tp,
5971 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5972 maxlen_flags);
5973
5974 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5975 tg3_write_mem(tp,
5976 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5977 nic_addr);
5978}
5979
5980static void __tg3_set_rx_mode(struct net_device *);
d244c892 5981static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
5982{
5983 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5984 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5985 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5986 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5987 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5988 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5989 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5990 }
5991 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5992 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5993 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5994 u32 val = ec->stats_block_coalesce_usecs;
5995
5996 if (!netif_carrier_ok(tp->dev))
5997 val = 0;
5998
5999 tw32(HOSTCC_STAT_COAL_TICKS, val);
6000 }
6001}
1da177e4
LT
6002
6003/* tp->lock is held. */
8e7a22e3 6004static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6005{
6006 u32 val, rdmac_mode;
6007 int i, err, limit;
6008
6009 tg3_disable_ints(tp);
6010
6011 tg3_stop_fw(tp);
6012
6013 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6014
6015 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 6016 tg3_abort_hw(tp, 1);
1da177e4
LT
6017 }
6018
8e7a22e3 6019 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
d4d2c558
MC
6020 tg3_phy_reset(tp);
6021
1da177e4
LT
6022 err = tg3_chip_reset(tp);
6023 if (err)
6024 return err;
6025
6026 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6027
6028 /* This works around an issue with Athlon chipsets on
6029 * B3 tigon3 silicon. This bit has no effect on any
6030 * other revision. But do not set this on PCI Express
6031 * chips.
6032 */
6033 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6034 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6035 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6036
6037 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6038 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6039 val = tr32(TG3PCI_PCISTATE);
6040 val |= PCISTATE_RETRY_SAME_DMA;
6041 tw32(TG3PCI_PCISTATE, val);
6042 }
6043
6044 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6045 /* Enable some hw fixes. */
6046 val = tr32(TG3PCI_MSI_DATA);
6047 val |= (1 << 26) | (1 << 28) | (1 << 29);
6048 tw32(TG3PCI_MSI_DATA, val);
6049 }
6050
6051 /* Descriptor ring init may make accesses to the
6052 * NIC SRAM area to setup the TX descriptors, so we
6053 * can only do this after the hardware has been
6054 * successfully reset.
6055 */
32d8c572
MC
6056 err = tg3_init_rings(tp);
6057 if (err)
6058 return err;
1da177e4
LT
6059
6060 /* This value is determined during the probe time DMA
6061 * engine test, tg3_test_dma.
6062 */
6063 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6064
6065 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6066 GRC_MODE_4X_NIC_SEND_RINGS |
6067 GRC_MODE_NO_TX_PHDR_CSUM |
6068 GRC_MODE_NO_RX_PHDR_CSUM);
6069 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
6070
6071 /* Pseudo-header checksum is done by hardware logic and not
6072 * the offload processers, so make the chip do the pseudo-
6073 * header checksums on receive. For transmit it is more
6074 * convenient to do the pseudo-header checksum in software
6075 * as Linux does that on transmit for us in all cases.
6076 */
6077 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
6078
6079 tw32(GRC_MODE,
6080 tp->grc_mode |
6081 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6082
6083 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6084 val = tr32(GRC_MISC_CFG);
6085 val &= ~0xff;
6086 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6087 tw32(GRC_MISC_CFG, val);
6088
6089 /* Initialize MBUF/DESC pool. */
cbf46853 6090 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
6091 /* Do nothing. */
6092 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6093 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6095 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6096 else
6097 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6098 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6099 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6100 }
6101#if TG3_TSO_SUPPORT != 0
6102 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6103 int fw_len;
6104
6105 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6106 TG3_TSO5_FW_RODATA_LEN +
6107 TG3_TSO5_FW_DATA_LEN +
6108 TG3_TSO5_FW_SBSS_LEN +
6109 TG3_TSO5_FW_BSS_LEN);
6110 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6111 tw32(BUFMGR_MB_POOL_ADDR,
6112 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6113 tw32(BUFMGR_MB_POOL_SIZE,
6114 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6115 }
6116#endif
6117
0f893dc6 6118 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
6119 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6120 tp->bufmgr_config.mbuf_read_dma_low_water);
6121 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6122 tp->bufmgr_config.mbuf_mac_rx_low_water);
6123 tw32(BUFMGR_MB_HIGH_WATER,
6124 tp->bufmgr_config.mbuf_high_water);
6125 } else {
6126 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6127 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6128 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6129 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6130 tw32(BUFMGR_MB_HIGH_WATER,
6131 tp->bufmgr_config.mbuf_high_water_jumbo);
6132 }
6133 tw32(BUFMGR_DMA_LOW_WATER,
6134 tp->bufmgr_config.dma_low_water);
6135 tw32(BUFMGR_DMA_HIGH_WATER,
6136 tp->bufmgr_config.dma_high_water);
6137
6138 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6139 for (i = 0; i < 2000; i++) {
6140 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6141 break;
6142 udelay(10);
6143 }
6144 if (i >= 2000) {
6145 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6146 tp->dev->name);
6147 return -ENODEV;
6148 }
6149
6150 /* Setup replenish threshold. */
f92905de
MC
6151 val = tp->rx_pending / 8;
6152 if (val == 0)
6153 val = 1;
6154 else if (val > tp->rx_std_max_post)
6155 val = tp->rx_std_max_post;
b5d3772c
MC
6156 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6157 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6158 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6159
6160 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6161 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6162 }
f92905de
MC
6163
6164 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6165
6166 /* Initialize TG3_BDINFO's at:
6167 * RCVDBDI_STD_BD: standard eth size rx ring
6168 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6169 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6170 *
6171 * like so:
6172 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6173 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6174 * ring attribute flags
6175 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6176 *
6177 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6178 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6179 *
6180 * The size of each ring is fixed in the firmware, but the location is
6181 * configurable.
6182 */
6183 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6184 ((u64) tp->rx_std_mapping >> 32));
6185 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6186 ((u64) tp->rx_std_mapping & 0xffffffff));
6187 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6188 NIC_SRAM_RX_BUFFER_DESC);
6189
6190 /* Don't even try to program the JUMBO/MINI buffer descriptor
6191 * configs on 5705.
6192 */
6193 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6194 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6195 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6196 } else {
6197 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6198 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6199
6200 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6201 BDINFO_FLAGS_DISABLED);
6202
6203 /* Setup replenish threshold. */
6204 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6205
0f893dc6 6206 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6207 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6208 ((u64) tp->rx_jumbo_mapping >> 32));
6209 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6210 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6211 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6212 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6213 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6214 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6215 } else {
6216 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6217 BDINFO_FLAGS_DISABLED);
6218 }
6219
6220 }
6221
6222 /* There is only one send ring on 5705/5750, no need to explicitly
6223 * disable the others.
6224 */
6225 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6226 /* Clear out send RCB ring in SRAM. */
6227 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6228 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6229 BDINFO_FLAGS_DISABLED);
6230 }
6231
6232 tp->tx_prod = 0;
6233 tp->tx_cons = 0;
6234 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6235 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6236
6237 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6238 tp->tx_desc_mapping,
6239 (TG3_TX_RING_SIZE <<
6240 BDINFO_FLAGS_MAXLEN_SHIFT),
6241 NIC_SRAM_TX_BUFFER_DESC);
6242
6243 /* There is only one receive return ring on 5705/5750, no need
6244 * to explicitly disable the others.
6245 */
6246 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6247 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6248 i += TG3_BDINFO_SIZE) {
6249 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6250 BDINFO_FLAGS_DISABLED);
6251 }
6252 }
6253
6254 tp->rx_rcb_ptr = 0;
6255 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6256
6257 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6258 tp->rx_rcb_mapping,
6259 (TG3_RX_RCB_RING_SIZE(tp) <<
6260 BDINFO_FLAGS_MAXLEN_SHIFT),
6261 0);
6262
6263 tp->rx_std_ptr = tp->rx_pending;
6264 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6265 tp->rx_std_ptr);
6266
0f893dc6 6267 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6268 tp->rx_jumbo_pending : 0;
6269 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6270 tp->rx_jumbo_ptr);
6271
6272 /* Initialize MAC address and backoff seed. */
6273 __tg3_set_mac_addr(tp);
6274
6275 /* MTU + ethernet header + FCS + optional VLAN tag */
6276 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6277
6278 /* The slot time is changed by tg3_setup_phy if we
6279 * run at gigabit with half duplex.
6280 */
6281 tw32(MAC_TX_LENGTHS,
6282 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6283 (6 << TX_LENGTHS_IPG_SHIFT) |
6284 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6285
6286 /* Receive rules. */
6287 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6288 tw32(RCVLPC_CONFIG, 0x0181);
6289
6290 /* Calculate RDMAC_MODE setting early, we need it to determine
6291 * the RCVLPC_STATE_ENABLE mask.
6292 */
6293 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6294 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6295 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6296 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6297 RDMAC_MODE_LNGREAD_ENAB);
6298 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6299 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
6300
6301 /* If statement applies to 5705 and 5750 PCI devices only */
6302 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6303 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6304 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
6305 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6306 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6307 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6308 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6309 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6310 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6311 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6312 }
6313 }
6314
85e94ced
MC
6315 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6316 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6317
1da177e4
LT
6318#if TG3_TSO_SUPPORT != 0
6319 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6320 rdmac_mode |= (1 << 27);
6321#endif
6322
6323 /* Receive/send statistics. */
1661394e
MC
6324 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6325 val = tr32(RCVLPC_STATS_ENABLE);
6326 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6327 tw32(RCVLPC_STATS_ENABLE, val);
6328 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6329 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
1da177e4
LT
6330 val = tr32(RCVLPC_STATS_ENABLE);
6331 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6332 tw32(RCVLPC_STATS_ENABLE, val);
6333 } else {
6334 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6335 }
6336 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6337 tw32(SNDDATAI_STATSENAB, 0xffffff);
6338 tw32(SNDDATAI_STATSCTRL,
6339 (SNDDATAI_SCTRL_ENABLE |
6340 SNDDATAI_SCTRL_FASTUPD));
6341
6342 /* Setup host coalescing engine. */
6343 tw32(HOSTCC_MODE, 0);
6344 for (i = 0; i < 2000; i++) {
6345 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6346 break;
6347 udelay(10);
6348 }
6349
d244c892 6350 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6351
6352 /* set status block DMA address */
6353 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6354 ((u64) tp->status_mapping >> 32));
6355 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6356 ((u64) tp->status_mapping & 0xffffffff));
6357
6358 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6359 /* Status/statistics block address. See tg3_timer,
6360 * the tg3_periodic_fetch_stats call there, and
6361 * tg3_get_stats to see how this works for 5705/5750 chips.
6362 */
1da177e4
LT
6363 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6364 ((u64) tp->stats_mapping >> 32));
6365 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6366 ((u64) tp->stats_mapping & 0xffffffff));
6367 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6368 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6369 }
6370
6371 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6372
6373 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6374 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6375 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6376 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6377
6378 /* Clear statistics/status block in chip, and status block in ram. */
6379 for (i = NIC_SRAM_STATS_BLK;
6380 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6381 i += sizeof(u32)) {
6382 tg3_write_mem(tp, i, 0);
6383 udelay(40);
6384 }
6385 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6386
c94e3941
MC
6387 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6388 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6389 /* reset to prevent losing 1st rx packet intermittently */
6390 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6391 udelay(10);
6392 }
6393
1da177e4
LT
6394 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6395 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6396 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6397 udelay(40);
6398
314fba34
MC
6399 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6400 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6401 * register to preserve the GPIO settings for LOMs. The GPIOs,
6402 * whether used as inputs or outputs, are set by boot code after
6403 * reset.
6404 */
6405 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6406 u32 gpio_mask;
6407
6408 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6409 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
6410
6411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6412 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6413 GRC_LCLCTRL_GPIO_OUTPUT3;
6414
af36e6b6
MC
6415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6416 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6417
314fba34
MC
6418 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6419
6420 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
6421 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6422 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 6423 }
1da177e4
LT
6424 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6425 udelay(100);
6426
09ee929c 6427 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 6428 tp->last_tag = 0;
1da177e4
LT
6429
6430 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6431 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6432 udelay(40);
6433 }
6434
6435 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6436 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6437 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6438 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6439 WDMAC_MODE_LNGREAD_ENAB);
6440
85e94ced
MC
6441 /* If statement applies to 5705 and 5750 PCI devices only */
6442 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6443 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6445 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6446 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6447 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6448 /* nothing */
6449 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6450 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6451 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6452 val |= WDMAC_MODE_RX_ACCEL;
6453 }
6454 }
6455
d9ab5ad1 6456 /* Enable host coalescing bug fix */
af36e6b6
MC
6457 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6458 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
d9ab5ad1
MC
6459 val |= (1 << 29);
6460
1da177e4
LT
6461 tw32_f(WDMAC_MODE, val);
6462 udelay(40);
6463
6464 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6465 val = tr32(TG3PCI_X_CAPS);
6466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6467 val &= ~PCIX_CAPS_BURST_MASK;
6468 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6469 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6470 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6471 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6472 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6473 val |= (tp->split_mode_max_reqs <<
6474 PCIX_CAPS_SPLIT_SHIFT);
6475 }
6476 tw32(TG3PCI_X_CAPS, val);
6477 }
6478
6479 tw32_f(RDMAC_MODE, rdmac_mode);
6480 udelay(40);
6481
6482 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6483 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6484 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6485 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6486 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6487 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6488 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6489 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6490#if TG3_TSO_SUPPORT != 0
6491 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6492 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6493#endif
6494 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6495 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6496
6497 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6498 err = tg3_load_5701_a0_firmware_fix(tp);
6499 if (err)
6500 return err;
6501 }
6502
6503#if TG3_TSO_SUPPORT != 0
6504 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6505 err = tg3_load_tso_firmware(tp);
6506 if (err)
6507 return err;
6508 }
6509#endif
6510
6511 tp->tx_mode = TX_MODE_ENABLE;
6512 tw32_f(MAC_TX_MODE, tp->tx_mode);
6513 udelay(100);
6514
6515 tp->rx_mode = RX_MODE_ENABLE;
af36e6b6
MC
6516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6517 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6518
1da177e4
LT
6519 tw32_f(MAC_RX_MODE, tp->rx_mode);
6520 udelay(10);
6521
6522 if (tp->link_config.phy_is_low_power) {
6523 tp->link_config.phy_is_low_power = 0;
6524 tp->link_config.speed = tp->link_config.orig_speed;
6525 tp->link_config.duplex = tp->link_config.orig_duplex;
6526 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6527 }
6528
6529 tp->mi_mode = MAC_MI_MODE_BASE;
6530 tw32_f(MAC_MI_MODE, tp->mi_mode);
6531 udelay(80);
6532
6533 tw32(MAC_LED_CTRL, tp->led_ctrl);
6534
6535 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6536 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6537 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6538 udelay(10);
6539 }
6540 tw32_f(MAC_RX_MODE, tp->rx_mode);
6541 udelay(10);
6542
6543 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6544 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6545 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6546 /* Set drive transmission level to 1.2V */
6547 /* only if the signal pre-emphasis bit is not set */
6548 val = tr32(MAC_SERDES_CFG);
6549 val &= 0xfffff000;
6550 val |= 0x880;
6551 tw32(MAC_SERDES_CFG, val);
6552 }
6553 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6554 tw32(MAC_SERDES_CFG, 0x616000);
6555 }
6556
6557 /* Prevent chip from dropping frames when flow control
6558 * is enabled.
6559 */
6560 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6561
6562 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6563 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6564 /* Use hardware link auto-negotiation */
6565 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6566 }
6567
d4d2c558
MC
6568 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6569 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6570 u32 tmp;
6571
6572 tmp = tr32(SERDES_RX_CTRL);
6573 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6574 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6575 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6576 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6577 }
6578
8e7a22e3 6579 err = tg3_setup_phy(tp, reset_phy);
1da177e4
LT
6580 if (err)
6581 return err;
6582
715116a1
MC
6583 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6584 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
1da177e4
LT
6585 u32 tmp;
6586
6587 /* Clear CRC stats. */
6588 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6589 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6590 tg3_readphy(tp, 0x14, &tmp);
6591 }
6592 }
6593
6594 __tg3_set_rx_mode(tp->dev);
6595
6596 /* Initialize receive rules. */
6597 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6598 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6599 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6600 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6601
4cf78e4f 6602 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6603 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6604 limit = 8;
6605 else
6606 limit = 16;
6607 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6608 limit -= 4;
6609 switch (limit) {
6610 case 16:
6611 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6612 case 15:
6613 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6614 case 14:
6615 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6616 case 13:
6617 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6618 case 12:
6619 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6620 case 11:
6621 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6622 case 10:
6623 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6624 case 9:
6625 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6626 case 8:
6627 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6628 case 7:
6629 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6630 case 6:
6631 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6632 case 5:
6633 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6634 case 4:
6635 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6636 case 3:
6637 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6638 case 2:
6639 case 1:
6640
6641 default:
6642 break;
6643 };
6644
6645 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6646
1da177e4
LT
6647 return 0;
6648}
6649
6650/* Called at device open time to get the chip ready for
6651 * packet processing. Invoked with tp->lock held.
6652 */
8e7a22e3 6653static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6654{
6655 int err;
6656
6657 /* Force the chip into D0. */
bc1c7567 6658 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
6659 if (err)
6660 goto out;
6661
6662 tg3_switch_clocks(tp);
6663
6664 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6665
8e7a22e3 6666 err = tg3_reset_hw(tp, reset_phy);
1da177e4
LT
6667
6668out:
6669 return err;
6670}
6671
/* Accumulate the 32-bit hardware counter at register REG into the
 * 64-bit software statistic PSTAT.  The low word is summed modulo
 * 2^32; a wrap is detected by the sum coming out smaller than the
 * value just added, in which case a carry is propagated into the
 * high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6678
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement counters
 * into the 64-bit accumulators in tp->hw_stats.  Called once a second
 * from tg3_timer() on 5705-plus chips (see the TG3_FLG2_5705_PLUS test
 * at the call site).  Skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* No point reading the counters while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters (drops at the ring level). */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6719
/* Per-device periodic timer callback (runs in timer/softirq context).
 * Under tp->lock it: kicks the chip when a non-tagged status update may
 * have been missed, detects a dead write-DMA engine and schedules a
 * full reset, polls link state once per second, and sends the ASF
 * heartbeat every other second.  Always re-arms itself at the end.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* An IRQ synchronization is in progress; do not touch the
	 * hardware, just re-arm the timer.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but maybe no IRQ seen;
			 * force an interrupt to pick it up.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine died; schedule a chip reset.
			 * Must drop the lock before scheduling the work.
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but the PCS reports a state change. */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* Link was down but the SERDES now sees a signal. */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode to reset
					 * the link state machine.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the firmware's doorbell (bit 14 of the RX
			 * CPU event register).
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6839
81789ef5 6840static int tg3_request_irq(struct tg3 *tp)
fcfa0a32
MC
6841{
6842 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6843 unsigned long flags;
6844 struct net_device *dev = tp->dev;
6845
6846 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6847 fn = tg3_msi;
6848 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6849 fn = tg3_msi_1shot;
1fb9df5d 6850 flags = IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
6851 } else {
6852 fn = tg3_interrupt;
6853 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6854 fn = tg3_interrupt_tagged;
1fb9df5d 6855 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
6856 }
6857 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6858}
6859
/* Verify that the chip can actually deliver an interrupt on the current
 * line (used by the MSI self-test).  Temporarily swaps in tg3_test_isr,
 * forces a host-coalescing "interrupt now", and polls the interrupt
 * mailbox for up to ~50ms.  Restores the normal handler before
 * returning.  Returns 0 if an interrupt was seen, -EIO if not, or a
 * negative errno from request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the real handler for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the chip to generate an interrupt immediately. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll the interrupt mailbox; a non-zero value means the test
	 * ISR ran and acknowledged the interrupt.
	 */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Put the normal interrupt handler back. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6906
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() with SERR reporting masked (a failed MSI
 * cycle may terminate with Master Abort).  On -EIO it falls back to
 * INTx, re-requests the IRQ and fully resets the chip to clear any
 * Master Abort state.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6967
1da177e4
LT
6968static int tg3_open(struct net_device *dev)
6969{
6970 struct tg3 *tp = netdev_priv(dev);
6971 int err;
6972
f47c11ee 6973 tg3_full_lock(tp, 0);
1da177e4 6974
bc1c7567
MC
6975 err = tg3_set_power_state(tp, PCI_D0);
6976 if (err)
6977 return err;
6978
1da177e4
LT
6979 tg3_disable_ints(tp);
6980 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6981
f47c11ee 6982 tg3_full_unlock(tp);
1da177e4
LT
6983
6984 /* The placement of this call is tied
6985 * to the setup and use of Host TX descriptors.
6986 */
6987 err = tg3_alloc_consistent(tp);
6988 if (err)
6989 return err;
6990
88b06bc2
MC
6991 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6992 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
d4d2c558
MC
6993 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6994 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6995 (tp->pdev_peer == tp->pdev))) {
fac9b83e
DM
6996 /* All MSI supporting chips should support tagged
6997 * status. Assert that this is the case.
6998 */
6999 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7000 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7001 "Not using MSI.\n", tp->dev->name);
7002 } else if (pci_enable_msi(tp->pdev) == 0) {
88b06bc2
MC
7003 u32 msi_mode;
7004
7005 msi_mode = tr32(MSGINT_MODE);
7006 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7007 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7008 }
7009 }
fcfa0a32 7010 err = tg3_request_irq(tp);
1da177e4
LT
7011
7012 if (err) {
88b06bc2
MC
7013 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7014 pci_disable_msi(tp->pdev);
7015 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7016 }
1da177e4
LT
7017 tg3_free_consistent(tp);
7018 return err;
7019 }
7020
f47c11ee 7021 tg3_full_lock(tp, 0);
1da177e4 7022
8e7a22e3 7023 err = tg3_init_hw(tp, 1);
1da177e4 7024 if (err) {
944d980e 7025 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7026 tg3_free_rings(tp);
7027 } else {
fac9b83e
DM
7028 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7029 tp->timer_offset = HZ;
7030 else
7031 tp->timer_offset = HZ / 10;
7032
7033 BUG_ON(tp->timer_offset > HZ);
7034 tp->timer_counter = tp->timer_multiplier =
7035 (HZ / tp->timer_offset);
7036 tp->asf_counter = tp->asf_multiplier =
28fbef78 7037 ((HZ / tp->timer_offset) * 2);
1da177e4
LT
7038
7039 init_timer(&tp->timer);
7040 tp->timer.expires = jiffies + tp->timer_offset;
7041 tp->timer.data = (unsigned long) tp;
7042 tp->timer.function = tg3_timer;
1da177e4
LT
7043 }
7044
f47c11ee 7045 tg3_full_unlock(tp);
1da177e4
LT
7046
7047 if (err) {
88b06bc2
MC
7048 free_irq(tp->pdev->irq, dev);
7049 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7050 pci_disable_msi(tp->pdev);
7051 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7052 }
1da177e4
LT
7053 tg3_free_consistent(tp);
7054 return err;
7055 }
7056
7938109f
MC
7057 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7058 err = tg3_test_msi(tp);
fac9b83e 7059
7938109f 7060 if (err) {
f47c11ee 7061 tg3_full_lock(tp, 0);
7938109f
MC
7062
7063 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7064 pci_disable_msi(tp->pdev);
7065 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7066 }
944d980e 7067 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f
MC
7068 tg3_free_rings(tp);
7069 tg3_free_consistent(tp);
7070
f47c11ee 7071 tg3_full_unlock(tp);
7938109f
MC
7072
7073 return err;
7074 }
fcfa0a32
MC
7075
7076 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7077 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
b5d3772c 7078 u32 val = tr32(PCIE_TRANSACTION_CFG);
fcfa0a32 7079
b5d3772c
MC
7080 tw32(PCIE_TRANSACTION_CFG,
7081 val | PCIE_TRANS_CFG_1SHOT_MSI);
fcfa0a32
MC
7082 }
7083 }
7938109f
MC
7084 }
7085
f47c11ee 7086 tg3_full_lock(tp, 0);
1da177e4 7087
7938109f
MC
7088 add_timer(&tp->timer);
7089 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
1da177e4
LT
7090 tg3_enable_ints(tp);
7091
f47c11ee 7092 tg3_full_unlock(tp);
1da177e4
LT
7093
7094 netif_start_queue(dev);
7095
7096 return 0;
7097}
7098
#if 0
/* Debug-only register/SRAM state dump, compiled out (#if 0) in normal
 * builds; enabled by hand together with the call in tg3_close().
 * NOTE(review): the txd/rxd pointer arithmetic below assigns tp->regs
 * into an unsigned long without a cast -- verify it still compiles if
 * this block is ever re-enabled.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7326
7327static struct net_device_stats *tg3_get_stats(struct net_device *);
7328static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7329
/* net_device stop() hook.  Waits for any in-flight reset task, stops
 * the queue and timer, halts the chip, releases the IRQ/MSI, snapshots
 * the statistics (the counters are lost across free_consistent), frees
 * DMA memory and drops the chip into D3hot.  Called with RTNL held.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve final counter values so they survive until the next
	 * open (tg3_get_stats()/tg3_get_estats() add these snapshots).
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7379
7380static inline unsigned long get_stat64(tg3_stat64_t *val)
7381{
7382 unsigned long ret;
7383
7384#if (BITS_PER_LONG == 32)
7385 ret = val->low;
7386#else
7387 ret = ((u64)val->high << 32) | ((u64)val->low);
7388#endif
7389 return ret;
7390}
7391
/* Return the cumulative RX CRC error count.
 *
 * On copper 5700/5701 the PHY's own CRC error counter is used instead
 * of the MAC statistic: shadow register 0x1e is written with bit 15
 * set and the count is then read from register 0x14.  The value is
 * accumulated into tp->phy_crc_errors (NOTE(review): this assumes the
 * PHY counter is clear-on-read -- confirm against the PHY datasheet).
 * All other chips report the hardware rx_fcs_errors statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY accesses must be serialized against the rest of
		 * the driver.
		 */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7416
/* Accumulate one hardware counter on top of the snapshot taken at the
 * last close (tp->estats_prev), writing the total into tp->estats.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Build the full ethtool statistics block.  Each counter is the sum of
 * the pre-close snapshot and the live hardware counter.  If the stats
 * DMA block is gone (device closed), the snapshot alone is returned.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7508
/* net_device ->get_stats() hook: map the tg3 hardware counters onto
 * the generic net_device_stats fields, each added to the snapshot
 * saved at the last close.  Falls back to the snapshot alone when the
 * stats DMA block has been freed.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips; see
	 * calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7568
7569static inline u32 calc_crc(unsigned char *buf, int len)
7570{
7571 u32 reg;
7572 u32 tmp;
7573 int j, k;
7574
7575 reg = 0xffffffff;
7576
7577 for (j = 0; j < len; j++) {
7578 reg ^= buf[j];
7579
7580 for (k = 0; k < 8; k++) {
7581 tmp = reg & 0x01;
7582
7583 reg >>= 1;
7584
7585 if (tmp) {
7586 reg ^= 0xedb88320;
7587 }
7588 }
7589 }
7590
7591 return ~reg;
7592}
7593
7594static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7595{
7596 /* accept or reject all multicast frames */
7597 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7598 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7599 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7600 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7601}
7602
/* Program the MAC receive mode and multicast hash filter from the
 * net_device flags and multicast list.  Caller must hold the full
 * lock (see tg3_set_rx_mode).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* The inverted top 7 bits of the CRC select one of
			 * the 128 hash-filter bits: bits 6:5 pick the
			 * register, bits 4:0 the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register if something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7666
/* net_device ->set_rx_mode() hook: apply the RX filtering state under
 * the full lock.  A no-op while the interface is down; the mode is
 * programmed from scratch on the next open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7678
/* Size of the register dump returned by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool ->get_regs_len() hook: fixed-size dump. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7685
/* ethtool ->get_regs() hook: dump the commonly interesting register
 * ranges into the caller's buffer at their native offsets (unread
 * gaps stay zero).  Skipped entirely while the PHY is in low-power
 * state, since register reads would be unreliable then.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Helpers position the output cursor at the register's own offset so
 * the dump layout mirrors the chip's register map.
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7758
/* ethtool ->get_eeprom_len() hook: size of the NVRAM as probed at
 * driver init.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
7765
7766static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
1820180b 7767static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
7768
/* ethtool ->get_eeprom() hook: copy an arbitrary byte range out of
 * NVRAM.  The hardware only reads aligned 32-bit words, so unaligned
 * head and tail bytes are handled by reading the enclosing word and
 * copying the relevant bytes.  eeprom->len is advanced as bytes are
 * produced so a partial result is reported correctly on error.
 * Returns 0 or a negative errno from tg3_nvram_read().
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while in low-power state. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NOTE(review): word is stored little-endian in the
		 * output buffer -- matches tg3_set_eeprom's layout.
		 */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7830
6aa20a22 7831static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
1da177e4
LT
7832
7833static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7834{
7835 struct tg3 *tp = netdev_priv(dev);
7836 int ret;
7837 u32 offset, len, b_offset, odd_len, start, end;
7838 u8 *buf;
7839
bc1c7567
MC
7840 if (tp->link_config.phy_is_low_power)
7841 return -EAGAIN;
7842
1da177e4
LT
7843 if (eeprom->magic != TG3_EEPROM_MAGIC)
7844 return -EINVAL;
7845
7846 offset = eeprom->offset;
7847 len = eeprom->len;
7848
7849 if ((b_offset = (offset & 3))) {
7850 /* adjustments to start on required 4 byte boundary */
7851 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7852 if (ret)
7853 return ret;
7854 start = cpu_to_le32(start);
7855 len += b_offset;
7856 offset &= ~3;
1c8594b4
MC
7857 if (len < 4)
7858 len = 4;
1da177e4
LT
7859 }
7860
7861 odd_len = 0;
1c8594b4 7862 if (len & 3) {
1da177e4
LT
7863 /* adjustments to end on required 4 byte boundary */
7864 odd_len = 1;
7865 len = (len + 3) & ~3;
7866 ret = tg3_nvram_read(tp, offset+len-4, &end);
7867 if (ret)
7868 return ret;
7869 end = cpu_to_le32(end);
7870 }
7871
7872 buf = data;
7873 if (b_offset || odd_len) {
7874 buf = kmalloc(len, GFP_KERNEL);
7875 if (buf == 0)
7876 return -ENOMEM;
7877 if (b_offset)
7878 memcpy(buf, &start, 4);
7879 if (odd_len)
7880 memcpy(buf+len-4, &end, 4);
7881 memcpy(buf + b_offset, data, eeprom->len);
7882 }
7883
7884 ret = tg3_nvram_write_block(tp, offset, len, buf);
7885
7886 if (buf != data)
7887 kfree(buf);
7888
7889 return ret;
7890}
7891
7892static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7893{
7894 struct tg3 *tp = netdev_priv(dev);
6aa20a22 7895
1da177e4
LT
7896 cmd->supported = (SUPPORTED_Autoneg);
7897
7898 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7899 cmd->supported |= (SUPPORTED_1000baseT_Half |
7900 SUPPORTED_1000baseT_Full);
7901
ef348144 7902 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
7903 cmd->supported |= (SUPPORTED_100baseT_Half |
7904 SUPPORTED_100baseT_Full |
7905 SUPPORTED_10baseT_Half |
7906 SUPPORTED_10baseT_Full |
7907 SUPPORTED_MII);
ef348144
KK
7908 cmd->port = PORT_TP;
7909 } else {
1da177e4 7910 cmd->supported |= SUPPORTED_FIBRE;
ef348144
KK
7911 cmd->port = PORT_FIBRE;
7912 }
6aa20a22 7913
1da177e4
LT
7914 cmd->advertising = tp->link_config.advertising;
7915 if (netif_running(dev)) {
7916 cmd->speed = tp->link_config.active_speed;
7917 cmd->duplex = tp->link_config.active_duplex;
7918 }
1da177e4
LT
7919 cmd->phy_address = PHY_ADDR;
7920 cmd->transceiver = 0;
7921 cmd->autoneg = tp->link_config.autoneg;
7922 cmd->maxtxpkt = 0;
7923 cmd->maxrxpkt = 0;
7924 return 0;
7925}
6aa20a22 7926
1da177e4
LT
7927static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7928{
7929 struct tg3 *tp = netdev_priv(dev);
6aa20a22
JG
7930
7931 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
7932 /* These are the only valid advertisement bits allowed. */
7933 if (cmd->autoneg == AUTONEG_ENABLE &&
7934 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7935 ADVERTISED_1000baseT_Full |
7936 ADVERTISED_Autoneg |
7937 ADVERTISED_FIBRE)))
7938 return -EINVAL;
37ff238d
MC
7939 /* Fiber can only do SPEED_1000. */
7940 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7941 (cmd->speed != SPEED_1000))
7942 return -EINVAL;
7943 /* Copper cannot force SPEED_1000. */
7944 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7945 (cmd->speed == SPEED_1000))
7946 return -EINVAL;
7947 else if ((cmd->speed == SPEED_1000) &&
7948 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7949 return -EINVAL;
1da177e4 7950
f47c11ee 7951 tg3_full_lock(tp, 0);
1da177e4
LT
7952
7953 tp->link_config.autoneg = cmd->autoneg;
7954 if (cmd->autoneg == AUTONEG_ENABLE) {
7955 tp->link_config.advertising = cmd->advertising;
7956 tp->link_config.speed = SPEED_INVALID;
7957 tp->link_config.duplex = DUPLEX_INVALID;
7958 } else {
7959 tp->link_config.advertising = 0;
7960 tp->link_config.speed = cmd->speed;
7961 tp->link_config.duplex = cmd->duplex;
7962 }
6aa20a22 7963
1da177e4
LT
7964 if (netif_running(dev))
7965 tg3_setup_phy(tp, 1);
7966
f47c11ee 7967 tg3_full_unlock(tp);
6aa20a22 7968
1da177e4
LT
7969 return 0;
7970}
6aa20a22 7971
1da177e4
LT
/* ethtool ->get_drvinfo() hook: report driver name/version, the
 * firmware version probed from NVRAM, and the PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
6aa20a22 7981
1da177e4
LT
7982static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7983{
7984 struct tg3 *tp = netdev_priv(dev);
6aa20a22 7985
1da177e4
LT
7986 wol->supported = WAKE_MAGIC;
7987 wol->wolopts = 0;
7988 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7989 wol->wolopts = WAKE_MAGIC;
7990 memset(&wol->sopass, 0, sizeof(wol->sopass));
7991}
6aa20a22 7992
1da177e4
LT
/* ethtool ->set_wol() hook: enable/disable magic-packet wake.
 * Rejects any other wake method, and magic-packet on SerDes parts
 * that are not WoL capable.  Returns 0 or -EINVAL.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
		return -EINVAL;

	/* Flag updates are serialized against other flag users. */
	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}
6aa20a22 8013
1da177e4
LT
/* ethtool ->get_msglevel() hook: current netif message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
6aa20a22 8019
1da177e4
LT
/* ethtool ->set_msglevel() hook: set the netif message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
6aa20a22 8025
1da177e4
LT
#if TG3_TSO_SUPPORT != 0
/* ethtool ->set_tso() hook: toggle TSO.  Refuses to enable it on
 * non-TSO-capable chips; on HW_TSO_2 parts (except the 5906) IPv6 TSO
 * is toggled along with the generic flag.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value)
			dev->features |= NETIF_F_TSO6;
		else
			dev->features &= ~NETIF_F_TSO6;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
6aa20a22 8046
1da177e4
LT
/* ethtool ->nway_reset() hook: restart autonegotiation on copper
 * PHYs.  Fails with -EAGAIN if the interface is down and -EINVAL on
 * SerDes or when autoneg is not active.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice back-to-back; this looks like
	 * a deliberate dummy read to flush latched PHY state -- confirm
	 * before "simplifying" it away.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
6aa20a22 8073
1da177e4
LT
/* ethtool ->get_ringparam() hook: report ring-size limits and the
 * currently configured sizes.  Jumbo values are only reported when
 * the jumbo ring is enabled on this chip.
 */
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
	ering->rx_mini_max_pending = 0;
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->tx_pending;
}
6aa20a22 8096
1da177e4
LT
/* ethtool ->set_ringparam() hook: validate and apply new ring sizes.
 * While running, the NIC is stopped, halted, reconfigured and
 * restarted; the restart error (if any) is returned.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard RX BDs. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8133
1da177e4
LT
8134static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8135{
8136 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8137
1da177e4
LT
8138 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8139 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8140 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8141}
6aa20a22 8142
1da177e4
LT
/* ethtool ->set_pauseparam() hook: update the flow-control flags and,
 * while running, halt and restart the chip so the new settings take
 * effect.  Returns the restart error, if any.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8179
1da177e4
LT
/* ethtool ->get_rx_csum() hook: 1 if RX checksum offload is on. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
6aa20a22 8185
1da177e4
LT
/* ethtool ->set_rx_csum() hook: toggle RX checksum offload.  On chips
 * with broken checksum hardware the feature stays off and enabling it
 * is rejected.  Returns 0 or -EINVAL.
 */
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	spin_lock_bh(&tp->lock);
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_bh(&tp->lock);

	return 0;
}
6aa20a22 8205
1da177e4
LT
/* ethtool ->set_tx_csum() hook: toggle TX checksum offload.  5755 and
 * 5787 support hardware checksum over all protocols (HW_CSUM); other
 * chips get the plain IP checksum flag.  Rejected on broken-checksum
 * hardware.
 */
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		ethtool_op_set_tx_hw_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
8224
/* ethtool ->get_stats_count() hook: number of ethtool statistics. */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
8229
4cafd3f5
MC
/* ethtool ->self_test_count() hook: number of self-test results. */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
8234
1da177e4
LT
8235static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8236{
8237 switch (stringset) {
8238 case ETH_SS_STATS:
8239 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8240 break;
4cafd3f5
MC
8241 case ETH_SS_TEST:
8242 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8243 break;
1da177e4
LT
8244 default:
8245 WARN_ON(1); /* we need a WARN() */
8246 break;
8247 }
8248}
8249
4009a93d
MC
/* ethtool ->phys_id() hook: blink the port LEDs for `data` seconds
 * (default 2) to identify the adapter, toggling every 500ms, then
 * restore the saved LED configuration.  Interruptible by a signal.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	/* Two half-second phases per requested second. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	/* Restore normal LED behavior. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8281
1da177e4
LT
/* ethtool ->get_ethtool_stats() hook: refresh the accumulated
 * statistics via tg3_get_estats() and copy them into the user buffer.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8288
566f86ad 8289#define NVRAM_TEST_SIZE 0x100
1b27777a 8290#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
566f86ad
MC
8291
8292static int tg3_test_nvram(struct tg3 *tp)
8293{
1b27777a
MC
8294 u32 *buf, csum, magic;
8295 int i, j, err = 0, size;
566f86ad 8296
1820180b 8297 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
1b27777a
MC
8298 return -EIO;
8299
1b27777a
MC
8300 if (magic == TG3_EEPROM_MAGIC)
8301 size = NVRAM_TEST_SIZE;
8302 else if ((magic & 0xff000000) == 0xa5000000) {
8303 if ((magic & 0xe00000) == 0x200000)
8304 size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8305 else
8306 return 0;
8307 } else
8308 return -EIO;
8309
8310 buf = kmalloc(size, GFP_KERNEL);
566f86ad
MC
8311 if (buf == NULL)
8312 return -ENOMEM;
8313
1b27777a
MC
8314 err = -EIO;
8315 for (i = 0, j = 0; i < size; i += 4, j++) {
566f86ad
MC
8316 u32 val;
8317
8318 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8319 break;
8320 buf[j] = cpu_to_le32(val);
8321 }
1b27777a 8322 if (i < size)
566f86ad
MC
8323 goto out;
8324
1b27777a
MC
8325 /* Selfboot format */
8326 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8327 u8 *buf8 = (u8 *) buf, csum8 = 0;
8328
8329 for (i = 0; i < size; i++)
8330 csum8 += buf8[i];
8331
ad96b485
AB
8332 if (csum8 == 0) {
8333 err = 0;
8334 goto out;
8335 }
8336
8337 err = -EIO;
8338 goto out;
1b27777a 8339 }
566f86ad
MC
8340
8341 /* Bootstrap checksum at offset 0x10 */
8342 csum = calc_crc((unsigned char *) buf, 0x10);
8343 if(csum != cpu_to_le32(buf[0x10/4]))
8344 goto out;
8345
8346 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8347 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8348 if (csum != cpu_to_le32(buf[0xfc/4]))
8349 goto out;
8350
8351 err = 0;
8352
8353out:
8354 kfree(buf);
8355 return err;
8356}
8357
ca43007a
MC
8358#define TG3_SERDES_TIMEOUT_SEC 2
8359#define TG3_COPPER_TIMEOUT_SEC 6
8360
8361static int tg3_test_link(struct tg3 *tp)
8362{
8363 int i, max;
8364
8365 if (!netif_running(tp->dev))
8366 return -ENODEV;
8367
4c987487 8368 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
8369 max = TG3_SERDES_TIMEOUT_SEC;
8370 else
8371 max = TG3_COPPER_TIMEOUT_SEC;
8372
8373 for (i = 0; i < max; i++) {
8374 if (netif_carrier_ok(tp->dev))
8375 return 0;
8376
8377 if (msleep_interruptible(1000))
8378 break;
8379 }
8380
8381 return -EIO;
8382}
8383
a71116d1 8384/* Only test the commonly used registers */
30ca3e37 8385static int tg3_test_registers(struct tg3 *tp)
a71116d1
MC
8386{
8387 int i, is_5705;
8388 u32 offset, read_mask, write_mask, val, save_val, read_val;
8389 static struct {
8390 u16 offset;
8391 u16 flags;
8392#define TG3_FL_5705 0x1
8393#define TG3_FL_NOT_5705 0x2
8394#define TG3_FL_NOT_5788 0x4
8395 u32 read_mask;
8396 u32 write_mask;
8397 } reg_tbl[] = {
8398 /* MAC Control Registers */
8399 { MAC_MODE, TG3_FL_NOT_5705,
8400 0x00000000, 0x00ef6f8c },
8401 { MAC_MODE, TG3_FL_5705,
8402 0x00000000, 0x01ef6b8c },
8403 { MAC_STATUS, TG3_FL_NOT_5705,
8404 0x03800107, 0x00000000 },
8405 { MAC_STATUS, TG3_FL_5705,
8406 0x03800100, 0x00000000 },
8407 { MAC_ADDR_0_HIGH, 0x0000,
8408 0x00000000, 0x0000ffff },
8409 { MAC_ADDR_0_LOW, 0x0000,
8410 0x00000000, 0xffffffff },
8411 { MAC_RX_MTU_SIZE, 0x0000,
8412 0x00000000, 0x0000ffff },
8413 { MAC_TX_MODE, 0x0000,
8414 0x00000000, 0x00000070 },
8415 { MAC_TX_LENGTHS, 0x0000,
8416 0x00000000, 0x00003fff },
8417 { MAC_RX_MODE, TG3_FL_NOT_5705,
8418 0x00000000, 0x000007fc },
8419 { MAC_RX_MODE, TG3_FL_5705,
8420 0x00000000, 0x000007dc },
8421 { MAC_HASH_REG_0, 0x0000,
8422 0x00000000, 0xffffffff },
8423 { MAC_HASH_REG_1, 0x0000,
8424 0x00000000, 0xffffffff },
8425 { MAC_HASH_REG_2, 0x0000,
8426 0x00000000, 0xffffffff },
8427 { MAC_HASH_REG_3, 0x0000,
8428 0x00000000, 0xffffffff },
8429
8430 /* Receive Data and Receive BD Initiator Control Registers. */
8431 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8432 0x00000000, 0xffffffff },
8433 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8434 0x00000000, 0xffffffff },
8435 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8436 0x00000000, 0x00000003 },
8437 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8438 0x00000000, 0xffffffff },
8439 { RCVDBDI_STD_BD+0, 0x0000,
8440 0x00000000, 0xffffffff },
8441 { RCVDBDI_STD_BD+4, 0x0000,
8442 0x00000000, 0xffffffff },
8443 { RCVDBDI_STD_BD+8, 0x0000,
8444 0x00000000, 0xffff0002 },
8445 { RCVDBDI_STD_BD+0xc, 0x0000,
8446 0x00000000, 0xffffffff },
6aa20a22 8447
a71116d1
MC
8448 /* Receive BD Initiator Control Registers. */
8449 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8450 0x00000000, 0xffffffff },
8451 { RCVBDI_STD_THRESH, TG3_FL_5705,
8452 0x00000000, 0x000003ff },
8453 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8454 0x00000000, 0xffffffff },
6aa20a22 8455
a71116d1
MC
8456 /* Host Coalescing Control Registers. */
8457 { HOSTCC_MODE, TG3_FL_NOT_5705,
8458 0x00000000, 0x00000004 },
8459 { HOSTCC_MODE, TG3_FL_5705,
8460 0x00000000, 0x000000f6 },
8461 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8462 0x00000000, 0xffffffff },
8463 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8464 0x00000000, 0x000003ff },
8465 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8466 0x00000000, 0xffffffff },
8467 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8468 0x00000000, 0x000003ff },
8469 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8470 0x00000000, 0xffffffff },
8471 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8472 0x00000000, 0x000000ff },
8473 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8474 0x00000000, 0xffffffff },
8475 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8476 0x00000000, 0x000000ff },
8477 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8478 0x00000000, 0xffffffff },
8479 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8480 0x00000000, 0xffffffff },
8481 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8482 0x00000000, 0xffffffff },
8483 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8484 0x00000000, 0x000000ff },
8485 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8486 0x00000000, 0xffffffff },
8487 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8488 0x00000000, 0x000000ff },
8489 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8490 0x00000000, 0xffffffff },
8491 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8492 0x00000000, 0xffffffff },
8493 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8494 0x00000000, 0xffffffff },
8495 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8496 0x00000000, 0xffffffff },
8497 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8498 0x00000000, 0xffffffff },
8499 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8500 0xffffffff, 0x00000000 },
8501 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8502 0xffffffff, 0x00000000 },
8503
8504 /* Buffer Manager Control Registers. */
8505 { BUFMGR_MB_POOL_ADDR, 0x0000,
8506 0x00000000, 0x007fff80 },
8507 { BUFMGR_MB_POOL_SIZE, 0x0000,
8508 0x00000000, 0x007fffff },
8509 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8510 0x00000000, 0x0000003f },
8511 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8512 0x00000000, 0x000001ff },
8513 { BUFMGR_MB_HIGH_WATER, 0x0000,
8514 0x00000000, 0x000001ff },
8515 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8516 0xffffffff, 0x00000000 },
8517 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8518 0xffffffff, 0x00000000 },
6aa20a22 8519
a71116d1
MC
8520 /* Mailbox Registers */
8521 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8522 0x00000000, 0x000001ff },
8523 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8524 0x00000000, 0x000001ff },
8525 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8526 0x00000000, 0x000007ff },
8527 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8528 0x00000000, 0x000001ff },
8529
8530 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8531 };
8532
8533 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8534 is_5705 = 1;
8535 else
8536 is_5705 = 0;
8537
8538 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8539 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8540 continue;
8541
8542 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8543 continue;
8544
8545 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8546 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8547 continue;
8548
8549 offset = (u32) reg_tbl[i].offset;
8550 read_mask = reg_tbl[i].read_mask;
8551 write_mask = reg_tbl[i].write_mask;
8552
8553 /* Save the original register content */
8554 save_val = tr32(offset);
8555
8556 /* Determine the read-only value. */
8557 read_val = save_val & read_mask;
8558
8559 /* Write zero to the register, then make sure the read-only bits
8560 * are not changed and the read/write bits are all zeros.
8561 */
8562 tw32(offset, 0);
8563
8564 val = tr32(offset);
8565
8566 /* Test the read-only and read/write bits. */
8567 if (((val & read_mask) != read_val) || (val & write_mask))
8568 goto out;
8569
8570 /* Write ones to all the bits defined by RdMask and WrMask, then
8571 * make sure the read-only bits are not changed and the
8572 * read/write bits are all ones.
8573 */
8574 tw32(offset, read_mask | write_mask);
8575
8576 val = tr32(offset);
8577
8578 /* Test the read-only bits. */
8579 if ((val & read_mask) != read_val)
8580 goto out;
8581
8582 /* Test the read/write bits. */
8583 if ((val & write_mask) != write_mask)
8584 goto out;
8585
8586 tw32(offset, save_val);
8587 }
8588
8589 return 0;
8590
8591out:
8592 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8593 tw32(offset, save_val);
8594 return -EIO;
8595}
8596
7942e1db
MC
8597static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8598{
f71e1309 8599 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
8600 int i;
8601 u32 j;
8602
8603 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8604 for (j = 0; j < len; j += 4) {
8605 u32 val;
8606
8607 tg3_write_mem(tp, offset + j, test_pattern[i]);
8608 tg3_read_mem(tp, offset + j, &val);
8609 if (val != test_pattern[i])
8610 return -EIO;
8611 }
8612 }
8613 return 0;
8614}
8615
/* RAM self-test: exercise every testable on-chip SRAM region for the
 * current chip family.  Each table lists { offset, len } windows and is
 * terminated by an offset of 0xffffffff.
 *
 * Returns 0 on success or the first error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the region table matching the ASIC generation. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			mem_tbl = mem_tbl_5755;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
8662
9f40dead
MC
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Send one self-addressed 1514-byte frame through internal loopback
 * (either MAC-internal or PHY-internal, per @loopback_mode) and verify
 * it comes back intact on the standard RX ring.
 *
 * Returns 0 on success, -EIO if the frame is lost or corrupted,
 * -ENOMEM on skb allocation failure, -EINVAL on a bad mode.
 * NOTE: the received skb is left on the ring; tg3_free_rings() will
 * unmap and free it later.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Loop the MAC back onto itself. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		/* Put the PHY in loopback at its highest supported speed. */
		val = BMCR_LOOPBACK | BMCR_FULLDPLX;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			val |= BMCR_SPEED100;
		else
			val |= BMCR_SPEED1000;

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		/* 5401 PHY needs special LED handling in loopback. */
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: dest = own MAC, then a counting pattern. */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a coalescing pass so the producer indices are current. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been both consumed and received. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX descriptor: right ring, no errors, right size. */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Byte-compare the payload against the counting pattern. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8814
9f40dead
MC
#define TG3_MAC_LOOPBACK_FAILED 1
#define TG3_PHY_LOOPBACK_FAILED 2
#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
			     TG3_PHY_LOOPBACK_FAILED)

/* Run the MAC-internal and (on copper boards) PHY-internal loopback
 * tests.  Returns 0 on success or a bitmask of TG3_*_LOOPBACK_FAILED
 * flags; both flags are reported if the interface is down or the chip
 * cannot be reset.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	/* Bring the hardware to a known-good state first. */
	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;
	/* PHY loopback does not apply to SERDES (fiber) devices. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
8840
4cafd3f5
MC
/* ethtool self-test entry point.  Fills @data with one result slot per
 * test (0 = pass): [0] nvram, [1] link, [2] registers, [3] memory,
 * [4] loopback bitmask, [5] interrupt.  Offline tests halt and later
 * restart the chip; the ordering of halt/lock/unlock calls below is
 * deliberate and must not be rearranged.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip if it is in a low-power state. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* Online tests: safe to run with traffic flowing. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the hardware before poking registers/memory. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test must run unlocked (it waits for an IRQ). */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation after the offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Drop back to low power if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8913
1da177e4
LT
/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctl handler for MII register
 * access.  Returns -EOPNOTSUPP for unsupported commands and for SERDES
 * devices (which have no MII PHY), -EAGAIN while in low-power state,
 * -EPERM for unprivileged writes.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break; /* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* Serialize PHY access against the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break; /* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8965
8966#if TG3_VLAN_TAG_USED
8967static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8968{
8969 struct tg3 *tp = netdev_priv(dev);
8970
29315e87
MC
8971 if (netif_running(dev))
8972 tg3_netif_stop(tp);
8973
f47c11ee 8974 tg3_full_lock(tp, 0);
1da177e4
LT
8975
8976 tp->vlgrp = grp;
8977
8978 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8979 __tg3_set_rx_mode(dev);
8980
f47c11ee 8981 tg3_full_unlock(tp);
29315e87
MC
8982
8983 if (netif_running(dev))
8984 tg3_netif_start(tp);
1da177e4
LT
8985}
8986
8987static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8988{
8989 struct tg3 *tp = netdev_priv(dev);
8990
29315e87
MC
8991 if (netif_running(dev))
8992 tg3_netif_stop(tp);
8993
f47c11ee 8994 tg3_full_lock(tp, 0);
1da177e4
LT
8995 if (tp->vlgrp)
8996 tp->vlgrp->vlan_devices[vid] = NULL;
f47c11ee 8997 tg3_full_unlock(tp);
29315e87
MC
8998
8999 if (netif_running(dev))
9000 tg3_netif_start(tp);
1da177e4
LT
9001}
9002#endif
9003
15f9850d
DM
9004static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9005{
9006 struct tg3 *tp = netdev_priv(dev);
9007
9008 memcpy(ec, &tp->coal, sizeof(*ec));
9009 return 0;
9010}
9011
d244c892
MC
9012static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9013{
9014 struct tg3 *tp = netdev_priv(dev);
9015 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9016 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9017
9018 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9019 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9020 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9021 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9022 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9023 }
9024
9025 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9026 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9027 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9028 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9029 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9030 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9031 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9032 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9033 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9034 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9035 return -EINVAL;
9036
9037 /* No rx interrupts will be generated if both are zero */
9038 if ((ec->rx_coalesce_usecs == 0) &&
9039 (ec->rx_max_coalesced_frames == 0))
9040 return -EINVAL;
9041
9042 /* No tx interrupts will be generated if both are zero */
9043 if ((ec->tx_coalesce_usecs == 0) &&
9044 (ec->tx_max_coalesced_frames == 0))
9045 return -EINVAL;
9046
9047 /* Only copy relevant parameters, ignore all others. */
9048 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9049 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9050 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9051 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9052 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9053 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9054 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9055 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9056 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9057
9058 if (netif_running(dev)) {
9059 tg3_full_lock(tp, 0);
9060 __tg3_set_coalesce(tp, &tp->coal);
9061 tg3_full_unlock(tp);
9062 }
9063 return 0;
9064}
9065
/* ethtool operations exported by this driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings = tg3_get_settings,
	.set_settings = tg3_set_settings,
	.get_drvinfo = tg3_get_drvinfo,
	.get_regs_len = tg3_get_regs_len,
	.get_regs = tg3_get_regs,
	.get_wol = tg3_get_wol,
	.set_wol = tg3_set_wol,
	.get_msglevel = tg3_get_msglevel,
	.set_msglevel = tg3_set_msglevel,
	.nway_reset = tg3_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = tg3_get_eeprom_len,
	.get_eeprom = tg3_get_eeprom,
	.set_eeprom = tg3_set_eeprom,
	.get_ringparam = tg3_get_ringparam,
	.set_ringparam = tg3_set_ringparam,
	.get_pauseparam = tg3_get_pauseparam,
	.set_pauseparam = tg3_set_pauseparam,
	.get_rx_csum = tg3_get_rx_csum,
	.set_rx_csum = tg3_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = tg3_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso = ethtool_op_get_tso,
	.set_tso = tg3_set_tso,
#endif
	.self_test_count = tg3_get_test_count,
	.self_test = tg3_self_test,
	.get_strings = tg3_get_strings,
	.phys_id = tg3_phys_id,
	.get_stats_count = tg3_get_stats_count,
	.get_ethtool_stats = tg3_get_ethtool_stats,
	.get_coalesce = tg3_get_coalesce,
	.set_coalesce = tg3_set_coalesce,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
9105
/* Probe the size of a plain (non-flash) EEPROM.  Starts from the
 * default EEPROM_CHIP_SIZE and, when the part carries a recognized
 * signature, doubles the probe offset until the signature reappears
 * (address wrap-around), which reveals the true size in tp->nvram_size.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return;

	/* Only probe parts with a known signature (full magic or the
	 * 0xa5 selfboot marker in the top byte).
	 */
	if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
6aa20a22 9137
1da177e4
LT
/* Determine NVRAM size.  Standard-format images record their size (in
 * KB) in the upper half of the word at offset 0xf0; selfboot images are
 * sized by probing (tg3_get_eeprom_size).  Falls back to 128KB.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_nvram_read_swab(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			tp->nvram_size = (val >> 16) * 1024;
			return;
		}
	}
	tp->nvram_size = 0x20000;
}
9159
/* Decode NVRAM_CFG1 for pre-5752 chips: record flash vs. EEPROM mode,
 * then (on 5750/5780-class parts) the vendor and page size of the
 * attached part.  Older chips default to a buffered Atmel AT45DB0X1B.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* Not flash: force the compatibility (EEPROM) interface. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	}
	else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9212
361b4ac2
MC
/* Decode NVRAM_CFG1 for 5752-class chips: TPM protection bit, vendor
 * identification, and page size (from the config register for flash
 * parts, or the maximum EEPROM size otherwise).
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
		case FLASH_5752PAGE_SIZE_256:
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5752PAGE_SIZE_512:
			tp->nvram_pagesize = 512;
			break;
		case FLASH_5752PAGE_SIZE_1K:
			tp->nvram_pagesize = 1024;
			break;
		case FLASH_5752PAGE_SIZE_2K:
			tp->nvram_pagesize = 2048;
			break;
		case FLASH_5752PAGE_SIZE_4K:
			tp->nvram_pagesize = 4096;
			break;
		case FLASH_5752PAGE_SIZE_264:
			tp->nvram_pagesize = 264;
			break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9273
d3c7b886
MC
/* Decode NVRAM_CFG1 for 5755-class chips: TPM protection bit, vendor,
 * interface mode (flash vs. EEPROM) and the fixed page size each
 * supported part uses.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts use the compatibility interface. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_4:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
9314
1b27777a
MC
/* Decode NVRAM_CFG1 for 5787-class chips: vendor, interface mode
 * (flash vs. EEPROM) and the fixed page size of each supported part.
 * Unlike 5752/5755, no TPM protection bit is checked here.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts use the compatibility interface. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
9352
b5d3772c
MC
9353static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9354{
9355 tp->nvram_jedecnum = JEDEC_ATMEL;
9356 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9357 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9358}
9359
1da177e4
LT
9360/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9361static void __devinit tg3_nvram_init(struct tg3 *tp)
9362{
9363 int j;
9364
1da177e4
LT
9365 tw32_f(GRC_EEPROM_ADDR,
9366 (EEPROM_ADDR_FSM_RESET |
9367 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9368 EEPROM_ADDR_CLKPERD_SHIFT)));
9369
9370 /* XXX schedule_timeout() ... */
9371 for (j = 0; j < 100; j++)
9372 udelay(10);
9373
9374 /* Enable seeprom accesses. */
9375 tw32_f(GRC_LOCAL_CTRL,
9376 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9377 udelay(100);
9378
9379 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9380 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9381 tp->tg3_flags |= TG3_FLAG_NVRAM;
9382
ec41c7df
MC
9383 if (tg3_nvram_lock(tp)) {
9384 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9385 "tg3_nvram_init failed.\n", tp->dev->name);
9386 return;
9387 }
e6af301b 9388 tg3_enable_nvram_access(tp);
1da177e4 9389
361b4ac2
MC
9390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9391 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
9392 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9393 tg3_get_5755_nvram_info(tp);
1b27777a
MC
9394 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9395 tg3_get_5787_nvram_info(tp);
b5d3772c
MC
9396 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9397 tg3_get_5906_nvram_info(tp);
361b4ac2
MC
9398 else
9399 tg3_get_nvram_info(tp);
9400
1da177e4
LT
9401 tg3_get_nvram_size(tp);
9402
e6af301b 9403 tg3_disable_nvram_access(tp);
381291b7 9404 tg3_nvram_unlock(tp);
1da177e4
LT
9405
9406 } else {
9407 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9408
9409 tg3_get_eeprom_size(tp);
9410 }
9411}
9412
/* Read one 32-bit word at @offset through the legacy serial-EEPROM
 * interface (GRC_EEPROM_* registers), polling up to ~1s for completion.
 *
 * @offset must be dword-aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 with *val filled in, -EINVAL on a bad offset, -EBUSY on
 * timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear address/devid/read fields. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for the state machine to signal completion. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9446
9447#define NVRAM_CMD_TIMEOUT 10000
9448
9449static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9450{
9451 int i;
9452
9453 tw32(NVRAM_CMD, nvram_cmd);
9454 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9455 udelay(10);
9456 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9457 udelay(10);
9458 break;
9459 }
9460 }
9461 if (i == NVRAM_CMD_TIMEOUT) {
9462 return -EBUSY;
9463 }
9464 return 0;
9465}
9466
1820180b
MC
9467static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9468{
9469 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9470 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9471 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9472 (tp->nvram_jedecnum == JEDEC_ATMEL))
9473
9474 addr = ((addr / tp->nvram_pagesize) <<
9475 ATMEL_AT45DB0X1B_PAGE_POS) +
9476 (addr % tp->nvram_pagesize);
9477
9478 return addr;
9479}
9480
c4e6575c
MC
9481static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9482{
9483 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9484 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9485 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9486 (tp->nvram_jedecnum == JEDEC_ATMEL))
9487
9488 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9489 tp->nvram_pagesize) +
9490 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9491
9492 return addr;
9493}
9494
1da177e4
LT
/* Read one 32-bit word from NVRAM at logical @offset.
 *
 * Falls back to the legacy EEPROM interface on chips without the NVRAM
 * block.  Otherwise takes the inter-driver NVRAM hardware lock, enables
 * access, issues the read, and releases everything in reverse order.
 * The result is byte-swapped so *val holds the raw NVRAM byte order.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9526
1820180b
MC
9527static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9528{
9529 int err;
9530 u32 tmp;
9531
9532 err = tg3_nvram_read(tp, offset, &tmp);
9533 *val = swab32(tmp);
9534 return err;
9535}
9536
1da177e4
LT
9537static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9538 u32 offset, u32 len, u8 *buf)
9539{
9540 int i, j, rc = 0;
9541 u32 val;
9542
9543 for (i = 0; i < len; i += 4) {
9544 u32 addr, data;
9545
9546 addr = offset + i;
9547
9548 memcpy(&data, buf + i, 4);
9549
9550 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9551
9552 val = tr32(GRC_EEPROM_ADDR);
9553 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9554
9555 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9556 EEPROM_ADDR_READ);
9557 tw32(GRC_EEPROM_ADDR, val |
9558 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9559 (addr & EEPROM_ADDR_ADDR_MASK) |
9560 EEPROM_ADDR_START |
9561 EEPROM_ADDR_WRITE);
6aa20a22 9562
1da177e4
LT
9563 for (j = 0; j < 10000; j++) {
9564 val = tr32(GRC_EEPROM_ADDR);
9565
9566 if (val & EEPROM_ADDR_COMPLETE)
9567 break;
9568 udelay(100);
9569 }
9570 if (!(val & EEPROM_ADDR_COMPLETE)) {
9571 rc = -EBUSY;
9572 break;
9573 }
9574 }
9575
9576 return rc;
9577}
9578
9579/* offset and length are dword aligned */
9580static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9581 u8 *buf)
9582{
9583 int ret = 0;
9584 u32 pagesize = tp->nvram_pagesize;
9585 u32 pagemask = pagesize - 1;
9586 u32 nvram_cmd;
9587 u8 *tmp;
9588
9589 tmp = kmalloc(pagesize, GFP_KERNEL);
9590 if (tmp == NULL)
9591 return -ENOMEM;
9592
9593 while (len) {
9594 int j;
e6af301b 9595 u32 phy_addr, page_off, size;
1da177e4
LT
9596
9597 phy_addr = offset & ~pagemask;
6aa20a22 9598
1da177e4
LT
9599 for (j = 0; j < pagesize; j += 4) {
9600 if ((ret = tg3_nvram_read(tp, phy_addr + j,
9601 (u32 *) (tmp + j))))
9602 break;
9603 }
9604 if (ret)
9605 break;
9606
9607 page_off = offset & pagemask;
9608 size = pagesize;
9609 if (len < size)
9610 size = len;
9611
9612 len -= size;
9613
9614 memcpy(tmp + page_off, buf, size);
9615
9616 offset = offset + (pagesize - page_off);
9617
e6af301b 9618 tg3_enable_nvram_access(tp);
1da177e4
LT
9619
9620 /*
9621 * Before we can erase the flash page, we need
9622 * to issue a special "write enable" command.
9623 */
9624 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9625
9626 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9627 break;
9628
9629 /* Erase the target page */
9630 tw32(NVRAM_ADDR, phy_addr);
9631
9632 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9633 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9634
9635 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9636 break;
9637
9638 /* Issue another write enable to start the write. */
9639 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9640
9641 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9642 break;
9643
9644 for (j = 0; j < pagesize; j += 4) {
9645 u32 data;
9646
9647 data = *((u32 *) (tmp + j));
9648 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9649
9650 tw32(NVRAM_ADDR, phy_addr + j);
9651
9652 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9653 NVRAM_CMD_WR;
9654
9655 if (j == 0)
9656 nvram_cmd |= NVRAM_CMD_FIRST;
9657 else if (j == (pagesize - 4))
9658 nvram_cmd |= NVRAM_CMD_LAST;
9659
9660 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9661 break;
9662 }
9663 if (ret)
9664 break;
9665 }
9666
9667 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9668 tg3_nvram_exec_cmd(tp, nvram_cmd);
9669
9670 kfree(tmp);
9671
9672 return ret;
9673}
9674
9675/* offset and length are dword aligned */
9676static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9677 u8 *buf)
9678{
9679 int i, ret = 0;
9680
9681 for (i = 0; i < len; i += 4, offset += 4) {
9682 u32 data, page_off, phy_addr, nvram_cmd;
9683
9684 memcpy(&data, buf + i, 4);
9685 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9686
9687 page_off = offset % tp->nvram_pagesize;
9688
1820180b 9689 phy_addr = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
9690
9691 tw32(NVRAM_ADDR, phy_addr);
9692
9693 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9694
9695 if ((page_off == 0) || (i == 0))
9696 nvram_cmd |= NVRAM_CMD_FIRST;
f6d9a256 9697 if (page_off == (tp->nvram_pagesize - 4))
1da177e4
LT
9698 nvram_cmd |= NVRAM_CMD_LAST;
9699
9700 if (i == (len - 4))
9701 nvram_cmd |= NVRAM_CMD_LAST;
9702
4c987487 9703 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
af36e6b6 9704 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
1b27777a 9705 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
4c987487
MC
9706 (tp->nvram_jedecnum == JEDEC_ST) &&
9707 (nvram_cmd & NVRAM_CMD_FIRST)) {
1da177e4
LT
9708
9709 if ((ret = tg3_nvram_exec_cmd(tp,
9710 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9711 NVRAM_CMD_DONE)))
9712
9713 break;
9714 }
9715 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9716 /* We always do complete word writes to eeprom. */
9717 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9718 }
9719
9720 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9721 break;
9722 }
9723 return ret;
9724}
9725
9726/* offset and length are dword aligned */
9727static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9728{
9729 int ret;
9730
1da177e4 9731 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
9732 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9733 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
9734 udelay(40);
9735 }
9736
9737 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9738 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9739 }
9740 else {
9741 u32 grc_mode;
9742
ec41c7df
MC
9743 ret = tg3_nvram_lock(tp);
9744 if (ret)
9745 return ret;
1da177e4 9746
e6af301b
MC
9747 tg3_enable_nvram_access(tp);
9748 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9749 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 9750 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
9751
9752 grc_mode = tr32(GRC_MODE);
9753 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9754
9755 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9756 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9757
9758 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9759 buf);
9760 }
9761 else {
9762 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9763 buf);
9764 }
9765
9766 grc_mode = tr32(GRC_MODE);
9767 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9768
e6af301b 9769 tg3_disable_nvram_access(tp);
1da177e4
LT
9770 tg3_nvram_unlock(tp);
9771 }
9772
9773 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 9774 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
9775 udelay(40);
9776 }
9777
9778 return ret;
9779}
9780
/* Maps a board's PCI subsystem vendor/device IDs to the PHY chip
 * fitted on that board.  Used by tg3_phy_probe() as a fallback when
 * the PHY ID cannot be read from the chip or the NVRAM config area.
 * A phy_id of 0 marks a serdes (fiber) board with no copper PHY.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9823
9824static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9825{
9826 int i;
9827
9828 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9829 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9830 tp->pdev->subsystem_vendor) &&
9831 (subsys_id_to_phy_id[i].subsys_devid ==
9832 tp->pdev->subsystem_device))
9833 return &subsys_id_to_phy_id[i];
9834 }
9835 return NULL;
9836}
9837
/* Read the hardware configuration that bootcode left in NIC SRAM and
 * cache it in *tp: PHY id, serdes vs copper, LED mode, write-protect,
 * ASF and WOL capability flags.  Leaves defaults in place when the
 * SRAM signature is absent.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults if no valid SRAM config block is found. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default. */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	/* 5906: write-protect is determined solely by the LOM bit in
	 * the PCIE transaction config register; no SRAM config block.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM))
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		return;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 exists only for certain chips with bootcode
		 * version in the 1..0xff range.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Assemble the 32-bit PHY id from the two SRAM halves. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards use PHY_2 LED mode regardless. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
9995
/* Identify the PHY and set up initial link configuration.
 *
 * PHY id resolution order: (1) read PHYSID1/PHYSID2 over MII unless
 * ASF firmware owns the PHY, (2) the id already cached by
 * tg3_get_eeprom_hw_cfg(), (3) the hardcoded subsystem-id table.
 * For copper PHYs (non-serdes, non-ASF) also resets the PHY and
 * programs autonegotiation advertisements.  Returns 0 or -errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* phy_id == 0 in the table marks a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY under driver control: reset it and program
	 * autoneg advertisements unless link is already up.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR latches status; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this repeats the 5401 DSP init when the call
	 * above succeeded -- looks redundant; confirm intent before
	 * removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Serdes boards advertise gigabit/fibre only. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10118
10119static void __devinit tg3_read_partno(struct tg3 *tp)
10120{
10121 unsigned char vpd_data[256];
10122 int i;
1b27777a 10123 u32 magic;
1da177e4 10124
1820180b 10125 if (tg3_nvram_read_swab(tp, 0x0, &magic))
f49639e6 10126 goto out_not_found;
1da177e4 10127
1820180b 10128 if (magic == TG3_EEPROM_MAGIC) {
1b27777a
MC
10129 for (i = 0; i < 256; i += 4) {
10130 u32 tmp;
1da177e4 10131
1b27777a
MC
10132 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10133 goto out_not_found;
10134
10135 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
10136 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
10137 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10138 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10139 }
10140 } else {
10141 int vpd_cap;
10142
10143 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10144 for (i = 0; i < 256; i += 4) {
10145 u32 tmp, j = 0;
10146 u16 tmp16;
10147
10148 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10149 i);
10150 while (j++ < 100) {
10151 pci_read_config_word(tp->pdev, vpd_cap +
10152 PCI_VPD_ADDR, &tmp16);
10153 if (tmp16 & 0x8000)
10154 break;
10155 msleep(1);
10156 }
f49639e6
DM
10157 if (!(tmp16 & 0x8000))
10158 goto out_not_found;
10159
1b27777a
MC
10160 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10161 &tmp);
10162 tmp = cpu_to_le32(tmp);
10163 memcpy(&vpd_data[i], &tmp, 4);
10164 }
1da177e4
LT
10165 }
10166
10167 /* Now parse and find the part number. */
10168 for (i = 0; i < 256; ) {
10169 unsigned char val = vpd_data[i];
10170 int block_end;
10171
10172 if (val == 0x82 || val == 0x91) {
10173 i = (i + 3 +
10174 (vpd_data[i + 1] +
10175 (vpd_data[i + 2] << 8)));
10176 continue;
10177 }
10178
10179 if (val != 0x90)
10180 goto out_not_found;
10181
10182 block_end = (i + 3 +
10183 (vpd_data[i + 1] +
10184 (vpd_data[i + 2] << 8)));
10185 i += 3;
10186 while (i < block_end) {
10187 if (vpd_data[i + 0] == 'P' &&
10188 vpd_data[i + 1] == 'N') {
10189 int partno_len = vpd_data[i + 2];
10190
10191 if (partno_len > 24)
10192 goto out_not_found;
10193
10194 memcpy(tp->board_part_number,
10195 &vpd_data[i + 3],
10196 partno_len);
10197
10198 /* Success. */
10199 return;
10200 }
10201 }
10202
10203 /* Part number not found. */
10204 goto out_not_found;
10205 }
10206
10207out_not_found:
b5d3772c
MC
10208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10209 strcpy(tp->board_part_number, "BCM95906");
10210 else
10211 strcpy(tp->board_part_number, "none");
1da177e4
LT
10212}
10213
c4e6575c
MC
10214static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10215{
10216 u32 val, offset, start;
10217
10218 if (tg3_nvram_read_swab(tp, 0, &val))
10219 return;
10220
10221 if (val != TG3_EEPROM_MAGIC)
10222 return;
10223
10224 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10225 tg3_nvram_read_swab(tp, 0x4, &start))
10226 return;
10227
10228 offset = tg3_nvram_logical_addr(tp, offset);
10229 if (tg3_nvram_read_swab(tp, offset, &val))
10230 return;
10231
10232 if ((val & 0xfc000000) == 0x0c000000) {
10233 u32 ver_offset, addr;
10234 int i;
10235
10236 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10237 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10238 return;
10239
10240 if (val != 0)
10241 return;
10242
10243 addr = offset + ver_offset - start;
10244 for (i = 0; i < 16; i += 4) {
10245 if (tg3_nvram_read(tp, addr + i, &val))
10246 return;
10247
10248 val = cpu_to_le32(val);
10249 memcpy(tp->fw_ver + i, &val, 4);
10250 }
10251 }
10252}
10253
1da177e4
LT
10254static int __devinit tg3_get_invariants(struct tg3 *tp)
10255{
10256 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
10257 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10258 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
c165b004
JL
10259 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10260 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
399de50b
MC
10261 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10262 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
10263 { },
10264 };
10265 u32 misc_ctrl_reg;
10266 u32 cacheline_sz_reg;
10267 u32 pci_state_reg, grc_misc_cfg;
10268 u32 val;
10269 u16 pci_cmd;
10270 int err;
10271
1da177e4
LT
10272 /* Force memory write invalidate off. If we leave it on,
10273 * then on 5700_BX chips we have to enable a workaround.
10274 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10275 * to match the cacheline size. The Broadcom driver have this
10276 * workaround but turns MWI off all the times so never uses
10277 * it. This seems to suggest that the workaround is insufficient.
10278 */
10279 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10280 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10281 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10282
10283 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10284 * has the register indirect write enable bit set before
10285 * we try to access any of the MMIO registers. It is also
10286 * critical that the PCI-X hw workaround situation is decided
10287 * before that as well.
10288 */
10289 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10290 &misc_ctrl_reg);
10291
10292 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10293 MISC_HOST_CTRL_CHIPREV_SHIFT);
10294
ff645bec
MC
10295 /* Wrong chip ID in 5752 A0. This code can be removed later
10296 * as A0 is not in production.
10297 */
10298 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10299 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10300
6892914f
MC
10301 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10302 * we need to disable memory and use config. cycles
10303 * only to access all registers. The 5702/03 chips
10304 * can mistakenly decode the special cycles from the
10305 * ICH chipsets as memory write cycles, causing corruption
10306 * of register and memory space. Only certain ICH bridges
10307 * will drive special cycles with non-zero data during the
10308 * address phase which can fall within the 5703's address
10309 * range. This is not an ICH bug as the PCI spec allows
10310 * non-zero address during special cycles. However, only
10311 * these ICH bridges are known to drive non-zero addresses
10312 * during special cycles.
10313 *
10314 * Since special cycles do not cross PCI bridges, we only
10315 * enable this workaround if the 5703 is on the secondary
10316 * bus of these ICH bridges.
10317 */
10318 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10319 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10320 static struct tg3_dev_id {
10321 u32 vendor;
10322 u32 device;
10323 u32 rev;
10324 } ich_chipsets[] = {
10325 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10326 PCI_ANY_ID },
10327 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10328 PCI_ANY_ID },
10329 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10330 0xa },
10331 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10332 PCI_ANY_ID },
10333 { },
10334 };
10335 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10336 struct pci_dev *bridge = NULL;
10337
10338 while (pci_id->vendor != 0) {
10339 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10340 bridge);
10341 if (!bridge) {
10342 pci_id++;
10343 continue;
10344 }
10345 if (pci_id->rev != PCI_ANY_ID) {
10346 u8 rev;
10347
10348 pci_read_config_byte(bridge, PCI_REVISION_ID,
10349 &rev);
10350 if (rev > pci_id->rev)
10351 continue;
10352 }
10353 if (bridge->subordinate &&
10354 (bridge->subordinate->number ==
10355 tp->pdev->bus->number)) {
10356
10357 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10358 pci_dev_put(bridge);
10359 break;
10360 }
10361 }
10362 }
10363
4a29cc2e
MC
10364 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10365 * DMA addresses > 40-bit. This bridge may have other additional
10366 * 57xx devices behind it in some 4-port NIC designs for example.
10367 * Any tg3 device found behind the bridge will also need the 40-bit
10368 * DMA workaround.
10369 */
a4e2b347
MC
10370 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10372 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 10373 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 10374 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 10375 }
4a29cc2e
MC
10376 else {
10377 struct pci_dev *bridge = NULL;
10378
10379 do {
10380 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10381 PCI_DEVICE_ID_SERVERWORKS_EPB,
10382 bridge);
10383 if (bridge && bridge->subordinate &&
10384 (bridge->subordinate->number <=
10385 tp->pdev->bus->number) &&
10386 (bridge->subordinate->subordinate >=
10387 tp->pdev->bus->number)) {
10388 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10389 pci_dev_put(bridge);
10390 break;
10391 }
10392 } while (bridge);
10393 }
4cf78e4f 10394
1da177e4
LT
10395 /* Initialize misc host control in PCI block. */
10396 tp->misc_host_ctrl |= (misc_ctrl_reg &
10397 MISC_HOST_CTRL_CHIPREV);
10398 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10399 tp->misc_host_ctrl);
10400
10401 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10402 &cacheline_sz_reg);
10403
10404 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10405 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10406 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10407 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10408
6708e5cc 10409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 10410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 10411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 10412 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
b5d3772c 10413 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
a4e2b347 10414 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
10415 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10416
1b440c56
JL
10417 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10418 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10419 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10420
5a6f3074 10421 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
af36e6b6 10422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c
MC
10423 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10424 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5a6f3074 10425 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
fcfa0a32 10426 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
52c0fd83
MC
10427 } else {
10428 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10429 TG3_FLG2_HW_TSO_1_BUG;
10430 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10431 ASIC_REV_5750 &&
10432 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10433 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10434 }
5a6f3074 10435 }
1da177e4 10436
0f893dc6
MC
10437 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10438 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
d9ab5ad1 10439 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
af36e6b6 10440 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
b5d3772c
MC
10441 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10442 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
0f893dc6
MC
10443 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10444
1da177e4
LT
10445 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10446 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10447
399de50b
MC
10448 /* If we have an AMD 762 or VIA K8T800 chipset, write
10449 * reordering to the mailbox registers done by the host
10450 * controller can cause major troubles. We read back from
10451 * every mailbox register write to force the writes to be
10452 * posted to the chip in order.
10453 */
10454 if (pci_dev_present(write_reorder_chipsets) &&
10455 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10456 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10457
1da177e4
LT
10458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10459 tp->pci_lat_timer < 64) {
10460 tp->pci_lat_timer = 64;
10461
10462 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10463 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10464 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10465 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10466
10467 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10468 cacheline_sz_reg);
10469 }
10470
10471 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10472 &pci_state_reg);
10473
10474 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10475 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10476
10477 /* If this is a 5700 BX chipset, and we are in PCI-X
10478 * mode, enable register write workaround.
10479 *
10480 * The workaround is to use indirect register accesses
10481 * for all chip writes not to mailbox registers.
10482 */
10483 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10484 u32 pm_reg;
10485 u16 pci_cmd;
10486
10487 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10488
10489 /* The chip can have it's power management PCI config
10490 * space registers clobbered due to this bug.
10491 * So explicitly force the chip into D0 here.
10492 */
10493 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10494 &pm_reg);
10495 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10496 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10497 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10498 pm_reg);
10499
10500 /* Also, force SERR#/PERR# in PCI command. */
10501 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10502 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10503 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10504 }
10505 }
10506
087fe256
MC
10507 /* 5700 BX chips need to have their TX producer index mailboxes
10508 * written twice to workaround a bug.
10509 */
10510 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10511 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10512
1da177e4
LT
10513 /* Back to back register writes can cause problems on this chip,
10514 * the workaround is to read back all reg writes except those to
10515 * mailbox regs. See tg3_write_indirect_reg32().
10516 *
10517 * PCI Express 5750_A0 rev chips need this workaround too.
10518 */
10519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10520 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10521 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10522 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10523
10524 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10525 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10526 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10527 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10528
10529 /* Chip-specific fixup from Broadcom driver */
10530 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10531 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10532 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10533 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10534 }
10535
1ee582d8 10536 /* Default fast path register access methods */
20094930 10537 tp->read32 = tg3_read32;
1ee582d8 10538 tp->write32 = tg3_write32;
09ee929c 10539 tp->read32_mbox = tg3_read32;
20094930 10540 tp->write32_mbox = tg3_write32;
1ee582d8
MC
10541 tp->write32_tx_mbox = tg3_write32;
10542 tp->write32_rx_mbox = tg3_write32;
10543
10544 /* Various workaround register access methods */
10545 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10546 tp->write32 = tg3_write_indirect_reg32;
10547 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10548 tp->write32 = tg3_write_flush_reg32;
10549
10550 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10551 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10552 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10553 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10554 tp->write32_rx_mbox = tg3_write_flush_reg32;
10555 }
20094930 10556
6892914f
MC
10557 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10558 tp->read32 = tg3_read_indirect_reg32;
10559 tp->write32 = tg3_write_indirect_reg32;
10560 tp->read32_mbox = tg3_read_indirect_mbox;
10561 tp->write32_mbox = tg3_write_indirect_mbox;
10562 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10563 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10564
10565 iounmap(tp->regs);
22abe310 10566 tp->regs = NULL;
6892914f
MC
10567
10568 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10569 pci_cmd &= ~PCI_COMMAND_MEMORY;
10570 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10571 }
b5d3772c
MC
10572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10573 tp->read32_mbox = tg3_read32_mbox_5906;
10574 tp->write32_mbox = tg3_write32_mbox_5906;
10575 tp->write32_tx_mbox = tg3_write32_mbox_5906;
10576 tp->write32_rx_mbox = tg3_write32_mbox_5906;
10577 }
6892914f 10578
bbadf503
MC
10579 if (tp->write32 == tg3_write_indirect_reg32 ||
10580 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10581 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 10582 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
bbadf503
MC
10583 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10584
7d0c41ef
MC
10585 /* Get eeprom hw config before calling tg3_set_power_state().
10586 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10587 * determined before calling tg3_set_power_state() so that
10588 * we know whether or not to switch out of Vaux power.
10589 * When the flag is set, it means that GPIO1 is used for eeprom
10590 * write protect and also implies that it is a LOM where GPIOs
10591 * are not used to switch power.
6aa20a22 10592 */
7d0c41ef
MC
10593 tg3_get_eeprom_hw_cfg(tp);
10594
314fba34
MC
10595 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10596 * GPIO1 driven high will bring 5700's external PHY out of reset.
10597 * It is also used as eeprom write protect on LOMs.
10598 */
10599 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10600 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10601 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10602 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10603 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
10604 /* Unused GPIO3 must be driven as output on 5752 because there
10605 * are no pull-up resistors on unused GPIO pins.
10606 */
10607 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10608 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 10609
af36e6b6
MC
10610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10611 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10612
1da177e4 10613 /* Force the chip into D0. */
bc1c7567 10614 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
10615 if (err) {
10616 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10617 pci_name(tp->pdev));
10618 return err;
10619 }
10620
10621 /* 5700 B0 chips do not support checksumming correctly due
10622 * to hardware bugs.
10623 */
10624 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10625 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10626
1da177e4
LT
10627 /* Derive initial jumbo mode from MTU assigned in
10628 * ether_setup() via the alloc_etherdev() call
10629 */
0f893dc6 10630 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 10631 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 10632 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
10633
10634 /* Determine WakeOnLan speed to use. */
10635 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10636 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10637 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10638 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10639 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10640 } else {
10641 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10642 }
10643
10644 /* A few boards don't want Ethernet@WireSpeed phy feature */
10645 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10646 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10647 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 10648 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
b5d3772c 10649 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
747e8f8b 10650 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
10651 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10652
10653 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10654 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10655 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10656 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10657 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10658
c424cb24
MC
10659 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10660 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10662 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
b5d3772c 10663 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
c424cb24
MC
10664 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10665 }
1da177e4 10666
1da177e4 10667 tp->coalesce_mode = 0;
1da177e4
LT
10668 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10669 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10670 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10671
10672 /* Initialize MAC MI mode, polling disabled. */
10673 tw32_f(MAC_MI_MODE, tp->mi_mode);
10674 udelay(80);
10675
10676 /* Initialize data/descriptor byte/word swapping. */
10677 val = tr32(GRC_MODE);
10678 val &= GRC_MODE_HOST_STACKUP;
10679 tw32(GRC_MODE, val | tp->grc_mode);
10680
10681 tg3_switch_clocks(tp);
10682
10683 /* Clear this out for sanity. */
10684 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10685
10686 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10687 &pci_state_reg);
10688 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10689 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10690 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10691
10692 if (chiprevid == CHIPREV_ID_5701_A0 ||
10693 chiprevid == CHIPREV_ID_5701_B0 ||
10694 chiprevid == CHIPREV_ID_5701_B2 ||
10695 chiprevid == CHIPREV_ID_5701_B5) {
10696 void __iomem *sram_base;
10697
10698 /* Write some dummy words into the SRAM status block
10699 * area, see if it reads back correctly. If the return
10700 * value is bad, force enable the PCIX workaround.
10701 */
10702 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10703
10704 writel(0x00000000, sram_base);
10705 writel(0x00000000, sram_base + 4);
10706 writel(0xffffffff, sram_base + 4);
10707 if (readl(sram_base) != 0x00000000)
10708 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10709 }
10710 }
10711
10712 udelay(50);
10713 tg3_nvram_init(tp);
10714
10715 grc_misc_cfg = tr32(GRC_MISC_CFG);
10716 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10717
10718 /* Broadcom's driver says that CIOBE multisplit has a bug */
10719#if 0
10720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10721 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10722 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10723 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10724 }
10725#endif
10726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10727 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10728 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10729 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10730
fac9b83e
DM
10731 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10732 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10733 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10734 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10735 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10736 HOSTCC_MODE_CLRTICK_TXBD);
10737
10738 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10739 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10740 tp->misc_host_ctrl);
10741 }
10742
1da177e4
LT
10743 /* these are limited to 10/100 only */
10744 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10745 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10746 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10747 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10748 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10749 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10750 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10751 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10752 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
b5d3772c
MC
10753 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)) ||
10754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1da177e4
LT
10755 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10756
10757 err = tg3_phy_probe(tp);
10758 if (err) {
10759 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10760 pci_name(tp->pdev), err);
10761 /* ... but do not return immediately ... */
10762 }
10763
10764 tg3_read_partno(tp);
c4e6575c 10765 tg3_read_fw_ver(tp);
1da177e4
LT
10766
10767 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10768 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10769 } else {
10770 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10771 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10772 else
10773 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10774 }
10775
10776 /* 5700 {AX,BX} chips have a broken status block link
10777 * change bit implementation, so we must use the
10778 * status register in those cases.
10779 */
10780 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10781 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10782 else
10783 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10784
10785 /* The led_ctrl is set during tg3_phy_probe, here we might
10786 * have to force the link status polling mechanism based
10787 * upon subsystem IDs.
10788 */
10789 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10790 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10791 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10792 TG3_FLAG_USE_LINKCHG_REG);
10793 }
10794
10795 /* For all SERDES we poll the MAC status register. */
10796 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10797 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10798 else
10799 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10800
5a6f3074 10801 /* All chips before 5787 can get confused if TX buffers
1da177e4
LT
10802 * straddle the 4GB address boundary in some cases.
10803 */
af36e6b6 10804 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c
MC
10805 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10806 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
5a6f3074
MC
10807 tp->dev->hard_start_xmit = tg3_start_xmit;
10808 else
10809 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
1da177e4
LT
10810
10811 tp->rx_offset = 2;
10812 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10813 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10814 tp->rx_offset = 0;
10815
f92905de
MC
10816 tp->rx_std_max_post = TG3_RX_RING_SIZE;
10817
10818 /* Increment the rx prod index on the rx std ring by at most
10819 * 8 for these chips to workaround hw errata.
10820 */
10821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10822 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10823 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10824 tp->rx_std_max_post = 8;
10825
1da177e4
LT
10826 /* By default, disable wake-on-lan. User can change this
10827 * using ETHTOOL_SWOL.
10828 */
10829 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10830
10831 return err;
10832}
10833
10834#ifdef CONFIG_SPARC64
/* Fetch the factory MAC address from the OpenFirmware device tree on
 * sparc64 systems, via the PCI device's "local-mac-address" property.
 *
 * Sets both dev->dev_addr and dev->perm_addr on success.
 * Returns 0 on success, -ENODEV if the property is absent or not
 * exactly 6 bytes (caller then falls back to other address sources).
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		unsigned char *addr;
		int len;

		addr = of_get_property(pcp->prom_node, "local-mac-address",
				       &len);
		/* A valid Ethernet address property is exactly 6 bytes. */
		if (addr && len == 6) {
			memcpy(dev->dev_addr, addr, 6);
			memcpy(dev->perm_addr, dev->dev_addr, 6);
			return 0;
		}
	}
	return -ENODEV;
}
10855
/* Last-resort MAC address source on sparc64: the system IDPROM.
 * Copies the IDPROM Ethernet address into both the current and the
 * permanent address of the net_device.  Always returns 0.
 */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
10864#endif
10865
/* Determine the device MAC address, trying sources in decreasing order
 * of reliability: OpenFirmware (sparc64 only), the SRAM mailbox filled
 * in by boot firmware, NVRAM, and finally the live MAC address
 * registers.  On total failure, sparc64 falls back to the IDPROM.
 *
 * Returns 0 on success (dev->dev_addr and dev->perm_addr populated),
 * -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC64
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Select the NVRAM offset of the MAC address for this chip.
	 * 5704/5780-class parts have two MACs; the second function's
	 * address lives at 0xcc.  5906 keeps it at 0x10.
	 */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b in the top half is the firmware's "address valid"
	 * signature for the SRAM mailbox.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte order differs from the
		 * SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
10940
59e6b434
DM
10941#define BOUNDARY_SINGLE_CACHELINE 1
10942#define BOUNDARY_MULTI_CACHELINE 2
10943
10944static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10945{
10946 int cacheline_size;
10947 u8 byte;
10948 int goal;
10949
10950 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10951 if (byte == 0)
10952 cacheline_size = 1024;
10953 else
10954 cacheline_size = (int) byte * 4;
10955
10956 /* On 5703 and later chips, the boundary bits have no
10957 * effect.
10958 */
10959 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10960 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10961 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10962 goto out;
10963
10964#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10965 goal = BOUNDARY_MULTI_CACHELINE;
10966#else
10967#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10968 goal = BOUNDARY_SINGLE_CACHELINE;
10969#else
10970 goal = 0;
10971#endif
10972#endif
10973
10974 if (!goal)
10975 goto out;
10976
10977 /* PCI controllers on most RISC systems tend to disconnect
10978 * when a device tries to burst across a cache-line boundary.
10979 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10980 *
10981 * Unfortunately, for PCI-E there are only limited
10982 * write-side controls for this, and thus for reads
10983 * we will still get the disconnects. We'll also waste
10984 * these PCI cycles for both read and write for chips
10985 * other than 5700 and 5701 which do not implement the
10986 * boundary bits.
10987 */
10988 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10989 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10990 switch (cacheline_size) {
10991 case 16:
10992 case 32:
10993 case 64:
10994 case 128:
10995 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10996 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10997 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10998 } else {
10999 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11000 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11001 }
11002 break;
11003
11004 case 256:
11005 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11006 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11007 break;
11008
11009 default:
11010 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11011 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11012 break;
11013 };
11014 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11015 switch (cacheline_size) {
11016 case 16:
11017 case 32:
11018 case 64:
11019 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11020 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11021 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11022 break;
11023 }
11024 /* fallthrough */
11025 case 128:
11026 default:
11027 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11028 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11029 break;
11030 };
11031 } else {
11032 switch (cacheline_size) {
11033 case 16:
11034 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11035 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11036 DMA_RWCTRL_WRITE_BNDRY_16);
11037 break;
11038 }
11039 /* fallthrough */
11040 case 32:
11041 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11042 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11043 DMA_RWCTRL_WRITE_BNDRY_32);
11044 break;
11045 }
11046 /* fallthrough */
11047 case 64:
11048 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11049 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11050 DMA_RWCTRL_WRITE_BNDRY_64);
11051 break;
11052 }
11053 /* fallthrough */
11054 case 128:
11055 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11056 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11057 DMA_RWCTRL_WRITE_BNDRY_128);
11058 break;
11059 }
11060 /* fallthrough */
11061 case 256:
11062 val |= (DMA_RWCTRL_READ_BNDRY_256 |
11063 DMA_RWCTRL_WRITE_BNDRY_256);
11064 break;
11065 case 512:
11066 val |= (DMA_RWCTRL_READ_BNDRY_512 |
11067 DMA_RWCTRL_WRITE_BNDRY_512);
11068 break;
11069 case 1024:
11070 default:
11071 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11072 DMA_RWCTRL_WRITE_BNDRY_1024);
11073 break;
11074 };
11075 }
11076
11077out:
11078 return val;
11079}
11080
1da177e4
LT
/* Run a single DMA transaction of @size bytes between the host buffer
 * @buf (bus address @buf_dma) and chip-internal mbuf memory, using an
 * on-chip DMA descriptor placed in NIC SRAM.
 *
 * @to_device non-zero drives the read-DMA engine (host memory -> chip);
 * zero drives the write-DMA engine (chip -> host).
 *
 * Returns 0 once the matching completion FIFO reports the descriptor
 * address, -ENODEV if it does not show up within 40 polls of 100us.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the completion FIFOs and DMA status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Host side of the transfer; 0x2100 is the chip-internal mbuf
	 * address used for the test data.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *    ...the DMA engine is connected to the GRC block and a DMA
	 *    reset may affect the GRC block in some unpredictable way...
	 *    The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI memory window, then close the window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor on the matching high-priority DMA queue. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for our descriptor's SRAM address. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11161
#define TEST_BUFFER_SIZE	0x2000

/* Configure the DMA read/write control register value (tp->dma_rwctrl)
 * for this chip/bus combination, and on 5700/5701 only, run a
 * host<->chip DMA loopback of TEST_BUFFER_SIZE bytes to detect the
 * write-DMA corruption bug, tightening the write boundary to 16 bytes
 * when corruption is observed (or when a known-bad host bridge is
 * present even though the test passed).
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA fails even with the 16-byte boundary.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes, then merge in the
	 * per-arch/per-bus boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Chip- and bus-specific watermark/workaround bits. */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the DMA loopback test below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (word == index). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it.  On the first corruption, tighten the write
		 * boundary to 16 bytes and retry the whole test; if it is
		 * already at 16 bytes, give up.
		 */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11344
11345static void __devinit tg3_init_link_config(struct tg3 *tp)
11346{
11347 tp->link_config.advertising =
11348 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11349 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11350 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11351 ADVERTISED_Autoneg | ADVERTISED_MII);
11352 tp->link_config.speed = SPEED_INVALID;
11353 tp->link_config.duplex = DUPLEX_INVALID;
11354 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
11355 tp->link_config.active_speed = SPEED_INVALID;
11356 tp->link_config.active_duplex = DUPLEX_INVALID;
11357 tp->link_config.phy_is_low_power = 0;
11358 tp->link_config.orig_speed = SPEED_INVALID;
11359 tp->link_config.orig_duplex = DUPLEX_INVALID;
11360 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11361}
11362
/* Choose buffer manager memory watermarks based on chip family:
 * 5705-plus chips (with a 5906 override for two values) versus the
 * older 5700-class defaults.  Jumbo watermarks on 5705-plus use the
 * 5780-class values.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 overrides the MAC-RX low water and high water. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	/* DMA descriptor watermarks are the same on all chips. */
	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
11404
11405static char * __devinit tg3_phy_string(struct tg3 *tp)
11406{
11407 switch (tp->phy_id & PHY_ID_MASK) {
11408 case PHY_ID_BCM5400: return "5400";
11409 case PHY_ID_BCM5401: return "5401";
11410 case PHY_ID_BCM5411: return "5411";
11411 case PHY_ID_BCM5701: return "5701";
11412 case PHY_ID_BCM5703: return "5703";
11413 case PHY_ID_BCM5704: return "5704";
11414 case PHY_ID_BCM5705: return "5705";
11415 case PHY_ID_BCM5750: return "5750";
85e94ced 11416 case PHY_ID_BCM5752: return "5752";
a4e2b347 11417 case PHY_ID_BCM5714: return "5714";
4cf78e4f 11418 case PHY_ID_BCM5780: return "5780";
af36e6b6 11419 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 11420 case PHY_ID_BCM5787: return "5787";
126a3368 11421 case PHY_ID_BCM5756: return "5722/5756";
b5d3772c 11422 case PHY_ID_BCM5906: return "5906";
1da177e4
LT
11423 case PHY_ID_BCM8002: return "8002/serdes";
11424 case 0: return "serdes";
11425 default: return "unknown";
11426 };
11427}
11428
f9804ddb
MC
11429static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11430{
11431 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11432 strcpy(str, "PCI Express");
11433 return str;
11434 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11435 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11436
11437 strcpy(str, "PCIX:");
11438
11439 if ((clock_ctrl == 7) ||
11440 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11441 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11442 strcat(str, "133MHz");
11443 else if (clock_ctrl == 0)
11444 strcat(str, "33MHz");
11445 else if (clock_ctrl == 2)
11446 strcat(str, "50MHz");
11447 else if (clock_ctrl == 4)
11448 strcat(str, "66MHz");
11449 else if (clock_ctrl == 6)
11450 strcat(str, "100MHz");
f9804ddb
MC
11451 } else {
11452 strcpy(str, "PCI:");
11453 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11454 strcat(str, "66MHz");
11455 else
11456 strcat(str, "33MHz");
11457 }
11458 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11459 strcat(str, ":32-bit");
11460 else
11461 strcat(str, ":64-bit");
11462 return str;
11463}
11464
8c2dc7e1 11465static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
1da177e4
LT
11466{
11467 struct pci_dev *peer;
11468 unsigned int func, devnr = tp->pdev->devfn & ~7;
11469
11470 for (func = 0; func < 8; func++) {
11471 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11472 if (peer && peer != tp->pdev)
11473 break;
11474 pci_dev_put(peer);
11475 }
16fe9d74
MC
11476 /* 5704 can be configured in single-port mode, set peer to
11477 * tp->pdev in that case.
11478 */
11479 if (!peer) {
11480 peer = tp->pdev;
11481 return peer;
11482 }
1da177e4
LT
11483
11484 /*
11485 * We don't need to keep the refcount elevated; there's no way
11486 * to remove one half of this device without removing the other
11487 */
11488 pci_dev_put(peer);
11489
11490 return peer;
11491}
11492
15f9850d
DM
11493static void __devinit tg3_init_coal(struct tg3 *tp)
11494{
11495 struct ethtool_coalesce *ec = &tp->coal;
11496
11497 memset(ec, 0, sizeof(*ec));
11498 ec->cmd = ETHTOOL_GCOALESCE;
11499 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11500 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11501 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11502 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11503 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11504 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11505 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11506 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11507 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11508
11509 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11510 HOSTCC_MODE_CLRTICK_TXBD)) {
11511 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11512 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11513 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11514 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11515 }
d244c892
MC
11516
11517 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11518 ec->rx_coalesce_usecs_irq = 0;
11519 ec->tx_coalesce_usecs_irq = 0;
11520 ec->stats_block_coalesce_usecs = 0;
11521 }
15f9850d
DM
11522}
11523
1da177e4
LT
/* One-time PCI probe entry point: enable and map the device, discover
 * chip capabilities, configure DMA masks and driver defaults, reset any
 * state left behind by boot firmware, and register the net_device.
 * Error paths unwind in reverse acquisition order via the err_out_*
 * labels.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner only for the first probed device. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Wire up the private state with driver-wide defaults. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* net_device method table (pre-net_device_ops era). */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Read chip revision, fuses and quirk flags from the device. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit addressing if the wide mask failed. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Firmware TSO is unusable on 5700/5701, 5705 A0, and when ASF
	 * management firmware is running (it owns the firmware mailbox).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
	}

#endif

	/* 5705 A1 on a slow bus without TSO: shrink the RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips need a pointer to their sibling function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		/* 5755/5787 can checksum any protocol; older chips IPv4 only. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space. We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Probe-time summary banner: part, bus, MAC and feature flags. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	       (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	/* No link yet; the timer/interrupt path will report carrier. */
	netif_carrier_off(tp->dev);

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11851
11852static void __devexit tg3_remove_one(struct pci_dev *pdev)
11853{
11854 struct net_device *dev = pci_get_drvdata(pdev);
11855
11856 if (dev) {
11857 struct tg3 *tp = netdev_priv(dev);
11858
7faa006f 11859 flush_scheduled_work();
1da177e4 11860 unregister_netdev(dev);
6892914f
MC
11861 if (tp->regs) {
11862 iounmap(tp->regs);
22abe310 11863 tp->regs = NULL;
6892914f 11864 }
1da177e4
LT
11865 free_netdev(dev);
11866 pci_release_regions(pdev);
11867 pci_disable_device(pdev);
11868 pci_set_drvdata(pdev, NULL);
11869 }
11870}
11871
/* Legacy PM suspend hook: quiesce the interface, halt the chip, and
 * enter the PCI power state chosen for @state.  If the power
 * transition fails, the hardware is restarted and the interface
 * reattached so the system keeps a working NIC.  Returns 0 or the
 * error from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface was never brought up. */
	if (!netif_running(dev))
		return 0;

	/* Make sure a queued reset_task is not racing with suspend. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark it uninitialized before powering down. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the device back up. */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
11917
/* Legacy PM resume hook: restore PCI config space, return the chip
 * to D0, and re-initialize the hardware if the interface was running
 * before suspend.  Returns 0 or a negative errno.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	/* Config space was snapshotted at probe/suspend time. */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Re-init the chip and restart the periodic timer. */
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
11952
/* PCI driver descriptor: ties the device ID table to the probe,
 * remove, and legacy suspend/resume entry points above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11961
/* Module load: register the tg3 PCI driver with the PCI core.
 * Devices are probed via tg3_init_one() as they are matched.
 */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
11966
/* Module unload: detach from the PCI core; per-device teardown
 * happens through tg3_remove_one().
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
11971
/* Hook the entry/exit functions into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);