]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Add 5723 support
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
65610fba 7 * Copyright (C) 2005-2007 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
14c85021 26#include <linux/in.h>
1da177e4
LT
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
f9a5f7d3 40#include <linux/dma-mapping.h>
1da177e4
LT
41
42#include <net/checksum.h>
c9bdd4b5 43#include <net/ip.h>
1da177e4
LT
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
49b6e95f 50#ifdef CONFIG_SPARC
1da177e4 51#include <asm/idprom.h>
49b6e95f 52#include <asm/prom.h>
1da177e4
LT
53#endif
54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56#define TG3_VLAN_TAG_USED 1
57#else
58#define TG3_VLAN_TAG_USED 0
59#endif
60
1da177e4 61#define TG3_TSO_SUPPORT 1
1da177e4
LT
62
63#include "tg3.h"
64
65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": "
114342f2
MC
67#define DRV_MODULE_VERSION "3.84"
68#define DRV_MODULE_RELDATE "October 12, 2007"
1da177e4
LT
69
70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0
72#define TG3_DEF_TX_MODE 0
73#define TG3_DEF_MSG_ENABLE \
74 (NETIF_MSG_DRV | \
75 NETIF_MSG_PROBE | \
76 NETIF_MSG_LINK | \
77 NETIF_MSG_TIMER | \
78 NETIF_MSG_IFDOWN | \
79 NETIF_MSG_IFUP | \
80 NETIF_MSG_RX_ERR | \
81 NETIF_MSG_TX_ERR)
82
83/* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
85 */
86#define TG3_TX_TIMEOUT (5 * HZ)
87
88/* hardware minimum and maximum for a single frame's data payload */
89#define TG3_MIN_MTU 60
90#define TG3_MAX_MTU(tp) \
0f893dc6 91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
92
93/* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
96 */
97#define TG3_RX_RING_SIZE 512
98#define TG3_DEF_RX_RING_PENDING 200
99#define TG3_RX_JUMBO_RING_SIZE 256
100#define TG3_DEF_RX_JUMBO_RING_PENDING 100
101
102/* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
107 */
108#define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
110
111#define TG3_TX_RING_SIZE 512
112#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113
114#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 TG3_RX_RING_SIZE)
116#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
121 TG3_TX_RING_SIZE)
1da177e4
LT
122#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
126
127/* minimum number of free TX descriptors required to wake up TX process */
42952231 128#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
1da177e4
LT
129
130/* number of ETHTOOL_GSTATS u64's */
131#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
4cafd3f5
MC
133#define TG3_NUM_TEST 6
134
1da177e4
LT
135static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140MODULE_LICENSE("GPL");
141MODULE_VERSION(DRV_MODULE_VERSION);
142
143static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144module_param(tg3_debug, int, 0);
145MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147static struct pci_device_id tg3_pci_tbl[] = {
13185217
HK
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217
HK
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
676917d4 191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
13185217
HK
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
6c7af27c 203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
9936bcf6
MC
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
13185217
HK
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213 {}
1da177e4
LT
214};
215
216MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
50da859d 218static const struct {
1da177e4
LT
219 const char string[ETH_GSTRING_LEN];
220} ethtool_stats_keys[TG3_NUM_STATS] = {
221 { "rx_octets" },
222 { "rx_fragments" },
223 { "rx_ucast_packets" },
224 { "rx_mcast_packets" },
225 { "rx_bcast_packets" },
226 { "rx_fcs_errors" },
227 { "rx_align_errors" },
228 { "rx_xon_pause_rcvd" },
229 { "rx_xoff_pause_rcvd" },
230 { "rx_mac_ctrl_rcvd" },
231 { "rx_xoff_entered" },
232 { "rx_frame_too_long_errors" },
233 { "rx_jabbers" },
234 { "rx_undersize_packets" },
235 { "rx_in_length_errors" },
236 { "rx_out_length_errors" },
237 { "rx_64_or_less_octet_packets" },
238 { "rx_65_to_127_octet_packets" },
239 { "rx_128_to_255_octet_packets" },
240 { "rx_256_to_511_octet_packets" },
241 { "rx_512_to_1023_octet_packets" },
242 { "rx_1024_to_1522_octet_packets" },
243 { "rx_1523_to_2047_octet_packets" },
244 { "rx_2048_to_4095_octet_packets" },
245 { "rx_4096_to_8191_octet_packets" },
246 { "rx_8192_to_9022_octet_packets" },
247
248 { "tx_octets" },
249 { "tx_collisions" },
250
251 { "tx_xon_sent" },
252 { "tx_xoff_sent" },
253 { "tx_flow_control" },
254 { "tx_mac_errors" },
255 { "tx_single_collisions" },
256 { "tx_mult_collisions" },
257 { "tx_deferred" },
258 { "tx_excessive_collisions" },
259 { "tx_late_collisions" },
260 { "tx_collide_2times" },
261 { "tx_collide_3times" },
262 { "tx_collide_4times" },
263 { "tx_collide_5times" },
264 { "tx_collide_6times" },
265 { "tx_collide_7times" },
266 { "tx_collide_8times" },
267 { "tx_collide_9times" },
268 { "tx_collide_10times" },
269 { "tx_collide_11times" },
270 { "tx_collide_12times" },
271 { "tx_collide_13times" },
272 { "tx_collide_14times" },
273 { "tx_collide_15times" },
274 { "tx_ucast_packets" },
275 { "tx_mcast_packets" },
276 { "tx_bcast_packets" },
277 { "tx_carrier_sense_errors" },
278 { "tx_discards" },
279 { "tx_errors" },
280
281 { "dma_writeq_full" },
282 { "dma_write_prioq_full" },
283 { "rxbds_empty" },
284 { "rx_discards" },
285 { "rx_errors" },
286 { "rx_threshold_hit" },
287
288 { "dma_readq_full" },
289 { "dma_read_prioq_full" },
290 { "tx_comp_queue_full" },
291
292 { "ring_set_send_prod_index" },
293 { "ring_status_update" },
294 { "nic_irqs" },
295 { "nic_avoided_irqs" },
296 { "nic_tx_threshold_hit" }
297};
298
50da859d 299static const struct {
4cafd3f5
MC
300 const char string[ETH_GSTRING_LEN];
301} ethtool_test_keys[TG3_NUM_TEST] = {
302 { "nvram test (online) " },
303 { "link test (online) " },
304 { "register test (offline)" },
305 { "memory test (offline)" },
306 { "loopback test (offline)" },
307 { "interrupt test (offline)" },
308};
309
b401e9e2
MC
310static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311{
312 writel(val, tp->regs + off);
313}
314
315static u32 tg3_read32(struct tg3 *tp, u32 off)
316{
6aa20a22 317 return (readl(tp->regs + off));
b401e9e2
MC
318}
319
0d3031d9
MC
320static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321{
322 writel(val, tp->aperegs + off);
323}
324
325static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326{
327 return (readl(tp->aperegs + off));
328}
329
1da177e4
LT
330static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
331{
6892914f
MC
332 unsigned long flags;
333
334 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
335 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 337 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
338}
339
340static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
341{
342 writel(val, tp->regs + off);
343 readl(tp->regs + off);
1da177e4
LT
344}
345
6892914f 346static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 347{
6892914f
MC
348 unsigned long flags;
349 u32 val;
350
351 spin_lock_irqsave(&tp->indirect_lock, flags);
352 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354 spin_unlock_irqrestore(&tp->indirect_lock, flags);
355 return val;
356}
357
358static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
359{
360 unsigned long flags;
361
362 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
363 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
364 TG3_64BIT_REG_LOW, val);
365 return;
366 }
367 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
368 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
369 TG3_64BIT_REG_LOW, val);
370 return;
1da177e4 371 }
6892914f
MC
372
373 spin_lock_irqsave(&tp->indirect_lock, flags);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
375 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
376 spin_unlock_irqrestore(&tp->indirect_lock, flags);
377
378 /* In indirect mode when disabling interrupts, we also need
379 * to clear the interrupt bit in the GRC local ctrl register.
380 */
381 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
382 (val == 0x1)) {
383 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
384 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
385 }
386}
387
388static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
389{
390 unsigned long flags;
391 u32 val;
392
393 spin_lock_irqsave(&tp->indirect_lock, flags);
394 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396 spin_unlock_irqrestore(&tp->indirect_lock, flags);
397 return val;
398}
399
b401e9e2
MC
400/* usec_wait specifies the wait time in usec when writing to certain registers
401 * where it is unsafe to read back the register without some delay.
402 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404 */
405static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
6892914f 406{
b401e9e2
MC
407 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
408 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
409 /* Non-posted methods */
410 tp->write32(tp, off, val);
411 else {
412 /* Posted method */
413 tg3_write32(tp, off, val);
414 if (usec_wait)
415 udelay(usec_wait);
416 tp->read32(tp, off);
417 }
418 /* Wait again after the read for the posted method to guarantee that
419 * the wait time is met.
420 */
421 if (usec_wait)
422 udelay(usec_wait);
1da177e4
LT
423}
424
09ee929c
MC
425static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
426{
427 tp->write32_mbox(tp, off, val);
6892914f
MC
428 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430 tp->read32_mbox(tp, off);
09ee929c
MC
431}
432
20094930 433static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
434{
435 void __iomem *mbox = tp->regs + off;
436 writel(val, mbox);
437 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
438 writel(val, mbox);
439 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
440 readl(mbox);
441}
442
b5d3772c
MC
443static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444{
445 return (readl(tp->regs + off + GRCMBOX_BASE));
446}
447
448static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449{
450 writel(val, tp->regs + off + GRCMBOX_BASE);
451}
452
20094930 453#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 454#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
455#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
456#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 457#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
458
459#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
460#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
461#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 462#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
463
464static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
465{
6892914f
MC
466 unsigned long flags;
467
b5d3772c
MC
468 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
469 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
470 return;
471
6892914f 472 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
473 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 476
bbadf503
MC
477 /* Always leave this as zero. */
478 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
479 } else {
480 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
481 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 482
bbadf503
MC
483 /* Always leave this as zero. */
484 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
485 }
486 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
487}
488
1da177e4
LT
489static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
490{
6892914f
MC
491 unsigned long flags;
492
b5d3772c
MC
493 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
494 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
495 *val = 0;
496 return;
497 }
498
6892914f 499 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
500 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 503
bbadf503
MC
504 /* Always leave this as zero. */
505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
506 } else {
507 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508 *val = tr32(TG3PCI_MEM_WIN_DATA);
509
510 /* Always leave this as zero. */
511 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
512 }
6892914f 513 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
514}
515
0d3031d9
MC
516static void tg3_ape_lock_init(struct tg3 *tp)
517{
518 int i;
519
520 /* Make sure the driver hasn't any stale locks. */
521 for (i = 0; i < 8; i++)
522 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523 APE_LOCK_GRANT_DRIVER);
524}
525
526static int tg3_ape_lock(struct tg3 *tp, int locknum)
527{
528 int i, off;
529 int ret = 0;
530 u32 status;
531
532 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533 return 0;
534
535 switch (locknum) {
536 case TG3_APE_LOCK_MEM:
537 break;
538 default:
539 return -EINVAL;
540 }
541
542 off = 4 * locknum;
543
544 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546 /* Wait for up to 1 millisecond to acquire lock. */
547 for (i = 0; i < 100; i++) {
548 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549 if (status == APE_LOCK_GRANT_DRIVER)
550 break;
551 udelay(10);
552 }
553
554 if (status != APE_LOCK_GRANT_DRIVER) {
555 /* Revoke the lock request. */
556 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557 APE_LOCK_GRANT_DRIVER);
558
559 ret = -EBUSY;
560 }
561
562 return ret;
563}
564
565static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566{
567 int off;
568
569 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570 return;
571
572 switch (locknum) {
573 case TG3_APE_LOCK_MEM:
574 break;
575 default:
576 return;
577 }
578
579 off = 4 * locknum;
580 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581}
582
1da177e4
LT
583static void tg3_disable_ints(struct tg3 *tp)
584{
585 tw32(TG3PCI_MISC_HOST_CTRL,
586 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c 587 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1da177e4
LT
588}
589
590static inline void tg3_cond_int(struct tg3 *tp)
591{
38f3843e
MC
592 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
593 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4 594 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
b5d3772c
MC
595 else
596 tw32(HOSTCC_MODE, tp->coalesce_mode |
597 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
598}
599
600static void tg3_enable_ints(struct tg3 *tp)
601{
bbe832c0
MC
602 tp->irq_sync = 0;
603 wmb();
604
1da177e4
LT
605 tw32(TG3PCI_MISC_HOST_CTRL,
606 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c
MC
607 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
608 (tp->last_tag << 24));
fcfa0a32
MC
609 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
610 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
611 (tp->last_tag << 24));
1da177e4
LT
612 tg3_cond_int(tp);
613}
614
04237ddd
MC
615static inline unsigned int tg3_has_work(struct tg3 *tp)
616{
617 struct tg3_hw_status *sblk = tp->hw_status;
618 unsigned int work_exists = 0;
619
620 /* check for phy events */
621 if (!(tp->tg3_flags &
622 (TG3_FLAG_USE_LINKCHG_REG |
623 TG3_FLAG_POLL_SERDES))) {
624 if (sblk->status & SD_STATUS_LINK_CHG)
625 work_exists = 1;
626 }
627 /* check for RX/TX work to do */
628 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630 work_exists = 1;
631
632 return work_exists;
633}
634
1da177e4 635/* tg3_restart_ints
04237ddd
MC
636 * similar to tg3_enable_ints, but it accurately determines whether there
637 * is new work pending and can return without flushing the PIO write
6aa20a22 638 * which reenables interrupts
1da177e4
LT
639 */
640static void tg3_restart_ints(struct tg3 *tp)
641{
fac9b83e
DM
642 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
643 tp->last_tag << 24);
1da177e4
LT
644 mmiowb();
645
fac9b83e
DM
646 /* When doing tagged status, this work check is unnecessary.
647 * The last_tag we write above tells the chip which piece of
648 * work we've completed.
649 */
650 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
651 tg3_has_work(tp))
04237ddd
MC
652 tw32(HOSTCC_MODE, tp->coalesce_mode |
653 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
654}
655
656static inline void tg3_netif_stop(struct tg3 *tp)
657{
bbe832c0 658 tp->dev->trans_start = jiffies; /* prevent tx timeout */
bea3348e 659 napi_disable(&tp->napi);
1da177e4
LT
660 netif_tx_disable(tp->dev);
661}
662
663static inline void tg3_netif_start(struct tg3 *tp)
664{
665 netif_wake_queue(tp->dev);
666 /* NOTE: unconditional netif_wake_queue is only appropriate
667 * so long as all callers are assured to have free tx slots
668 * (such as after tg3_init_hw)
669 */
bea3348e 670 napi_enable(&tp->napi);
f47c11ee
DM
671 tp->hw_status->status |= SD_STATUS_UPDATED;
672 tg3_enable_ints(tp);
1da177e4
LT
673}
674
675static void tg3_switch_clocks(struct tg3 *tp)
676{
677 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
678 u32 orig_clock_ctrl;
679
795d01c5
MC
680 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
681 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
4cf78e4f
MC
682 return;
683
1da177e4
LT
684 orig_clock_ctrl = clock_ctrl;
685 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
686 CLOCK_CTRL_CLKRUN_OENABLE |
687 0x1f);
688 tp->pci_clock_ctrl = clock_ctrl;
689
690 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
691 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
b401e9e2
MC
692 tw32_wait_f(TG3PCI_CLOCK_CTRL,
693 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1da177e4
LT
694 }
695 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
b401e9e2
MC
696 tw32_wait_f(TG3PCI_CLOCK_CTRL,
697 clock_ctrl |
698 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
699 40);
700 tw32_wait_f(TG3PCI_CLOCK_CTRL,
701 clock_ctrl | (CLOCK_CTRL_ALTCLK),
702 40);
1da177e4 703 }
b401e9e2 704 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1da177e4
LT
705}
706
707#define PHY_BUSY_LOOPS 5000
708
709static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
710{
711 u32 frame_val;
712 unsigned int loops;
713 int ret;
714
715 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
716 tw32_f(MAC_MI_MODE,
717 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
718 udelay(80);
719 }
720
721 *val = 0x0;
722
723 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
724 MI_COM_PHY_ADDR_MASK);
725 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
726 MI_COM_REG_ADDR_MASK);
727 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
6aa20a22 728
1da177e4
LT
729 tw32_f(MAC_MI_COM, frame_val);
730
731 loops = PHY_BUSY_LOOPS;
732 while (loops != 0) {
733 udelay(10);
734 frame_val = tr32(MAC_MI_COM);
735
736 if ((frame_val & MI_COM_BUSY) == 0) {
737 udelay(5);
738 frame_val = tr32(MAC_MI_COM);
739 break;
740 }
741 loops -= 1;
742 }
743
744 ret = -EBUSY;
745 if (loops != 0) {
746 *val = frame_val & MI_COM_DATA_MASK;
747 ret = 0;
748 }
749
750 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
751 tw32_f(MAC_MI_MODE, tp->mi_mode);
752 udelay(80);
753 }
754
755 return ret;
756}
757
758static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
759{
760 u32 frame_val;
761 unsigned int loops;
762 int ret;
763
b5d3772c
MC
764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
765 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
766 return 0;
767
1da177e4
LT
768 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
769 tw32_f(MAC_MI_MODE,
770 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
771 udelay(80);
772 }
773
774 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
775 MI_COM_PHY_ADDR_MASK);
776 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
777 MI_COM_REG_ADDR_MASK);
778 frame_val |= (val & MI_COM_DATA_MASK);
779 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
6aa20a22 780
1da177e4
LT
781 tw32_f(MAC_MI_COM, frame_val);
782
783 loops = PHY_BUSY_LOOPS;
784 while (loops != 0) {
785 udelay(10);
786 frame_val = tr32(MAC_MI_COM);
787 if ((frame_val & MI_COM_BUSY) == 0) {
788 udelay(5);
789 frame_val = tr32(MAC_MI_COM);
790 break;
791 }
792 loops -= 1;
793 }
794
795 ret = -EBUSY;
796 if (loops != 0)
797 ret = 0;
798
799 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
800 tw32_f(MAC_MI_MODE, tp->mi_mode);
801 udelay(80);
802 }
803
804 return ret;
805}
806
9ef8ca99
MC
807static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
808{
809 u32 phy;
810
811 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
812 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
813 return;
814
815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
816 u32 ephy;
817
818 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
819 tg3_writephy(tp, MII_TG3_EPHY_TEST,
820 ephy | MII_TG3_EPHY_SHADOW_EN);
821 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
822 if (enable)
823 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
824 else
825 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
826 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
827 }
828 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
829 }
830 } else {
831 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
832 MII_TG3_AUXCTL_SHDWSEL_MISC;
833 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
834 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
835 if (enable)
836 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
837 else
838 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
839 phy |= MII_TG3_AUXCTL_MISC_WREN;
840 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
841 }
842 }
843}
844
1da177e4
LT
845static void tg3_phy_set_wirespeed(struct tg3 *tp)
846{
847 u32 val;
848
849 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
850 return;
851
852 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
853 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
854 tg3_writephy(tp, MII_TG3_AUX_CTRL,
855 (val | (1 << 15) | (1 << 4)));
856}
857
858static int tg3_bmcr_reset(struct tg3 *tp)
859{
860 u32 phy_control;
861 int limit, err;
862
863 /* OK, reset it, and poll the BMCR_RESET bit until it
864 * clears or we time out.
865 */
866 phy_control = BMCR_RESET;
867 err = tg3_writephy(tp, MII_BMCR, phy_control);
868 if (err != 0)
869 return -EBUSY;
870
871 limit = 5000;
872 while (limit--) {
873 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874 if (err != 0)
875 return -EBUSY;
876
877 if ((phy_control & BMCR_RESET) == 0) {
878 udelay(40);
879 break;
880 }
881 udelay(10);
882 }
883 if (limit <= 0)
884 return -EBUSY;
885
886 return 0;
887}
888
889static int tg3_wait_macro_done(struct tg3 *tp)
890{
891 int limit = 100;
892
893 while (limit--) {
894 u32 tmp32;
895
896 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897 if ((tmp32 & 0x1000) == 0)
898 break;
899 }
900 }
901 if (limit <= 0)
902 return -EBUSY;
903
904 return 0;
905}
906
907static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
908{
909 static const u32 test_pat[4][6] = {
910 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
911 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
912 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
913 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
914 };
915 int chan;
916
917 for (chan = 0; chan < 4; chan++) {
918 int i;
919
920 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
921 (chan * 0x2000) | 0x0200);
922 tg3_writephy(tp, 0x16, 0x0002);
923
924 for (i = 0; i < 6; i++)
925 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
926 test_pat[chan][i]);
927
928 tg3_writephy(tp, 0x16, 0x0202);
929 if (tg3_wait_macro_done(tp)) {
930 *resetp = 1;
931 return -EBUSY;
932 }
933
934 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
935 (chan * 0x2000) | 0x0200);
936 tg3_writephy(tp, 0x16, 0x0082);
937 if (tg3_wait_macro_done(tp)) {
938 *resetp = 1;
939 return -EBUSY;
940 }
941
942 tg3_writephy(tp, 0x16, 0x0802);
943 if (tg3_wait_macro_done(tp)) {
944 *resetp = 1;
945 return -EBUSY;
946 }
947
948 for (i = 0; i < 6; i += 2) {
949 u32 low, high;
950
951 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
952 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
953 tg3_wait_macro_done(tp)) {
954 *resetp = 1;
955 return -EBUSY;
956 }
957 low &= 0x7fff;
958 high &= 0x000f;
959 if (low != test_pat[chan][i] ||
960 high != test_pat[chan][i+1]) {
961 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
962 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
963 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
964
965 return -EBUSY;
966 }
967 }
968 }
969
970 return 0;
971}
972
973static int tg3_phy_reset_chanpat(struct tg3 *tp)
974{
975 int chan;
976
977 for (chan = 0; chan < 4; chan++) {
978 int i;
979
980 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981 (chan * 0x2000) | 0x0200);
982 tg3_writephy(tp, 0x16, 0x0002);
983 for (i = 0; i < 6; i++)
984 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985 tg3_writephy(tp, 0x16, 0x0202);
986 if (tg3_wait_macro_done(tp))
987 return -EBUSY;
988 }
989
990 return 0;
991}
992
993static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
994{
995 u32 reg32, phy9_orig;
996 int retries, do_phy_reset, err;
997
998 retries = 10;
999 do_phy_reset = 1;
1000 do {
1001 if (do_phy_reset) {
1002 err = tg3_bmcr_reset(tp);
1003 if (err)
1004 return err;
1005 do_phy_reset = 0;
1006 }
1007
1008 /* Disable transmitter and interrupt. */
1009 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1010 continue;
1011
1012 reg32 |= 0x3000;
1013 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1014
1015 /* Set full-duplex, 1000 mbps. */
1016 tg3_writephy(tp, MII_BMCR,
1017 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1018
1019 /* Set to master mode. */
1020 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1021 continue;
1022
1023 tg3_writephy(tp, MII_TG3_CTRL,
1024 (MII_TG3_CTRL_AS_MASTER |
1025 MII_TG3_CTRL_ENABLE_AS_MASTER));
1026
1027 /* Enable SM_DSP_CLOCK and 6dB. */
1028 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1029
1030 /* Block the PHY control access. */
1031 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1032 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1033
1034 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1035 if (!err)
1036 break;
1037 } while (--retries);
1038
1039 err = tg3_phy_reset_chanpat(tp);
1040 if (err)
1041 return err;
1042
1043 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1044 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1045
1046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1047 tg3_writephy(tp, 0x16, 0x0000);
1048
1049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1051 /* Set Extended packet length bit for jumbo frames */
1052 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1053 }
1054 else {
1055 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1056 }
1057
1058 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1059
1060 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1061 reg32 &= ~0x3000;
1062 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1063 } else if (!err)
1064 err = -EBUSY;
1065
1066 return err;
1067}
1068
c8e1e82b
MC
1069static void tg3_link_report(struct tg3 *);
1070
1da177e4
LT
1071/* This will reset the tigon3 PHY if there is no valid
1072 * link unless the FORCE argument is non-zero.
1073 */
1074static int tg3_phy_reset(struct tg3 *tp)
1075{
1076 u32 phy_status;
1077 int err;
1078
60189ddf
MC
1079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1080 u32 val;
1081
1082 val = tr32(GRC_MISC_CFG);
1083 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1084 udelay(40);
1085 }
1da177e4
LT
1086 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1087 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1088 if (err != 0)
1089 return -EBUSY;
1090
c8e1e82b
MC
1091 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1092 netif_carrier_off(tp->dev);
1093 tg3_link_report(tp);
1094 }
1095
1da177e4
LT
1096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1099 err = tg3_phy_reset_5703_4_5(tp);
1100 if (err)
1101 return err;
1102 goto out;
1103 }
1104
1105 err = tg3_bmcr_reset(tp);
1106 if (err)
1107 return err;
1108
1109out:
1110 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1111 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1112 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1113 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1114 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1115 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1116 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1117 }
1118 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1119 tg3_writephy(tp, 0x1c, 0x8d68);
1120 tg3_writephy(tp, 0x1c, 0x8d68);
1121 }
1122 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1123 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1124 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1125 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1126 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1127 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1128 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1129 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1130 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1131 }
c424cb24
MC
1132 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1133 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1134 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
c1d2a196
MC
1135 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1136 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1137 tg3_writephy(tp, MII_TG3_TEST1,
1138 MII_TG3_TEST1_TRIM_EN | 0x4);
1139 } else
1140 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
c424cb24
MC
1141 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1142 }
1da177e4
LT
1143 /* Set Extended packet length bit (bit 14) on all chips that */
1144 /* support jumbo frames */
1145 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1146 /* Cannot do read-modify-write on 5401 */
1147 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
0f893dc6 1148 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1149 u32 phy_reg;
1150
1151 /* Set bit 14 with read-modify-write to preserve other bits */
1152 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1153 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1154 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1155 }
1156
1157 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1158 * jumbo frames transmission.
1159 */
0f893dc6 1160 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1161 u32 phy_reg;
1162
1163 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1164 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1165 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1166 }
1167
715116a1 1168 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1
MC
1169 /* adjust output voltage */
1170 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
715116a1
MC
1171 }
1172
9ef8ca99 1173 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
1174 tg3_phy_set_wirespeed(tp);
1175 return 0;
1176}
1177
1178static void tg3_frob_aux_power(struct tg3 *tp)
1179{
1180 struct tg3 *tp_peer = tp;
1181
9d26e213 1182 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1da177e4
LT
1183 return;
1184
8c2dc7e1
MC
1185 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1186 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1187 struct net_device *dev_peer;
1188
1189 dev_peer = pci_get_drvdata(tp->pdev_peer);
bc1c7567 1190 /* remove_one() may have been run on the peer. */
8c2dc7e1 1191 if (!dev_peer)
bc1c7567
MC
1192 tp_peer = tp;
1193 else
1194 tp_peer = netdev_priv(dev_peer);
1da177e4
LT
1195 }
1196
1da177e4 1197 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
6921d201
MC
1198 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1199 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1200 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1da177e4
LT
1201 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1202 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
b401e9e2
MC
1203 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1204 (GRC_LCLCTRL_GPIO_OE0 |
1205 GRC_LCLCTRL_GPIO_OE1 |
1206 GRC_LCLCTRL_GPIO_OE2 |
1207 GRC_LCLCTRL_GPIO_OUTPUT0 |
1208 GRC_LCLCTRL_GPIO_OUTPUT1),
1209 100);
1da177e4
LT
1210 } else {
1211 u32 no_gpio2;
dc56b7d4 1212 u32 grc_local_ctrl = 0;
1da177e4
LT
1213
1214 if (tp_peer != tp &&
1215 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1216 return;
1217
dc56b7d4
MC
1218 /* Workaround to prevent overdrawing Amps. */
1219 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1220 ASIC_REV_5714) {
1221 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
b401e9e2
MC
1222 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1223 grc_local_ctrl, 100);
dc56b7d4
MC
1224 }
1225
1da177e4
LT
1226 /* On 5753 and variants, GPIO2 cannot be used. */
1227 no_gpio2 = tp->nic_sram_data_cfg &
1228 NIC_SRAM_DATA_CFG_NO_GPIO2;
1229
dc56b7d4 1230 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1da177e4
LT
1231 GRC_LCLCTRL_GPIO_OE1 |
1232 GRC_LCLCTRL_GPIO_OE2 |
1233 GRC_LCLCTRL_GPIO_OUTPUT1 |
1234 GRC_LCLCTRL_GPIO_OUTPUT2;
1235 if (no_gpio2) {
1236 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1237 GRC_LCLCTRL_GPIO_OUTPUT2);
1238 }
b401e9e2
MC
1239 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1240 grc_local_ctrl, 100);
1da177e4
LT
1241
1242 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1243
b401e9e2
MC
1244 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1245 grc_local_ctrl, 100);
1da177e4
LT
1246
1247 if (!no_gpio2) {
1248 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
b401e9e2
MC
1249 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1250 grc_local_ctrl, 100);
1da177e4
LT
1251 }
1252 }
1253 } else {
1254 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1255 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1256 if (tp_peer != tp &&
1257 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1258 return;
1259
b401e9e2
MC
1260 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1261 (GRC_LCLCTRL_GPIO_OE1 |
1262 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4 1263
b401e9e2
MC
1264 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1265 GRC_LCLCTRL_GPIO_OE1, 100);
1da177e4 1266
b401e9e2
MC
1267 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1268 (GRC_LCLCTRL_GPIO_OE1 |
1269 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4
LT
1270 }
1271 }
1272}
1273
e8f3f6ca
MC
1274static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1275{
1276 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1277 return 1;
1278 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1279 if (speed != SPEED_10)
1280 return 1;
1281 } else if (speed == SPEED_10)
1282 return 1;
1283
1284 return 0;
1285}
1286
1da177e4
LT
1287static int tg3_setup_phy(struct tg3 *, int);
1288
1289#define RESET_KIND_SHUTDOWN 0
1290#define RESET_KIND_INIT 1
1291#define RESET_KIND_SUSPEND 2
1292
1293static void tg3_write_sig_post_reset(struct tg3 *, int);
1294static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1295static int tg3_nvram_lock(struct tg3 *);
1296static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1297
15c3b696
MC
1298static void tg3_power_down_phy(struct tg3 *tp)
1299{
5129724a
MC
1300 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1302 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1303 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1304
1305 sg_dig_ctrl |=
1306 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1307 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1308 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1309 }
3f7045c1 1310 return;
5129724a 1311 }
3f7045c1 1312
60189ddf
MC
1313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1314 u32 val;
1315
1316 tg3_bmcr_reset(tp);
1317 val = tr32(GRC_MISC_CFG);
1318 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1319 udelay(40);
1320 return;
1321 } else {
715116a1
MC
1322 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1323 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1324 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1325 }
3f7045c1 1326
15c3b696
MC
1327 /* The PHY should not be powered down on some chips because
1328 * of bugs.
1329 */
1330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1332 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1333 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1334 return;
1335 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1336}
1337
bc1c7567 1338static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1339{
1340 u32 misc_host_ctrl;
1341 u16 power_control, power_caps;
1342 int pm = tp->pm_cap;
1343
1344 /* Make sure register accesses (indirect or otherwise)
1345 * will function correctly.
1346 */
1347 pci_write_config_dword(tp->pdev,
1348 TG3PCI_MISC_HOST_CTRL,
1349 tp->misc_host_ctrl);
1350
1351 pci_read_config_word(tp->pdev,
1352 pm + PCI_PM_CTRL,
1353 &power_control);
1354 power_control |= PCI_PM_CTRL_PME_STATUS;
1355 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1356 switch (state) {
bc1c7567 1357 case PCI_D0:
1da177e4
LT
1358 power_control |= 0;
1359 pci_write_config_word(tp->pdev,
1360 pm + PCI_PM_CTRL,
1361 power_control);
8c6bda1a
MC
1362 udelay(100); /* Delay after power state change */
1363
9d26e213
MC
1364 /* Switch out of Vaux if it is a NIC */
1365 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
b401e9e2 1366 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1367
1368 return 0;
1369
bc1c7567 1370 case PCI_D1:
1da177e4
LT
1371 power_control |= 1;
1372 break;
1373
bc1c7567 1374 case PCI_D2:
1da177e4
LT
1375 power_control |= 2;
1376 break;
1377
bc1c7567 1378 case PCI_D3hot:
1da177e4
LT
1379 power_control |= 3;
1380 break;
1381
1382 default:
1383 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1384 "requested.\n",
1385 tp->dev->name, state);
1386 return -EINVAL;
1387 };
1388
1389 power_control |= PCI_PM_CTRL_PME_ENABLE;
1390
1391 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1392 tw32(TG3PCI_MISC_HOST_CTRL,
1393 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1394
1395 if (tp->link_config.phy_is_low_power == 0) {
1396 tp->link_config.phy_is_low_power = 1;
1397 tp->link_config.orig_speed = tp->link_config.speed;
1398 tp->link_config.orig_duplex = tp->link_config.duplex;
1399 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1400 }
1401
747e8f8b 1402 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1403 tp->link_config.speed = SPEED_10;
1404 tp->link_config.duplex = DUPLEX_HALF;
1405 tp->link_config.autoneg = AUTONEG_ENABLE;
1406 tg3_setup_phy(tp, 0);
1407 }
1408
b5d3772c
MC
1409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1410 u32 val;
1411
1412 val = tr32(GRC_VCPU_EXT_CTRL);
1413 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1414 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
6921d201
MC
1415 int i;
1416 u32 val;
1417
1418 for (i = 0; i < 200; i++) {
1419 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1420 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1421 break;
1422 msleep(1);
1423 }
1424 }
a85feb8c
GZ
1425 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1426 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1427 WOL_DRV_STATE_SHUTDOWN |
1428 WOL_DRV_WOL |
1429 WOL_SET_MAGIC_PKT);
6921d201 1430
1da177e4
LT
1431 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1432
1433 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1434 u32 mac_mode;
1435
1436 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1437 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1438 udelay(40);
1439
3f7045c1
MC
1440 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1441 mac_mode = MAC_MODE_PORT_MODE_GMII;
1442 else
1443 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 1444
e8f3f6ca
MC
1445 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1446 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1447 ASIC_REV_5700) {
1448 u32 speed = (tp->tg3_flags &
1449 TG3_FLAG_WOL_SPEED_100MB) ?
1450 SPEED_100 : SPEED_10;
1451 if (tg3_5700_link_polarity(tp, speed))
1452 mac_mode |= MAC_MODE_LINK_POLARITY;
1453 else
1454 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1455 }
1da177e4
LT
1456 } else {
1457 mac_mode = MAC_MODE_PORT_MODE_TBI;
1458 }
1459
cbf46853 1460 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1461 tw32(MAC_LED_CTRL, tp->led_ctrl);
1462
1463 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1464 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1465 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1466
1467 tw32_f(MAC_MODE, mac_mode);
1468 udelay(100);
1469
1470 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1471 udelay(10);
1472 }
1473
1474 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1475 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1477 u32 base_val;
1478
1479 base_val = tp->pci_clock_ctrl;
1480 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1481 CLOCK_CTRL_TXCLK_DISABLE);
1482
b401e9e2
MC
1483 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1484 CLOCK_CTRL_PWRDOWN_PLL133, 40);
d7b0a857 1485 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
795d01c5 1486 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
d7b0a857 1487 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
4cf78e4f 1488 /* do nothing */
85e94ced 1489 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1490 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1491 u32 newbits1, newbits2;
1492
1493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1494 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1495 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1496 CLOCK_CTRL_TXCLK_DISABLE |
1497 CLOCK_CTRL_ALTCLK);
1498 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1499 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1500 newbits1 = CLOCK_CTRL_625_CORE;
1501 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1502 } else {
1503 newbits1 = CLOCK_CTRL_ALTCLK;
1504 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1505 }
1506
b401e9e2
MC
1507 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1508 40);
1da177e4 1509
b401e9e2
MC
1510 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1511 40);
1da177e4
LT
1512
1513 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1514 u32 newbits3;
1515
1516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1518 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1519 CLOCK_CTRL_TXCLK_DISABLE |
1520 CLOCK_CTRL_44MHZ_CORE);
1521 } else {
1522 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1523 }
1524
b401e9e2
MC
1525 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1526 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1527 }
1528 }
1529
6921d201 1530 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
0d3031d9
MC
1531 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1532 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
3f7045c1 1533 tg3_power_down_phy(tp);
6921d201 1534
1da177e4
LT
1535 tg3_frob_aux_power(tp);
1536
1537 /* Workaround for unstable PLL clock */
1538 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1539 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1540 u32 val = tr32(0x7d00);
1541
1542 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1543 tw32(0x7d00, val);
6921d201 1544 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1545 int err;
1546
1547 err = tg3_nvram_lock(tp);
1da177e4 1548 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1549 if (!err)
1550 tg3_nvram_unlock(tp);
6921d201 1551 }
1da177e4
LT
1552 }
1553
bbadf503
MC
1554 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1555
1da177e4
LT
1556 /* Finally, set the new power state. */
1557 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1558 udelay(100); /* Delay after power state change */
1da177e4 1559
1da177e4
LT
1560 return 0;
1561}
1562
1563static void tg3_link_report(struct tg3 *tp)
1564{
1565 if (!netif_carrier_ok(tp->dev)) {
9f88f29f
MC
1566 if (netif_msg_link(tp))
1567 printk(KERN_INFO PFX "%s: Link is down.\n",
1568 tp->dev->name);
1569 } else if (netif_msg_link(tp)) {
1da177e4
LT
1570 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1571 tp->dev->name,
1572 (tp->link_config.active_speed == SPEED_1000 ?
1573 1000 :
1574 (tp->link_config.active_speed == SPEED_100 ?
1575 100 : 10)),
1576 (tp->link_config.active_duplex == DUPLEX_FULL ?
1577 "full" : "half"));
1578
1579 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1580 "%s for RX.\n",
1581 tp->dev->name,
1582 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1583 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1584 }
1585}
1586
1587static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1588{
1589 u32 new_tg3_flags = 0;
1590 u32 old_rx_mode = tp->rx_mode;
1591 u32 old_tx_mode = tp->tx_mode;
1592
1593 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1594
1595 /* Convert 1000BaseX flow control bits to 1000BaseT
1596 * bits before resolving flow control.
1597 */
1598 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1599 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1600 ADVERTISE_PAUSE_ASYM);
1601 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1602
1603 if (local_adv & ADVERTISE_1000XPAUSE)
1604 local_adv |= ADVERTISE_PAUSE_CAP;
1605 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1606 local_adv |= ADVERTISE_PAUSE_ASYM;
1607 if (remote_adv & LPA_1000XPAUSE)
1608 remote_adv |= LPA_PAUSE_CAP;
1609 if (remote_adv & LPA_1000XPAUSE_ASYM)
1610 remote_adv |= LPA_PAUSE_ASYM;
1611 }
1612
1da177e4
LT
1613 if (local_adv & ADVERTISE_PAUSE_CAP) {
1614 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1615 if (remote_adv & LPA_PAUSE_CAP)
1616 new_tg3_flags |=
1617 (TG3_FLAG_RX_PAUSE |
1618 TG3_FLAG_TX_PAUSE);
1619 else if (remote_adv & LPA_PAUSE_ASYM)
1620 new_tg3_flags |=
1621 (TG3_FLAG_RX_PAUSE);
1622 } else {
1623 if (remote_adv & LPA_PAUSE_CAP)
1624 new_tg3_flags |=
1625 (TG3_FLAG_RX_PAUSE |
1626 TG3_FLAG_TX_PAUSE);
1627 }
1628 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1629 if ((remote_adv & LPA_PAUSE_CAP) &&
1630 (remote_adv & LPA_PAUSE_ASYM))
1631 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1632 }
1633
1634 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1635 tp->tg3_flags |= new_tg3_flags;
1636 } else {
1637 new_tg3_flags = tp->tg3_flags;
1638 }
1639
1640 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1641 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1642 else
1643 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1644
1645 if (old_rx_mode != tp->rx_mode) {
1646 tw32_f(MAC_RX_MODE, tp->rx_mode);
1647 }
6aa20a22 1648
1da177e4
LT
1649 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1650 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1651 else
1652 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1653
1654 if (old_tx_mode != tp->tx_mode) {
1655 tw32_f(MAC_TX_MODE, tp->tx_mode);
1656 }
1657}
1658
1659static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1660{
1661 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1662 case MII_TG3_AUX_STAT_10HALF:
1663 *speed = SPEED_10;
1664 *duplex = DUPLEX_HALF;
1665 break;
1666
1667 case MII_TG3_AUX_STAT_10FULL:
1668 *speed = SPEED_10;
1669 *duplex = DUPLEX_FULL;
1670 break;
1671
1672 case MII_TG3_AUX_STAT_100HALF:
1673 *speed = SPEED_100;
1674 *duplex = DUPLEX_HALF;
1675 break;
1676
1677 case MII_TG3_AUX_STAT_100FULL:
1678 *speed = SPEED_100;
1679 *duplex = DUPLEX_FULL;
1680 break;
1681
1682 case MII_TG3_AUX_STAT_1000HALF:
1683 *speed = SPEED_1000;
1684 *duplex = DUPLEX_HALF;
1685 break;
1686
1687 case MII_TG3_AUX_STAT_1000FULL:
1688 *speed = SPEED_1000;
1689 *duplex = DUPLEX_FULL;
1690 break;
1691
1692 default:
715116a1
MC
1693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1694 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1695 SPEED_10;
1696 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1697 DUPLEX_HALF;
1698 break;
1699 }
1da177e4
LT
1700 *speed = SPEED_INVALID;
1701 *duplex = DUPLEX_INVALID;
1702 break;
1703 };
1704}
1705
1706static void tg3_phy_copper_begin(struct tg3 *tp)
1707{
1708 u32 new_adv;
1709 int i;
1710
1711 if (tp->link_config.phy_is_low_power) {
1712 /* Entering low power mode. Disable gigabit and
1713 * 100baseT advertisements.
1714 */
1715 tg3_writephy(tp, MII_TG3_CTRL, 0);
1716
1717 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1718 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1719 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1720 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1721
1722 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1723 } else if (tp->link_config.speed == SPEED_INVALID) {
1da177e4
LT
1724 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1725 tp->link_config.advertising &=
1726 ~(ADVERTISED_1000baseT_Half |
1727 ADVERTISED_1000baseT_Full);
1728
1729 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1730 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1731 new_adv |= ADVERTISE_10HALF;
1732 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1733 new_adv |= ADVERTISE_10FULL;
1734 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1735 new_adv |= ADVERTISE_100HALF;
1736 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1737 new_adv |= ADVERTISE_100FULL;
1738 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1739
1740 if (tp->link_config.advertising &
1741 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1742 new_adv = 0;
1743 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1744 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1745 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1746 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1747 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1748 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1749 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1750 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1751 MII_TG3_CTRL_ENABLE_AS_MASTER);
1752 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1753 } else {
1754 tg3_writephy(tp, MII_TG3_CTRL, 0);
1755 }
1756 } else {
1757 /* Asking for a specific link mode. */
1758 if (tp->link_config.speed == SPEED_1000) {
1759 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1760 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1761
1762 if (tp->link_config.duplex == DUPLEX_FULL)
1763 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1764 else
1765 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1766 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1767 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1768 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1769 MII_TG3_CTRL_ENABLE_AS_MASTER);
1770 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1771 } else {
1772 tg3_writephy(tp, MII_TG3_CTRL, 0);
1773
1774 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1775 if (tp->link_config.speed == SPEED_100) {
1776 if (tp->link_config.duplex == DUPLEX_FULL)
1777 new_adv |= ADVERTISE_100FULL;
1778 else
1779 new_adv |= ADVERTISE_100HALF;
1780 } else {
1781 if (tp->link_config.duplex == DUPLEX_FULL)
1782 new_adv |= ADVERTISE_10FULL;
1783 else
1784 new_adv |= ADVERTISE_10HALF;
1785 }
1786 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1787 }
1788 }
1789
1790 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1791 tp->link_config.speed != SPEED_INVALID) {
1792 u32 bmcr, orig_bmcr;
1793
1794 tp->link_config.active_speed = tp->link_config.speed;
1795 tp->link_config.active_duplex = tp->link_config.duplex;
1796
1797 bmcr = 0;
1798 switch (tp->link_config.speed) {
1799 default:
1800 case SPEED_10:
1801 break;
1802
1803 case SPEED_100:
1804 bmcr |= BMCR_SPEED100;
1805 break;
1806
1807 case SPEED_1000:
1808 bmcr |= TG3_BMCR_SPEED1000;
1809 break;
1810 };
1811
1812 if (tp->link_config.duplex == DUPLEX_FULL)
1813 bmcr |= BMCR_FULLDPLX;
1814
1815 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1816 (bmcr != orig_bmcr)) {
1817 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1818 for (i = 0; i < 1500; i++) {
1819 u32 tmp;
1820
1821 udelay(10);
1822 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1823 tg3_readphy(tp, MII_BMSR, &tmp))
1824 continue;
1825 if (!(tmp & BMSR_LSTATUS)) {
1826 udelay(40);
1827 break;
1828 }
1829 }
1830 tg3_writephy(tp, MII_BMCR, bmcr);
1831 udelay(40);
1832 }
1833 } else {
1834 tg3_writephy(tp, MII_BMCR,
1835 BMCR_ANENABLE | BMCR_ANRESTART);
1836 }
1837}
1838
1839static int tg3_init_5401phy_dsp(struct tg3 *tp)
1840{
1841 int err;
1842
1843 /* Turn off tap power management. */
1844 /* Set Extended packet length bit */
1845 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1846
1847 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1848 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1849
1850 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1851 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1852
1853 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1854 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1855
1856 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1857 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1858
1859 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1860 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1861
1862 udelay(40);
1863
1864 return err;
1865}
1866
3600d918 1867static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1da177e4 1868{
3600d918
MC
1869 u32 adv_reg, all_mask = 0;
1870
1871 if (mask & ADVERTISED_10baseT_Half)
1872 all_mask |= ADVERTISE_10HALF;
1873 if (mask & ADVERTISED_10baseT_Full)
1874 all_mask |= ADVERTISE_10FULL;
1875 if (mask & ADVERTISED_100baseT_Half)
1876 all_mask |= ADVERTISE_100HALF;
1877 if (mask & ADVERTISED_100baseT_Full)
1878 all_mask |= ADVERTISE_100FULL;
1da177e4
LT
1879
1880 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1881 return 0;
1882
1da177e4
LT
1883 if ((adv_reg & all_mask) != all_mask)
1884 return 0;
1885 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1886 u32 tg3_ctrl;
1887
3600d918
MC
1888 all_mask = 0;
1889 if (mask & ADVERTISED_1000baseT_Half)
1890 all_mask |= ADVERTISE_1000HALF;
1891 if (mask & ADVERTISED_1000baseT_Full)
1892 all_mask |= ADVERTISE_1000FULL;
1893
1da177e4
LT
1894 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1895 return 0;
1896
1da177e4
LT
1897 if ((tg3_ctrl & all_mask) != all_mask)
1898 return 0;
1899 }
1900 return 1;
1901}
1902
1903static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1904{
1905 int current_link_up;
1906 u32 bmsr, dummy;
1907 u16 current_speed;
1908 u8 current_duplex;
1909 int i, err;
1910
1911 tw32(MAC_EVENT, 0);
1912
1913 tw32_f(MAC_STATUS,
1914 (MAC_STATUS_SYNC_CHANGED |
1915 MAC_STATUS_CFG_CHANGED |
1916 MAC_STATUS_MI_COMPLETION |
1917 MAC_STATUS_LNKSTATE_CHANGED));
1918 udelay(40);
1919
1920 tp->mi_mode = MAC_MI_MODE_BASE;
1921 tw32_f(MAC_MI_MODE, tp->mi_mode);
1922 udelay(80);
1923
1924 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1925
1926 /* Some third-party PHYs need to be reset on link going
1927 * down.
1928 */
1929 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1932 netif_carrier_ok(tp->dev)) {
1933 tg3_readphy(tp, MII_BMSR, &bmsr);
1934 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1935 !(bmsr & BMSR_LSTATUS))
1936 force_reset = 1;
1937 }
1938 if (force_reset)
1939 tg3_phy_reset(tp);
1940
1941 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1942 tg3_readphy(tp, MII_BMSR, &bmsr);
1943 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1944 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1945 bmsr = 0;
1946
1947 if (!(bmsr & BMSR_LSTATUS)) {
1948 err = tg3_init_5401phy_dsp(tp);
1949 if (err)
1950 return err;
1951
1952 tg3_readphy(tp, MII_BMSR, &bmsr);
1953 for (i = 0; i < 1000; i++) {
1954 udelay(10);
1955 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1956 (bmsr & BMSR_LSTATUS)) {
1957 udelay(40);
1958 break;
1959 }
1960 }
1961
1962 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1963 !(bmsr & BMSR_LSTATUS) &&
1964 tp->link_config.active_speed == SPEED_1000) {
1965 err = tg3_phy_reset(tp);
1966 if (!err)
1967 err = tg3_init_5401phy_dsp(tp);
1968 if (err)
1969 return err;
1970 }
1971 }
1972 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1973 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1974 /* 5701 {A0,B0} CRC bug workaround */
1975 tg3_writephy(tp, 0x15, 0x0a75);
1976 tg3_writephy(tp, 0x1c, 0x8c68);
1977 tg3_writephy(tp, 0x1c, 0x8d68);
1978 tg3_writephy(tp, 0x1c, 0x8c68);
1979 }
1980
1981 /* Clear pending interrupts... */
1982 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1983 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1984
1985 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1986 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
715116a1 1987 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1da177e4
LT
1988 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1989
1990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1992 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1993 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1994 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1995 else
1996 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1997 }
1998
1999 current_link_up = 0;
2000 current_speed = SPEED_INVALID;
2001 current_duplex = DUPLEX_INVALID;
2002
2003 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2004 u32 val;
2005
2006 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2007 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2008 if (!(val & (1 << 10))) {
2009 val |= (1 << 10);
2010 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2011 goto relink;
2012 }
2013 }
2014
2015 bmsr = 0;
2016 for (i = 0; i < 100; i++) {
2017 tg3_readphy(tp, MII_BMSR, &bmsr);
2018 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2019 (bmsr & BMSR_LSTATUS))
2020 break;
2021 udelay(40);
2022 }
2023
2024 if (bmsr & BMSR_LSTATUS) {
2025 u32 aux_stat, bmcr;
2026
2027 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2028 for (i = 0; i < 2000; i++) {
2029 udelay(10);
2030 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2031 aux_stat)
2032 break;
2033 }
2034
2035 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2036 &current_speed,
2037 &current_duplex);
2038
2039 bmcr = 0;
2040 for (i = 0; i < 200; i++) {
2041 tg3_readphy(tp, MII_BMCR, &bmcr);
2042 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2043 continue;
2044 if (bmcr && bmcr != 0x7fff)
2045 break;
2046 udelay(10);
2047 }
2048
2049 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2050 if (bmcr & BMCR_ANENABLE) {
2051 current_link_up = 1;
2052
2053 /* Force autoneg restart if we are exiting
2054 * low power mode.
2055 */
3600d918
MC
2056 if (!tg3_copper_is_advertising_all(tp,
2057 tp->link_config.advertising))
1da177e4
LT
2058 current_link_up = 0;
2059 } else {
2060 current_link_up = 0;
2061 }
2062 } else {
2063 if (!(bmcr & BMCR_ANENABLE) &&
2064 tp->link_config.speed == current_speed &&
2065 tp->link_config.duplex == current_duplex) {
2066 current_link_up = 1;
2067 } else {
2068 current_link_up = 0;
2069 }
2070 }
2071
2072 tp->link_config.active_speed = current_speed;
2073 tp->link_config.active_duplex = current_duplex;
2074 }
2075
2076 if (current_link_up == 1 &&
2077 (tp->link_config.active_duplex == DUPLEX_FULL) &&
2078 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2079 u32 local_adv, remote_adv;
2080
2081 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2082 local_adv = 0;
2083 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2084
2085 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2086 remote_adv = 0;
2087
2088 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2089
2090 /* If we are not advertising full pause capability,
2091 * something is wrong. Bring the link down and reconfigure.
2092 */
2093 if (local_adv != ADVERTISE_PAUSE_CAP) {
2094 current_link_up = 0;
2095 } else {
2096 tg3_setup_flow_control(tp, local_adv, remote_adv);
2097 }
2098 }
2099relink:
6921d201 2100 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1da177e4
LT
2101 u32 tmp;
2102
2103 tg3_phy_copper_begin(tp);
2104
2105 tg3_readphy(tp, MII_BMSR, &tmp);
2106 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2107 (tmp & BMSR_LSTATUS))
2108 current_link_up = 1;
2109 }
2110
2111 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2112 if (current_link_up == 1) {
2113 if (tp->link_config.active_speed == SPEED_100 ||
2114 tp->link_config.active_speed == SPEED_10)
2115 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2116 else
2117 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2118 } else
2119 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2120
2121 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2122 if (tp->link_config.active_duplex == DUPLEX_HALF)
2123 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2124
1da177e4 2125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
2126 if (current_link_up == 1 &&
2127 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 2128 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
2129 else
2130 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
2131 }
2132
2133 /* ??? Without this setting Netgear GA302T PHY does not
2134 * ??? send/receive packets...
2135 */
2136 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2137 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2138 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2139 tw32_f(MAC_MI_MODE, tp->mi_mode);
2140 udelay(80);
2141 }
2142
2143 tw32_f(MAC_MODE, tp->mac_mode);
2144 udelay(40);
2145
2146 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2147 /* Polled via timer. */
2148 tw32_f(MAC_EVENT, 0);
2149 } else {
2150 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2151 }
2152 udelay(40);
2153
2154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2155 current_link_up == 1 &&
2156 tp->link_config.active_speed == SPEED_1000 &&
2157 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2158 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2159 udelay(120);
2160 tw32_f(MAC_STATUS,
2161 (MAC_STATUS_SYNC_CHANGED |
2162 MAC_STATUS_CFG_CHANGED));
2163 udelay(40);
2164 tg3_write_mem(tp,
2165 NIC_SRAM_FIRMWARE_MBOX,
2166 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2167 }
2168
2169 if (current_link_up != netif_carrier_ok(tp->dev)) {
2170 if (current_link_up)
2171 netif_carrier_on(tp->dev);
2172 else
2173 netif_carrier_off(tp->dev);
2174 tg3_link_report(tp);
2175 }
2176
2177 return 0;
2178}
2179
2180struct tg3_fiber_aneginfo {
2181 int state;
2182#define ANEG_STATE_UNKNOWN 0
2183#define ANEG_STATE_AN_ENABLE 1
2184#define ANEG_STATE_RESTART_INIT 2
2185#define ANEG_STATE_RESTART 3
2186#define ANEG_STATE_DISABLE_LINK_OK 4
2187#define ANEG_STATE_ABILITY_DETECT_INIT 5
2188#define ANEG_STATE_ABILITY_DETECT 6
2189#define ANEG_STATE_ACK_DETECT_INIT 7
2190#define ANEG_STATE_ACK_DETECT 8
2191#define ANEG_STATE_COMPLETE_ACK_INIT 9
2192#define ANEG_STATE_COMPLETE_ACK 10
2193#define ANEG_STATE_IDLE_DETECT_INIT 11
2194#define ANEG_STATE_IDLE_DETECT 12
2195#define ANEG_STATE_LINK_OK 13
2196#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2197#define ANEG_STATE_NEXT_PAGE_WAIT 15
2198
2199 u32 flags;
2200#define MR_AN_ENABLE 0x00000001
2201#define MR_RESTART_AN 0x00000002
2202#define MR_AN_COMPLETE 0x00000004
2203#define MR_PAGE_RX 0x00000008
2204#define MR_NP_LOADED 0x00000010
2205#define MR_TOGGLE_TX 0x00000020
2206#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2207#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2208#define MR_LP_ADV_SYM_PAUSE 0x00000100
2209#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2210#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2211#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2212#define MR_LP_ADV_NEXT_PAGE 0x00001000
2213#define MR_TOGGLE_RX 0x00002000
2214#define MR_NP_RX 0x00004000
2215
2216#define MR_LINK_OK 0x80000000
2217
2218 unsigned long link_time, cur_time;
2219
2220 u32 ability_match_cfg;
2221 int ability_match_count;
2222
2223 char ability_match, idle_match, ack_match;
2224
2225 u32 txconfig, rxconfig;
2226#define ANEG_CFG_NP 0x00000080
2227#define ANEG_CFG_ACK 0x00000040
2228#define ANEG_CFG_RF2 0x00000020
2229#define ANEG_CFG_RF1 0x00000010
2230#define ANEG_CFG_PS2 0x00000001
2231#define ANEG_CFG_PS1 0x00008000
2232#define ANEG_CFG_HD 0x00004000
2233#define ANEG_CFG_FD 0x00002000
2234#define ANEG_CFG_INVAL 0x00001f06
2235
2236};
2237#define ANEG_OK 0
2238#define ANEG_DONE 1
2239#define ANEG_TIMER_ENAB 2
2240#define ANEG_FAILED -1
2241
2242#define ANEG_STATE_SETTLE_TIME 10000
2243
2244static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2245 struct tg3_fiber_aneginfo *ap)
2246{
2247 unsigned long delta;
2248 u32 rx_cfg_reg;
2249 int ret;
2250
2251 if (ap->state == ANEG_STATE_UNKNOWN) {
2252 ap->rxconfig = 0;
2253 ap->link_time = 0;
2254 ap->cur_time = 0;
2255 ap->ability_match_cfg = 0;
2256 ap->ability_match_count = 0;
2257 ap->ability_match = 0;
2258 ap->idle_match = 0;
2259 ap->ack_match = 0;
2260 }
2261 ap->cur_time++;
2262
2263 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2264 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2265
2266 if (rx_cfg_reg != ap->ability_match_cfg) {
2267 ap->ability_match_cfg = rx_cfg_reg;
2268 ap->ability_match = 0;
2269 ap->ability_match_count = 0;
2270 } else {
2271 if (++ap->ability_match_count > 1) {
2272 ap->ability_match = 1;
2273 ap->ability_match_cfg = rx_cfg_reg;
2274 }
2275 }
2276 if (rx_cfg_reg & ANEG_CFG_ACK)
2277 ap->ack_match = 1;
2278 else
2279 ap->ack_match = 0;
2280
2281 ap->idle_match = 0;
2282 } else {
2283 ap->idle_match = 1;
2284 ap->ability_match_cfg = 0;
2285 ap->ability_match_count = 0;
2286 ap->ability_match = 0;
2287 ap->ack_match = 0;
2288
2289 rx_cfg_reg = 0;
2290 }
2291
2292 ap->rxconfig = rx_cfg_reg;
2293 ret = ANEG_OK;
2294
2295 switch(ap->state) {
2296 case ANEG_STATE_UNKNOWN:
2297 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2298 ap->state = ANEG_STATE_AN_ENABLE;
2299
2300 /* fallthru */
2301 case ANEG_STATE_AN_ENABLE:
2302 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2303 if (ap->flags & MR_AN_ENABLE) {
2304 ap->link_time = 0;
2305 ap->cur_time = 0;
2306 ap->ability_match_cfg = 0;
2307 ap->ability_match_count = 0;
2308 ap->ability_match = 0;
2309 ap->idle_match = 0;
2310 ap->ack_match = 0;
2311
2312 ap->state = ANEG_STATE_RESTART_INIT;
2313 } else {
2314 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2315 }
2316 break;
2317
2318 case ANEG_STATE_RESTART_INIT:
2319 ap->link_time = ap->cur_time;
2320 ap->flags &= ~(MR_NP_LOADED);
2321 ap->txconfig = 0;
2322 tw32(MAC_TX_AUTO_NEG, 0);
2323 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2324 tw32_f(MAC_MODE, tp->mac_mode);
2325 udelay(40);
2326
2327 ret = ANEG_TIMER_ENAB;
2328 ap->state = ANEG_STATE_RESTART;
2329
2330 /* fallthru */
2331 case ANEG_STATE_RESTART:
2332 delta = ap->cur_time - ap->link_time;
2333 if (delta > ANEG_STATE_SETTLE_TIME) {
2334 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2335 } else {
2336 ret = ANEG_TIMER_ENAB;
2337 }
2338 break;
2339
2340 case ANEG_STATE_DISABLE_LINK_OK:
2341 ret = ANEG_DONE;
2342 break;
2343
2344 case ANEG_STATE_ABILITY_DETECT_INIT:
2345 ap->flags &= ~(MR_TOGGLE_TX);
2346 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2347 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2348 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2349 tw32_f(MAC_MODE, tp->mac_mode);
2350 udelay(40);
2351
2352 ap->state = ANEG_STATE_ABILITY_DETECT;
2353 break;
2354
2355 case ANEG_STATE_ABILITY_DETECT:
2356 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2357 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2358 }
2359 break;
2360
2361 case ANEG_STATE_ACK_DETECT_INIT:
2362 ap->txconfig |= ANEG_CFG_ACK;
2363 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2364 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2365 tw32_f(MAC_MODE, tp->mac_mode);
2366 udelay(40);
2367
2368 ap->state = ANEG_STATE_ACK_DETECT;
2369
2370 /* fallthru */
2371 case ANEG_STATE_ACK_DETECT:
2372 if (ap->ack_match != 0) {
2373 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2374 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2375 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2376 } else {
2377 ap->state = ANEG_STATE_AN_ENABLE;
2378 }
2379 } else if (ap->ability_match != 0 &&
2380 ap->rxconfig == 0) {
2381 ap->state = ANEG_STATE_AN_ENABLE;
2382 }
2383 break;
2384
2385 case ANEG_STATE_COMPLETE_ACK_INIT:
2386 if (ap->rxconfig & ANEG_CFG_INVAL) {
2387 ret = ANEG_FAILED;
2388 break;
2389 }
2390 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2391 MR_LP_ADV_HALF_DUPLEX |
2392 MR_LP_ADV_SYM_PAUSE |
2393 MR_LP_ADV_ASYM_PAUSE |
2394 MR_LP_ADV_REMOTE_FAULT1 |
2395 MR_LP_ADV_REMOTE_FAULT2 |
2396 MR_LP_ADV_NEXT_PAGE |
2397 MR_TOGGLE_RX |
2398 MR_NP_RX);
2399 if (ap->rxconfig & ANEG_CFG_FD)
2400 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2401 if (ap->rxconfig & ANEG_CFG_HD)
2402 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2403 if (ap->rxconfig & ANEG_CFG_PS1)
2404 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2405 if (ap->rxconfig & ANEG_CFG_PS2)
2406 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2407 if (ap->rxconfig & ANEG_CFG_RF1)
2408 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2409 if (ap->rxconfig & ANEG_CFG_RF2)
2410 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2411 if (ap->rxconfig & ANEG_CFG_NP)
2412 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2413
2414 ap->link_time = ap->cur_time;
2415
2416 ap->flags ^= (MR_TOGGLE_TX);
2417 if (ap->rxconfig & 0x0008)
2418 ap->flags |= MR_TOGGLE_RX;
2419 if (ap->rxconfig & ANEG_CFG_NP)
2420 ap->flags |= MR_NP_RX;
2421 ap->flags |= MR_PAGE_RX;
2422
2423 ap->state = ANEG_STATE_COMPLETE_ACK;
2424 ret = ANEG_TIMER_ENAB;
2425 break;
2426
2427 case ANEG_STATE_COMPLETE_ACK:
2428 if (ap->ability_match != 0 &&
2429 ap->rxconfig == 0) {
2430 ap->state = ANEG_STATE_AN_ENABLE;
2431 break;
2432 }
2433 delta = ap->cur_time - ap->link_time;
2434 if (delta > ANEG_STATE_SETTLE_TIME) {
2435 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2436 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2437 } else {
2438 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2439 !(ap->flags & MR_NP_RX)) {
2440 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2441 } else {
2442 ret = ANEG_FAILED;
2443 }
2444 }
2445 }
2446 break;
2447
2448 case ANEG_STATE_IDLE_DETECT_INIT:
2449 ap->link_time = ap->cur_time;
2450 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2451 tw32_f(MAC_MODE, tp->mac_mode);
2452 udelay(40);
2453
2454 ap->state = ANEG_STATE_IDLE_DETECT;
2455 ret = ANEG_TIMER_ENAB;
2456 break;
2457
2458 case ANEG_STATE_IDLE_DETECT:
2459 if (ap->ability_match != 0 &&
2460 ap->rxconfig == 0) {
2461 ap->state = ANEG_STATE_AN_ENABLE;
2462 break;
2463 }
2464 delta = ap->cur_time - ap->link_time;
2465 if (delta > ANEG_STATE_SETTLE_TIME) {
2466 /* XXX another gem from the Broadcom driver :( */
2467 ap->state = ANEG_STATE_LINK_OK;
2468 }
2469 break;
2470
2471 case ANEG_STATE_LINK_OK:
2472 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2473 ret = ANEG_DONE;
2474 break;
2475
2476 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2477 /* ??? unimplemented */
2478 break;
2479
2480 case ANEG_STATE_NEXT_PAGE_WAIT:
2481 /* ??? unimplemented */
2482 break;
2483
2484 default:
2485 ret = ANEG_FAILED;
2486 break;
2487 };
2488
2489 return ret;
2490}
2491
2492static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2493{
2494 int res = 0;
2495 struct tg3_fiber_aneginfo aninfo;
2496 int status = ANEG_FAILED;
2497 unsigned int tick;
2498 u32 tmp;
2499
2500 tw32_f(MAC_TX_AUTO_NEG, 0);
2501
2502 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2503 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2504 udelay(40);
2505
2506 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2507 udelay(40);
2508
2509 memset(&aninfo, 0, sizeof(aninfo));
2510 aninfo.flags |= MR_AN_ENABLE;
2511 aninfo.state = ANEG_STATE_UNKNOWN;
2512 aninfo.cur_time = 0;
2513 tick = 0;
2514 while (++tick < 195000) {
2515 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2516 if (status == ANEG_DONE || status == ANEG_FAILED)
2517 break;
2518
2519 udelay(1);
2520 }
2521
2522 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2523 tw32_f(MAC_MODE, tp->mac_mode);
2524 udelay(40);
2525
2526 *flags = aninfo.flags;
2527
2528 if (status == ANEG_DONE &&
2529 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2530 MR_LP_ADV_FULL_DUPLEX)))
2531 res = 1;
2532
2533 return res;
2534}
2535
2536static void tg3_init_bcm8002(struct tg3 *tp)
2537{
2538 u32 mac_status = tr32(MAC_STATUS);
2539 int i;
2540
2541 /* Reset when initting first time or we have a link. */
2542 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2543 !(mac_status & MAC_STATUS_PCS_SYNCED))
2544 return;
2545
2546 /* Set PLL lock range. */
2547 tg3_writephy(tp, 0x16, 0x8007);
2548
2549 /* SW reset */
2550 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2551
2552 /* Wait for reset to complete. */
2553 /* XXX schedule_timeout() ... */
2554 for (i = 0; i < 500; i++)
2555 udelay(10);
2556
2557 /* Config mode; select PMA/Ch 1 regs. */
2558 tg3_writephy(tp, 0x10, 0x8411);
2559
2560 /* Enable auto-lock and comdet, select txclk for tx. */
2561 tg3_writephy(tp, 0x11, 0x0a10);
2562
2563 tg3_writephy(tp, 0x18, 0x00a0);
2564 tg3_writephy(tp, 0x16, 0x41ff);
2565
2566 /* Assert and deassert POR. */
2567 tg3_writephy(tp, 0x13, 0x0400);
2568 udelay(40);
2569 tg3_writephy(tp, 0x13, 0x0000);
2570
2571 tg3_writephy(tp, 0x11, 0x0a50);
2572 udelay(40);
2573 tg3_writephy(tp, 0x11, 0x0a10);
2574
2575 /* Wait for signal to stabilize */
2576 /* XXX schedule_timeout() ... */
2577 for (i = 0; i < 15000; i++)
2578 udelay(10);
2579
2580 /* Deselect the channel register so we can read the PHYID
2581 * later.
2582 */
2583 tg3_writephy(tp, 0x10, 0x8011);
2584}
2585
2586static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2587{
2588 u32 sg_dig_ctrl, sg_dig_status;
2589 u32 serdes_cfg, expected_sg_dig_ctrl;
2590 int workaround, port_a;
2591 int current_link_up;
2592
2593 serdes_cfg = 0;
2594 expected_sg_dig_ctrl = 0;
2595 workaround = 0;
2596 port_a = 1;
2597 current_link_up = 0;
2598
2599 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2600 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2601 workaround = 1;
2602 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2603 port_a = 0;
2604
2605 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2606 /* preserve bits 20-23 for voltage regulator */
2607 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2608 }
2609
2610 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2611
2612 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2613 if (sg_dig_ctrl & (1 << 31)) {
2614 if (workaround) {
2615 u32 val = serdes_cfg;
2616
2617 if (port_a)
2618 val |= 0xc010000;
2619 else
2620 val |= 0x4010000;
2621 tw32_f(MAC_SERDES_CFG, val);
2622 }
2623 tw32_f(SG_DIG_CTRL, 0x01388400);
2624 }
2625 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2626 tg3_setup_flow_control(tp, 0, 0);
2627 current_link_up = 1;
2628 }
2629 goto out;
2630 }
2631
2632 /* Want auto-negotiation. */
2633 expected_sg_dig_ctrl = 0x81388400;
2634
2635 /* Pause capability */
2636 expected_sg_dig_ctrl |= (1 << 11);
2637
2638 /* Asymettric pause */
2639 expected_sg_dig_ctrl |= (1 << 12);
2640
2641 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3d3ebe74
MC
2642 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2643 tp->serdes_counter &&
2644 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2645 MAC_STATUS_RCVD_CFG)) ==
2646 MAC_STATUS_PCS_SYNCED)) {
2647 tp->serdes_counter--;
2648 current_link_up = 1;
2649 goto out;
2650 }
2651restart_autoneg:
1da177e4
LT
2652 if (workaround)
2653 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2654 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2655 udelay(5);
2656 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2657
3d3ebe74
MC
2658 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2659 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1da177e4
LT
2660 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2661 MAC_STATUS_SIGNAL_DET)) {
3d3ebe74 2662 sg_dig_status = tr32(SG_DIG_STATUS);
1da177e4
LT
2663 mac_status = tr32(MAC_STATUS);
2664
2665 if ((sg_dig_status & (1 << 1)) &&
2666 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2667 u32 local_adv, remote_adv;
2668
2669 local_adv = ADVERTISE_PAUSE_CAP;
2670 remote_adv = 0;
2671 if (sg_dig_status & (1 << 19))
2672 remote_adv |= LPA_PAUSE_CAP;
2673 if (sg_dig_status & (1 << 20))
2674 remote_adv |= LPA_PAUSE_ASYM;
2675
2676 tg3_setup_flow_control(tp, local_adv, remote_adv);
2677 current_link_up = 1;
3d3ebe74
MC
2678 tp->serdes_counter = 0;
2679 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1da177e4 2680 } else if (!(sg_dig_status & (1 << 1))) {
3d3ebe74
MC
2681 if (tp->serdes_counter)
2682 tp->serdes_counter--;
1da177e4
LT
2683 else {
2684 if (workaround) {
2685 u32 val = serdes_cfg;
2686
2687 if (port_a)
2688 val |= 0xc010000;
2689 else
2690 val |= 0x4010000;
2691
2692 tw32_f(MAC_SERDES_CFG, val);
2693 }
2694
2695 tw32_f(SG_DIG_CTRL, 0x01388400);
2696 udelay(40);
2697
2698 /* Link parallel detection - link is up */
2699 /* only if we have PCS_SYNC and not */
2700 /* receiving config code words */
2701 mac_status = tr32(MAC_STATUS);
2702 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2703 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2704 tg3_setup_flow_control(tp, 0, 0);
2705 current_link_up = 1;
3d3ebe74
MC
2706 tp->tg3_flags2 |=
2707 TG3_FLG2_PARALLEL_DETECT;
2708 tp->serdes_counter =
2709 SERDES_PARALLEL_DET_TIMEOUT;
2710 } else
2711 goto restart_autoneg;
1da177e4
LT
2712 }
2713 }
3d3ebe74
MC
2714 } else {
2715 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2716 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1da177e4
LT
2717 }
2718
2719out:
2720 return current_link_up;
2721}
2722
2723static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2724{
2725 int current_link_up = 0;
2726
5cf64b8a 2727 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 2728 goto out;
1da177e4
LT
2729
2730 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2731 u32 flags;
2732 int i;
6aa20a22 2733
1da177e4
LT
2734 if (fiber_autoneg(tp, &flags)) {
2735 u32 local_adv, remote_adv;
2736
2737 local_adv = ADVERTISE_PAUSE_CAP;
2738 remote_adv = 0;
2739 if (flags & MR_LP_ADV_SYM_PAUSE)
2740 remote_adv |= LPA_PAUSE_CAP;
2741 if (flags & MR_LP_ADV_ASYM_PAUSE)
2742 remote_adv |= LPA_PAUSE_ASYM;
2743
2744 tg3_setup_flow_control(tp, local_adv, remote_adv);
2745
1da177e4
LT
2746 current_link_up = 1;
2747 }
2748 for (i = 0; i < 30; i++) {
2749 udelay(20);
2750 tw32_f(MAC_STATUS,
2751 (MAC_STATUS_SYNC_CHANGED |
2752 MAC_STATUS_CFG_CHANGED));
2753 udelay(40);
2754 if ((tr32(MAC_STATUS) &
2755 (MAC_STATUS_SYNC_CHANGED |
2756 MAC_STATUS_CFG_CHANGED)) == 0)
2757 break;
2758 }
2759
2760 mac_status = tr32(MAC_STATUS);
2761 if (current_link_up == 0 &&
2762 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2763 !(mac_status & MAC_STATUS_RCVD_CFG))
2764 current_link_up = 1;
2765 } else {
2766 /* Forcing 1000FD link up. */
2767 current_link_up = 1;
1da177e4
LT
2768
2769 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2770 udelay(40);
e8f3f6ca
MC
2771
2772 tw32_f(MAC_MODE, tp->mac_mode);
2773 udelay(40);
1da177e4
LT
2774 }
2775
2776out:
2777 return current_link_up;
2778}
2779
2780static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2781{
2782 u32 orig_pause_cfg;
2783 u16 orig_active_speed;
2784 u8 orig_active_duplex;
2785 u32 mac_status;
2786 int current_link_up;
2787 int i;
2788
2789 orig_pause_cfg =
2790 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2791 TG3_FLAG_TX_PAUSE));
2792 orig_active_speed = tp->link_config.active_speed;
2793 orig_active_duplex = tp->link_config.active_duplex;
2794
2795 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2796 netif_carrier_ok(tp->dev) &&
2797 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2798 mac_status = tr32(MAC_STATUS);
2799 mac_status &= (MAC_STATUS_PCS_SYNCED |
2800 MAC_STATUS_SIGNAL_DET |
2801 MAC_STATUS_CFG_CHANGED |
2802 MAC_STATUS_RCVD_CFG);
2803 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2804 MAC_STATUS_SIGNAL_DET)) {
2805 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2806 MAC_STATUS_CFG_CHANGED));
2807 return 0;
2808 }
2809 }
2810
2811 tw32_f(MAC_TX_AUTO_NEG, 0);
2812
2813 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2814 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2815 tw32_f(MAC_MODE, tp->mac_mode);
2816 udelay(40);
2817
2818 if (tp->phy_id == PHY_ID_BCM8002)
2819 tg3_init_bcm8002(tp);
2820
2821 /* Enable link change event even when serdes polling. */
2822 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2823 udelay(40);
2824
2825 current_link_up = 0;
2826 mac_status = tr32(MAC_STATUS);
2827
2828 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2829 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2830 else
2831 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2832
1da177e4
LT
2833 tp->hw_status->status =
2834 (SD_STATUS_UPDATED |
2835 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2836
2837 for (i = 0; i < 100; i++) {
2838 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2839 MAC_STATUS_CFG_CHANGED));
2840 udelay(5);
2841 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3d3ebe74
MC
2842 MAC_STATUS_CFG_CHANGED |
2843 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
1da177e4
LT
2844 break;
2845 }
2846
2847 mac_status = tr32(MAC_STATUS);
2848 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2849 current_link_up = 0;
3d3ebe74
MC
2850 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2851 tp->serdes_counter == 0) {
1da177e4
LT
2852 tw32_f(MAC_MODE, (tp->mac_mode |
2853 MAC_MODE_SEND_CONFIGS));
2854 udelay(1);
2855 tw32_f(MAC_MODE, tp->mac_mode);
2856 }
2857 }
2858
2859 if (current_link_up == 1) {
2860 tp->link_config.active_speed = SPEED_1000;
2861 tp->link_config.active_duplex = DUPLEX_FULL;
2862 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2863 LED_CTRL_LNKLED_OVERRIDE |
2864 LED_CTRL_1000MBPS_ON));
2865 } else {
2866 tp->link_config.active_speed = SPEED_INVALID;
2867 tp->link_config.active_duplex = DUPLEX_INVALID;
2868 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2869 LED_CTRL_LNKLED_OVERRIDE |
2870 LED_CTRL_TRAFFIC_OVERRIDE));
2871 }
2872
2873 if (current_link_up != netif_carrier_ok(tp->dev)) {
2874 if (current_link_up)
2875 netif_carrier_on(tp->dev);
2876 else
2877 netif_carrier_off(tp->dev);
2878 tg3_link_report(tp);
2879 } else {
2880 u32 now_pause_cfg =
2881 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2882 TG3_FLAG_TX_PAUSE);
2883 if (orig_pause_cfg != now_pause_cfg ||
2884 orig_active_speed != tp->link_config.active_speed ||
2885 orig_active_duplex != tp->link_config.active_duplex)
2886 tg3_link_report(tp);
2887 }
2888
2889 return 0;
2890}
2891
747e8f8b
MC
2892static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2893{
2894 int current_link_up, err = 0;
2895 u32 bmsr, bmcr;
2896 u16 current_speed;
2897 u8 current_duplex;
2898
2899 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2900 tw32_f(MAC_MODE, tp->mac_mode);
2901 udelay(40);
2902
2903 tw32(MAC_EVENT, 0);
2904
2905 tw32_f(MAC_STATUS,
2906 (MAC_STATUS_SYNC_CHANGED |
2907 MAC_STATUS_CFG_CHANGED |
2908 MAC_STATUS_MI_COMPLETION |
2909 MAC_STATUS_LNKSTATE_CHANGED));
2910 udelay(40);
2911
2912 if (force_reset)
2913 tg3_phy_reset(tp);
2914
2915 current_link_up = 0;
2916 current_speed = SPEED_INVALID;
2917 current_duplex = DUPLEX_INVALID;
2918
2919 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2920 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2922 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2923 bmsr |= BMSR_LSTATUS;
2924 else
2925 bmsr &= ~BMSR_LSTATUS;
2926 }
747e8f8b
MC
2927
2928 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2929
2930 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2931 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2932 /* do nothing, just check for link up at the end */
2933 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2934 u32 adv, new_adv;
2935
2936 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2937 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2938 ADVERTISE_1000XPAUSE |
2939 ADVERTISE_1000XPSE_ASYM |
2940 ADVERTISE_SLCT);
2941
2942 /* Always advertise symmetric PAUSE just like copper */
2943 new_adv |= ADVERTISE_1000XPAUSE;
2944
2945 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2946 new_adv |= ADVERTISE_1000XHALF;
2947 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2948 new_adv |= ADVERTISE_1000XFULL;
2949
2950 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2951 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2952 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2953 tg3_writephy(tp, MII_BMCR, bmcr);
2954
2955 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 2956 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
747e8f8b
MC
2957 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2958
2959 return err;
2960 }
2961 } else {
2962 u32 new_bmcr;
2963
2964 bmcr &= ~BMCR_SPEED1000;
2965 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2966
2967 if (tp->link_config.duplex == DUPLEX_FULL)
2968 new_bmcr |= BMCR_FULLDPLX;
2969
2970 if (new_bmcr != bmcr) {
2971 /* BMCR_SPEED1000 is a reserved bit that needs
2972 * to be set on write.
2973 */
2974 new_bmcr |= BMCR_SPEED1000;
2975
2976 /* Force a linkdown */
2977 if (netif_carrier_ok(tp->dev)) {
2978 u32 adv;
2979
2980 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2981 adv &= ~(ADVERTISE_1000XFULL |
2982 ADVERTISE_1000XHALF |
2983 ADVERTISE_SLCT);
2984 tg3_writephy(tp, MII_ADVERTISE, adv);
2985 tg3_writephy(tp, MII_BMCR, bmcr |
2986 BMCR_ANRESTART |
2987 BMCR_ANENABLE);
2988 udelay(10);
2989 netif_carrier_off(tp->dev);
2990 }
2991 tg3_writephy(tp, MII_BMCR, new_bmcr);
2992 bmcr = new_bmcr;
2993 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2994 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2995 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2996 ASIC_REV_5714) {
2997 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2998 bmsr |= BMSR_LSTATUS;
2999 else
3000 bmsr &= ~BMSR_LSTATUS;
3001 }
747e8f8b
MC
3002 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3003 }
3004 }
3005
3006 if (bmsr & BMSR_LSTATUS) {
3007 current_speed = SPEED_1000;
3008 current_link_up = 1;
3009 if (bmcr & BMCR_FULLDPLX)
3010 current_duplex = DUPLEX_FULL;
3011 else
3012 current_duplex = DUPLEX_HALF;
3013
3014 if (bmcr & BMCR_ANENABLE) {
3015 u32 local_adv, remote_adv, common;
3016
3017 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3018 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3019 common = local_adv & remote_adv;
3020 if (common & (ADVERTISE_1000XHALF |
3021 ADVERTISE_1000XFULL)) {
3022 if (common & ADVERTISE_1000XFULL)
3023 current_duplex = DUPLEX_FULL;
3024 else
3025 current_duplex = DUPLEX_HALF;
3026
3027 tg3_setup_flow_control(tp, local_adv,
3028 remote_adv);
3029 }
3030 else
3031 current_link_up = 0;
3032 }
3033 }
3034
3035 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3036 if (tp->link_config.active_duplex == DUPLEX_HALF)
3037 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3038
3039 tw32_f(MAC_MODE, tp->mac_mode);
3040 udelay(40);
3041
3042 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3043
3044 tp->link_config.active_speed = current_speed;
3045 tp->link_config.active_duplex = current_duplex;
3046
3047 if (current_link_up != netif_carrier_ok(tp->dev)) {
3048 if (current_link_up)
3049 netif_carrier_on(tp->dev);
3050 else {
3051 netif_carrier_off(tp->dev);
3052 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3053 }
3054 tg3_link_report(tp);
3055 }
3056 return err;
3057}
3058
3059static void tg3_serdes_parallel_detect(struct tg3 *tp)
3060{
3d3ebe74 3061 if (tp->serdes_counter) {
747e8f8b 3062 /* Give autoneg time to complete. */
3d3ebe74 3063 tp->serdes_counter--;
747e8f8b
MC
3064 return;
3065 }
3066 if (!netif_carrier_ok(tp->dev) &&
3067 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3068 u32 bmcr;
3069
3070 tg3_readphy(tp, MII_BMCR, &bmcr);
3071 if (bmcr & BMCR_ANENABLE) {
3072 u32 phy1, phy2;
3073
3074 /* Select shadow register 0x1f */
3075 tg3_writephy(tp, 0x1c, 0x7c00);
3076 tg3_readphy(tp, 0x1c, &phy1);
3077
3078 /* Select expansion interrupt status register */
3079 tg3_writephy(tp, 0x17, 0x0f01);
3080 tg3_readphy(tp, 0x15, &phy2);
3081 tg3_readphy(tp, 0x15, &phy2);
3082
3083 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3084 /* We have signal detect and not receiving
3085 * config code words, link is up by parallel
3086 * detection.
3087 */
3088
3089 bmcr &= ~BMCR_ANENABLE;
3090 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3091 tg3_writephy(tp, MII_BMCR, bmcr);
3092 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3093 }
3094 }
3095 }
3096 else if (netif_carrier_ok(tp->dev) &&
3097 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3098 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3099 u32 phy2;
3100
3101 /* Select expansion interrupt status register */
3102 tg3_writephy(tp, 0x17, 0x0f01);
3103 tg3_readphy(tp, 0x15, &phy2);
3104 if (phy2 & 0x20) {
3105 u32 bmcr;
3106
3107 /* Config code words received, turn on autoneg. */
3108 tg3_readphy(tp, MII_BMCR, &bmcr);
3109 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3110
3111 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3112
3113 }
3114 }
3115}
3116
1da177e4
LT
/* Bring the link up on whichever PHY type this board uses (copper,
 * fiber SERDES, or MII-attached SERDES) and then program the
 * link-dependent MAC registers.  Returns 0 or a negative errno from
 * the PHY-specific setup helper.  Caller holds tp->lock.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	/* Dispatch on PHY type recorded at probe time. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000/half needs a longer slot time (0xff) than all other
	 * speed/duplex combinations (32).
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: only collect statistics while the link is up. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: relax the PCIe L1 entry threshold while the
	 * link is down, restore the full mask once it is up.
	 */
	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3162
df3e6548
MC
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If we are already using the write-reorder workaround (flag set,
	 * or mailbox writes go through the indirect path), recovery cannot
	 * help — this state is a driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery; tg3_reset_task() performs the reset. */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3183
1b2a7205
MC
3184static inline u32 tg3_tx_avail(struct tg3 *tp)
3185{
3186 smp_mb();
3187 return (tp->tx_pending -
3188 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3189}
3190
1da177e4
LT
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaims completed TX descriptors between the software consumer
 * index and the hardware consumer index, unmapping DMA and freeing
 * skbs.  Wakes the queue when enough space is reclaimed.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the chip reported a
		 * completion we never posted — MMIO reordering symptom.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor maps the linear head of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Subsequent descriptors map the page fragments. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Fragment slots must be empty and must not run past
			 * the hardware index; otherwise flag the reorder bug
			 * (but finish unmapping first).
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked wakeup: re-test under netif_tx_lock to avoid
	 * racing with a concurrent stop in the xmit path.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3258
3259/* Returns size of skb allocated or < 0 on error.
3260 *
3261 * We only need to fill in the address because the other members
3262 * of the RX descriptor are invariant, see tg3_init_rings.
3263 *
3264 * Note the purposeful assymetry of cpu vs. chip accesses. For
3265 * posting buffers we only dirty the first cache line of the RX
3266 * descriptor (containing the address). Whereas for the RX status
3267 * buffers the cpu only reads the last cacheline of the RX descriptor
3268 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3269 */
3270static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3271 int src_idx, u32 dest_idx_unmasked)
3272{
3273 struct tg3_rx_buffer_desc *desc;
3274 struct ring_info *map, *src_map;
3275 struct sk_buff *skb;
3276 dma_addr_t mapping;
3277 int skb_size, dest_idx;
3278
3279 src_map = NULL;
3280 switch (opaque_key) {
3281 case RXD_OPAQUE_RING_STD:
3282 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3283 desc = &tp->rx_std[dest_idx];
3284 map = &tp->rx_std_buffers[dest_idx];
3285 if (src_idx >= 0)
3286 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 3287 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
3288 break;
3289
3290 case RXD_OPAQUE_RING_JUMBO:
3291 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3292 desc = &tp->rx_jumbo[dest_idx];
3293 map = &tp->rx_jumbo_buffers[dest_idx];
3294 if (src_idx >= 0)
3295 src_map = &tp->rx_jumbo_buffers[src_idx];
3296 skb_size = RX_JUMBO_PKT_BUF_SZ;
3297 break;
3298
3299 default:
3300 return -EINVAL;
3301 };
3302
3303 /* Do not overwrite any of the map or rp information
3304 * until we are sure we can commit to a new buffer.
3305 *
3306 * Callers depend upon this behavior and assume that
3307 * we leave everything unchanged if we fail.
3308 */
a20e9c62 3309 skb = netdev_alloc_skb(tp->dev, skb_size);
1da177e4
LT
3310 if (skb == NULL)
3311 return -ENOMEM;
3312
1da177e4
LT
3313 skb_reserve(skb, tp->rx_offset);
3314
3315 mapping = pci_map_single(tp->pdev, skb->data,
3316 skb_size - tp->rx_offset,
3317 PCI_DMA_FROMDEVICE);
3318
3319 map->skb = skb;
3320 pci_unmap_addr_set(map, mapping, mapping);
3321
3322 if (src_map != NULL)
3323 src_map->skb = NULL;
3324
3325 desc->addr_hi = ((u64)mapping >> 32);
3326 desc->addr_lo = ((u64)mapping & 0xffffffff);
3327
3328 return skb_size;
3329}
3330
3331/* We only need to move over in the address because the other
3332 * members of the RX descriptor are invariant. See notes above
3333 * tg3_alloc_rx_skb for full details.
3334 */
3335static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3336 int src_idx, u32 dest_idx_unmasked)
3337{
3338 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3339 struct ring_info *src_map, *dest_map;
3340 int dest_idx;
3341
3342 switch (opaque_key) {
3343 case RXD_OPAQUE_RING_STD:
3344 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3345 dest_desc = &tp->rx_std[dest_idx];
3346 dest_map = &tp->rx_std_buffers[dest_idx];
3347 src_desc = &tp->rx_std[src_idx];
3348 src_map = &tp->rx_std_buffers[src_idx];
3349 break;
3350
3351 case RXD_OPAQUE_RING_JUMBO:
3352 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3353 dest_desc = &tp->rx_jumbo[dest_idx];
3354 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3355 src_desc = &tp->rx_jumbo[src_idx];
3356 src_map = &tp->rx_jumbo_buffers[src_idx];
3357 break;
3358
3359 default:
3360 return;
3361 };
3362
3363 dest_map->skb = src_map->skb;
3364 pci_unmap_addr_set(dest_map, mapping,
3365 pci_unmap_addr(src_map, mapping));
3366 dest_desc->addr_hi = src_desc->addr_hi;
3367 dest_desc->addr_lo = src_desc->addr_lo;
3368
3369 src_map->skb = NULL;
3370}
3371
#if TG3_VLAN_TAG_USED
/* Hand a received, VLAN-tagged skb to the stack via the VLAN
 * acceleration path using the group registered on this device.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3378
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the posting ring and slot. */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames (odd-nibble MII errors excepted). */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large frame: hand the ring buffer up and replace
			 * it with a freshly allocated one.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a new skb and recycle the
			 * original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Hardware checksum: 0xffff means the TCP/UDP checksum
		 * verified correct.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically kick the std producer mailbox so the chip
		 * does not starve for buffers on long polls.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3558
/* One pass of NAPI work: service link-change events, reclaim completed
 * TX descriptors, and process received packets up to the remaining
 * budget.  Returns the updated work_done count; returns early if TX
 * recovery was triggered.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_UPDATED) {
			/* fallthrough check below uses LINK_CHG bit */
		}
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while keeping UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* Abort the poll if tg3_tx() flagged a reorder problem. */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3592
/* NAPI poll callback.  Loops calling tg3_poll_work() until the budget
 * is exhausted or no work remains, then re-enables interrupts via
 * tg3_restart_ints().  On TX recovery, completes NAPI and schedules
 * the reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3633
f47c11ee
DM
/* Stop the IRQ handler from doing any further work: set irq_sync (seen
 * by the handlers via tg3_irq_sync()) and wait for any in-flight
 * handler to finish.  The smp_mb() orders the flag write before the
 * synchronize.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3643
/* Nonzero while IRQ processing is quiesced (see tg3_irq_quiesce). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3648
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3660
/* Release the lock taken by tg3_full_lock().  Note: does not clear
 * irq_sync; callers that quiesced IRQs reset it separately.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3665
fcfa0a32
MC
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the caches for the status block and next RX entry. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Hand off to NAPI unless the device is being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3682
88b06bc2
MC
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3707
/* Legacy INTx interrupt handler (non-tagged status mode).  Confirms
 * the interrupt belongs to this device, masks further IRQs via the
 * interrupt mailbox, and schedules NAPI if there is work.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3756
/* Legacy INTx interrupt handler for tagged-status mode.  Uses the
 * status tag (rather than the UPDATED bit) to detect new work, which
 * lets tg3_restart_ints() acknowledge exactly how much was processed.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3804
/* ISR for interrupt test.  Reports "handled" (and disables further
 * interrupts) if the status block was updated or INTA is asserted;
 * used by the self-test to verify interrupt delivery.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3819
8e7a22e3 3820static int tg3_init_hw(struct tg3 *, int);
944d980e 3821static int tg3_halt(struct tg3 *, int, int);
1da177e4 3822
b9ec6c1b
MC
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  On init failure, shuts the chip down,
 * closes the device, and re-acquires the lock before returning the
 * error so the caller's locking assumptions still hold.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Drop the lock for dev_close(), which must not be called
		 * with tp->lock held.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3844
1da177e4
LT
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly so the device
 * can be serviced with interrupts disabled (netconsole, kgdboe).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3853
/* Workqueue handler that resets and re-initializes the chip, e.g.
 * after a TX timeout or an MMIO-reorder recovery request.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	/* Bail out if the device was closed before this work ran. */
	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* Re-take the lock with IRQ quiesce for the actual reset. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* Recovery from suspected MMIO write reordering: switch the
	 * mailbox writers to the flushing variants from now on.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3894
b0408751
MC
/* Dump a minimal set of MAC/DMA status registers to the log for
 * post-mortem debugging (used on TX timeout).
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3902
1da177e4
LT
/* net_device watchdog callback: log the event (with register state if
 * tx_err messages are enabled) and schedule a full chip reset.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
3915
c58ec932
MC
3916/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3917static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3918{
3919 u32 base = (u32) mapping & 0xffffffff;
3920
3921 return ((base > 0xffffdcc0) &&
3922 (base + len + 8 < base));
3923}
3924
72f2afb8
MC
/* Test for DMA addresses > 40-bit.  Only relevant on 64-bit builds
 * with highmem and only for chips with the 40-bit DMA bug flag set;
 * everywhere else the answer is always "no overflow".
 */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3937
1da177e4
LT
3938static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3939
72f2afb8
MC
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the offending skb into a new linear skb, maps it, posts a
 * single descriptor for it, and unmaps/clears the original sw ring
 * entries from *start up to last_plus_one.  Returns 0 on success or
 * -1 if the copy failed or the new buffer still crosses a 4G boundary
 * (in which case the packet is dropped).  *start is advanced past the
 * newly posted descriptor on success.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 is the linear head; the rest are fragments. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot takes ownership of the replacement skb
			 * (NULL if we are dropping the packet).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3997
3998static void tg3_set_txd(struct tg3 *tp, int entry,
3999 dma_addr_t mapping, int len, u32 flags,
4000 u32 mss_and_is_end)
4001{
4002 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4003 int is_end = (mss_and_is_end & 0x1);
4004 u32 mss = (mss_and_is_end >> 1);
4005 u32 vlan_tag = 0;
4006
4007 if (is_end)
4008 flags |= TXD_FLAG_END;
4009 if (flags & TXD_FLAG_VLAN) {
4010 vlan_tag = flags >> 16;
4011 flags &= 0xffff;
4012 }
4013 vlan_tag |= (mss << TXD_MSS_SHIFT);
4014
4015 txd->addr_hi = ((u64) mapping >> 32);
4016 txd->addr_lo = ((u64) mapping & 0xffffffff);
4017 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4018 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4019}
4020
5a6f3074
MC
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb head and fragments, fills TX descriptors, and rings
 * the producer mailbox.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if
 * the ring is unexpectedly full (also stops the queue).
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: the IP header is rewritten, so it must not be
		 * shared with clones.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Encode the header length in the upper bits of mss as the
		 * hardware TSO engine expects.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Chip recomputes these per segment. */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the first slot owns the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	/* Stop the queue if a max-fragment packet would no longer fit;
	 * re-wake immediately if reclaim freed space in the meantime.
	 */
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4139
52c0fd83
MC
4140static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4141
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments @skb into MTU-sized packets and feeds each one back
 * through tg3_start_xmit_dma_bug() with TSO masked off.  Consumes @skb
 * in all cases (frees it before returning).  Always returns NETDEV_TX_OK
 * unless the ring is too full to hold the worst-case segment count.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping: a concurrent TX completion may
		 * have freed enough descriptors in the meantime.
		 */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Mask off NETIF_F_TSO so the stack performs the segmentation. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	/* Transmit each segment individually; unlink before handing off. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	/* The original skb is no longer needed (segments hold copies). */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
52c0fd83 4174
5a6f3074
MC
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb (linear part plus page fragments) into the TX ring,
 * setting checksum/TSO/VLAN descriptor flags as needed.  If any DMA
 * address would trip a known hardware DMA bug, the whole descriptor
 * group is re-done via tigon3_dma_hwbug_workaround().  Returns
 * NETDEV_TX_OK or NETDEV_TX_BUSY (ring full).
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* Headers are modified below (checksum/tot_len), so a
		 * cloned header block must be privatized first.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Divert oversized TSO headers to the GSO workaround path. */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Prepare IP header for hardware/firmware segmentation. */
		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants the pseudo-header checksum
			 * pre-seeded in the TCP header.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option lengths; the field position differs
		 * between HW-TSO/5705 parts and the others.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* Flag buffers whose DMA address crosses a 4GB boundary. */
	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Check for the 4GB-crossing and >40-bit DMA bugs. */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor used for this skb. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race with TX reclaim. */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4348
4349static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4350 int new_mtu)
4351{
4352 dev->mtu = new_mtu;
4353
ef7f5ec0 4354 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4355 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4356 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4357 ethtool_op_set_tso(dev, 0);
4358 }
4359 else
4360 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4361 } else {
a4e2b347 4362 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4363 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4364 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4365 }
1da177e4
LT
4366}
4367
4368static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4369{
4370 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 4371 int err;
1da177e4
LT
4372
4373 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4374 return -EINVAL;
4375
4376 if (!netif_running(dev)) {
4377 /* We'll just catch it later when the
4378 * device is up'd.
4379 */
4380 tg3_set_mtu(dev, tp, new_mtu);
4381 return 0;
4382 }
4383
4384 tg3_netif_stop(tp);
f47c11ee
DM
4385
4386 tg3_full_lock(tp, 1);
1da177e4 4387
944d980e 4388 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
4389
4390 tg3_set_mtu(dev, tp, new_mtu);
4391
b9ec6c1b 4392 err = tg3_restart_hw(tp, 0);
1da177e4 4393
b9ec6c1b
MC
4394 if (!err)
4395 tg3_netif_start(tp);
1da177e4 4396
f47c11ee 4397 tg3_full_unlock(tp);
1da177e4 4398
b9ec6c1b 4399 return err;
1da177e4
LT
4400}
4401
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Unmap and free every posted buffer in the standard RX ring. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Same for the jumbo RX ring (fixed jumbo buffer size). */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Walk the TX ring.  A transmitted skb occupies one descriptor
	 * for its linear data plus one per page fragment, so the index
	 * is advanced manually inside the loop.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) portion... */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* ...then each page fragment's descriptor slot. */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4473
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM when not even a single RX buffer
 * could be allocated.  On partial allocation failure the ring size is
 * shrunk (tp->rx_pending / tp->rx_jumbo_pending) rather than failing.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class parts use jumbo-sized buffers in the standard ring
	 * when the MTU exceeds the normal Ethernet payload.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  On partial failure,
	 * run with a smaller ring; fail only if nothing was allocated.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					/* Unwind the std-ring buffers too. */
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4563
4564/*
4565 * Must not be invoked with interrupt sources disabled and
4566 * the hardware shutdown down.
4567 */
4568static void tg3_free_consistent(struct tg3 *tp)
4569{
b4558ea9
JJ
4570 kfree(tp->rx_std_buffers);
4571 tp->rx_std_buffers = NULL;
1da177e4
LT
4572 if (tp->rx_std) {
4573 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4574 tp->rx_std, tp->rx_std_mapping);
4575 tp->rx_std = NULL;
4576 }
4577 if (tp->rx_jumbo) {
4578 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4579 tp->rx_jumbo, tp->rx_jumbo_mapping);
4580 tp->rx_jumbo = NULL;
4581 }
4582 if (tp->rx_rcb) {
4583 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4584 tp->rx_rcb, tp->rx_rcb_mapping);
4585 tp->rx_rcb = NULL;
4586 }
4587 if (tp->tx_ring) {
4588 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4589 tp->tx_ring, tp->tx_desc_mapping);
4590 tp->tx_ring = NULL;
4591 }
4592 if (tp->hw_status) {
4593 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4594 tp->hw_status, tp->status_mapping);
4595 tp->hw_status = NULL;
4596 }
4597 if (tp->hw_stats) {
4598 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4599 tp->hw_stats, tp->stats_mapping);
4600 tp->hw_stats = NULL;
4601 }
4602}
4603
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the ring bookkeeping arrays and every DMA-consistent
 * region the chip needs.  Returns 0 on success or -ENOMEM; on failure
 * everything already allocated is released via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One zeroed allocation holds the std-RX, jumbo-RX and TX
	 * bookkeeping arrays back to back; carved up just below.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* pci_alloc_consistent memory is not zeroed; clear the blocks
	 * the hardware and driver both read.
	 */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4665
4666#define MAX_WAIT_CNT 1000
4667
4668/* To stop a block, clear the enable bit and poll till it
4669 * clears. tp->lock is held.
4670 */
b3b7d6be 4671static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4672{
4673 unsigned int i;
4674 u32 val;
4675
4676 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4677 switch (ofs) {
4678 case RCVLSC_MODE:
4679 case DMAC_MODE:
4680 case MBFREE_MODE:
4681 case BUFMGR_MODE:
4682 case MEMARB_MODE:
4683 /* We can't enable/disable these bits of the
4684 * 5705/5750, just say success.
4685 */
4686 return 0;
4687
4688 default:
4689 break;
4690 };
4691 }
4692
4693 val = tr32(ofs);
4694 val &= ~enable_bit;
4695 tw32_f(ofs, val);
4696
4697 for (i = 0; i < MAX_WAIT_CNT; i++) {
4698 udelay(100);
4699 val = tr32(ofs);
4700 if ((val & enable_bit) == 0)
4701 break;
4702 }
4703
b3b7d6be 4704 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4705 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4706 "ofs=%lx enable_bit=%x\n",
4707 ofs, enable_bit);
4708 return -ENODEV;
4709 }
4710
4711 return 0;
4712}
4713
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, then shut down the RX engine,
 * the TX engine, the host coalescing / DMA / memory blocks, and finally
 * clear the status and statistics blocks.  Errors from individual
 * block stops are OR-ed together; @silent suppresses their messages.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting packets first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the receive-path blocks. */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Shut down the send-path blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll for it to drain. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the shared status/stats blocks if they exist. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4776
4777/* tp->lock is held. */
4778static int tg3_nvram_lock(struct tg3 *tp)
4779{
4780 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4781 int i;
4782
ec41c7df
MC
4783 if (tp->nvram_lock_cnt == 0) {
4784 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4785 for (i = 0; i < 8000; i++) {
4786 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4787 break;
4788 udelay(20);
4789 }
4790 if (i == 8000) {
4791 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4792 return -ENODEV;
4793 }
1da177e4 4794 }
ec41c7df 4795 tp->nvram_lock_cnt++;
1da177e4
LT
4796 }
4797 return 0;
4798}
4799
4800/* tp->lock is held. */
4801static void tg3_nvram_unlock(struct tg3 *tp)
4802{
ec41c7df
MC
4803 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4804 if (tp->nvram_lock_cnt > 0)
4805 tp->nvram_lock_cnt--;
4806 if (tp->nvram_lock_cnt == 0)
4807 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4808 }
1da177e4
LT
4809}
4810
e6af301b
MC
4811/* tp->lock is held. */
4812static void tg3_enable_nvram_access(struct tg3 *tp)
4813{
4814 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4815 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4816 u32 nvaccess = tr32(NVRAM_ACCESS);
4817
4818 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4819 }
4820}
4821
4822/* tp->lock is held. */
4823static void tg3_disable_nvram_access(struct tg3 *tp)
4824{
4825 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4826 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4827 u32 nvaccess = tr32(NVRAM_ACCESS);
4828
4829 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4830 }
4831}
4832
0d3031d9
MC
/* Post @event to the APE (management processor) event-status register.
 *
 * Silently returns if the APE firmware signature or ready status is
 * absent.  Otherwise polls under the APE memory lock, waiting for any
 * previous event to be consumed before writing the new one, then rings
 * the APE doorbell.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail out unless APE firmware is present and ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Slot free: write the event while still holding the lock. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Only ring the doorbell if the event was actually posted. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4868
4869static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4870{
4871 u32 event;
4872 u32 apedata;
4873
4874 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4875 return;
4876
4877 switch (kind) {
4878 case RESET_KIND_INIT:
4879 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4880 APE_HOST_SEG_SIG_MAGIC);
4881 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4882 APE_HOST_SEG_LEN_MAGIC);
4883 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4884 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4885 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4886 APE_HOST_DRIVER_ID_MAGIC);
4887 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4888 APE_HOST_BEHAV_NO_PHYLOCK);
4889
4890 event = APE_EVENT_STATUS_STATE_START;
4891 break;
4892 case RESET_KIND_SHUTDOWN:
4893 event = APE_EVENT_STATUS_STATE_UNLOAD;
4894 break;
4895 case RESET_KIND_SUSPEND:
4896 event = APE_EVENT_STATUS_STATE_SUSPEND;
4897 break;
4898 default:
4899 return;
4900 }
4901
4902 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4903
4904 tg3_ape_send_event(tp, event);
4905}
4906
1da177e4
LT
4907/* tp->lock is held. */
4908static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4909{
f49639e6
DM
4910 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4911 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4
LT
4912
4913 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4914 switch (kind) {
4915 case RESET_KIND_INIT:
4916 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4917 DRV_STATE_START);
4918 break;
4919
4920 case RESET_KIND_SHUTDOWN:
4921 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4922 DRV_STATE_UNLOAD);
4923 break;
4924
4925 case RESET_KIND_SUSPEND:
4926 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4927 DRV_STATE_SUSPEND);
4928 break;
4929
4930 default:
4931 break;
4932 };
4933 }
0d3031d9
MC
4934
4935 if (kind == RESET_KIND_INIT ||
4936 kind == RESET_KIND_SUSPEND)
4937 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
4938}
4939
4940/* tp->lock is held. */
4941static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4942{
4943 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4944 switch (kind) {
4945 case RESET_KIND_INIT:
4946 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4947 DRV_STATE_START_DONE);
4948 break;
4949
4950 case RESET_KIND_SHUTDOWN:
4951 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4952 DRV_STATE_UNLOAD_DONE);
4953 break;
4954
4955 default:
4956 break;
4957 };
4958 }
0d3031d9
MC
4959
4960 if (kind == RESET_KIND_SHUTDOWN)
4961 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
4962}
4963
4964/* tp->lock is held. */
4965static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4966{
4967 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4968 switch (kind) {
4969 case RESET_KIND_INIT:
4970 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4971 DRV_STATE_START);
4972 break;
4973
4974 case RESET_KIND_SHUTDOWN:
4975 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4976 DRV_STATE_UNLOAD);
4977 break;
4978
4979 case RESET_KIND_SUSPEND:
4980 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4981 DRV_STATE_SUSPEND);
4982 break;
4983
4984 default:
4985 break;
4986 };
4987 }
4988}
4989
7a6f4369
MC
/* Wait for on-chip firmware to signal that its initialization is done.
 *
 * 5906 parts expose a VCPU status bit instead of the mailbox handshake.
 * A mailbox timeout is NOT treated as an error (some boards ship
 * without firmware); it is reported once via printk.  Returns 0, or
 * -ENODEV only for a 5906 VCPU timeout.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Firmware acks by writing back the inverted magic. */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5028
ee6a99b5
MC
5029/* Save PCI command register before chip reset */
5030static void tg3_save_pci_state(struct tg3 *tp)
5031{
5032 u32 val;
5033
5034 pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5035 tp->pci_cmd = val;
5036}
5037
/* Restore PCI state after chip reset.
 *
 * Re-establishes indirect register access, PCI retry/APE access bits,
 * the saved command register, cacheline/latency (non-PCIe only),
 * PCI-X relaxed ordering, and the MSI enable bit on 5780-class parts.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Reapply the command register saved by tg3_save_pci_state(). */
	pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);

	/* Cacheline size / latency timer are only relevant off PCIe. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}
	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5096
1da177e4
LT
5097static void tg3_stop_fw(struct tg3 *);
5098
5099/* tp->lock is held. */
5100static int tg3_chip_reset(struct tg3 *tp)
5101{
5102 u32 val;
1ee582d8 5103 void (*write_op)(struct tg3 *, u32, u32);
7a6f4369 5104 int err;
1da177e4 5105
f49639e6
DM
5106 tg3_nvram_lock(tp);
5107
5108 /* No matching tg3_nvram_unlock() after this because
5109 * chip reset below will undo the nvram lock.
5110 */
5111 tp->nvram_lock_cnt = 0;
1da177e4 5112
ee6a99b5
MC
5113 /* GRC_MISC_CFG core clock reset will clear the memory
5114 * enable bit in PCI register 4 and the MSI enable bit
5115 * on some chips, so we save relevant registers here.
5116 */
5117 tg3_save_pci_state(tp);
5118
d9ab5ad1 5119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 5120 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 5121 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
5122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5123 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d9ab5ad1
MC
5124 tw32(GRC_FASTBOOT_PC, 0);
5125
1da177e4
LT
5126 /*
5127 * We must avoid the readl() that normally takes place.
5128 * It locks machines, causes machine checks, and other
5129 * fun things. So, temporarily disable the 5701
5130 * hardware workaround, while we do the reset.
5131 */
1ee582d8
MC
5132 write_op = tp->write32;
5133 if (write_op == tg3_write_flush_reg32)
5134 tp->write32 = tg3_write32;
1da177e4 5135
d18edcb2
MC
5136 /* Prevent the irq handler from reading or writing PCI registers
5137 * during chip reset when the memory enable bit in the PCI command
5138 * register may be cleared. The chip does not generate interrupt
5139 * at this time, but the irq handler may still be called due to irq
5140 * sharing or irqpoll.
5141 */
5142 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
b8fa2f3a
MC
5143 if (tp->hw_status) {
5144 tp->hw_status->status = 0;
5145 tp->hw_status->status_tag = 0;
5146 }
d18edcb2
MC
5147 tp->last_tag = 0;
5148 smp_mb();
5149 synchronize_irq(tp->pdev->irq);
5150
1da177e4
LT
5151 /* do the reset */
5152 val = GRC_MISC_CFG_CORECLK_RESET;
5153
5154 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5155 if (tr32(0x7e2c) == 0x60) {
5156 tw32(0x7e2c, 0x20);
5157 }
5158 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5159 tw32(GRC_MISC_CFG, (1 << 29));
5160 val |= (1 << 29);
5161 }
5162 }
5163
b5d3772c
MC
5164 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5165 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5166 tw32(GRC_VCPU_EXT_CTRL,
5167 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5168 }
5169
1da177e4
LT
5170 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5171 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5172 tw32(GRC_MISC_CFG, val);
5173
1ee582d8
MC
5174 /* restore 5701 hardware bug workaround write method */
5175 tp->write32 = write_op;
1da177e4
LT
5176
5177 /* Unfortunately, we have to delay before the PCI read back.
5178 * Some 575X chips even will not respond to a PCI cfg access
5179 * when the reset command is given to the chip.
5180 *
5181 * How do these hardware designers expect things to work
5182 * properly if the PCI write is posted for a long period
5183 * of time? It is always necessary to have some method by
5184 * which a register read back can occur to push the write
5185 * out which does the reset.
5186 *
5187 * For most tg3 variants the trick below was working.
5188 * Ho hum...
5189 */
5190 udelay(120);
5191
5192 /* Flush PCI posted writes. The normal MMIO registers
5193 * are inaccessible at this time so this is the only
5194 * way to make this reliably (actually, this is no longer
5195 * the case, see above). I tried to use indirect
5196 * register read/write but this upset some 5701 variants.
5197 */
5198 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5199
5200 udelay(120);
5201
5202 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5203 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5204 int i;
5205 u32 cfg_val;
5206
5207 /* Wait for link training to complete. */
5208 for (i = 0; i < 5000; i++)
5209 udelay(100);
5210
5211 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5212 pci_write_config_dword(tp->pdev, 0xc4,
5213 cfg_val | (1 << 15));
5214 }
5215 /* Set PCIE max payload size and clear error status. */
5216 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5217 }
5218
ee6a99b5 5219 tg3_restore_pci_state(tp);
1da177e4 5220
d18edcb2
MC
5221 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5222
ee6a99b5
MC
5223 val = 0;
5224 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4cf78e4f 5225 val = tr32(MEMARB_MODE);
ee6a99b5 5226 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1da177e4
LT
5227
5228 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5229 tg3_stop_fw(tp);
5230 tw32(0x5000, 0x400);
5231 }
5232
5233 tw32(GRC_MODE, tp->grc_mode);
5234
5235 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
ab0049b4 5236 val = tr32(0xc4);
1da177e4
LT
5237
5238 tw32(0xc4, val | (1 << 15));
5239 }
5240
5241 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5242 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5243 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5244 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5245 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5246 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5247 }
5248
5249 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5250 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5251 tw32_f(MAC_MODE, tp->mac_mode);
747e8f8b
MC
5252 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5253 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5254 tw32_f(MAC_MODE, tp->mac_mode);
1da177e4
LT
5255 } else
5256 tw32_f(MAC_MODE, 0);
5257 udelay(40);
5258
7a6f4369
MC
5259 err = tg3_poll_fw(tp);
5260 if (err)
5261 return err;
1da177e4
LT
5262
5263 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5264 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
ab0049b4 5265 val = tr32(0x7c00);
1da177e4
LT
5266
5267 tw32(0x7c00, val | (1 << 25));
5268 }
5269
5270 /* Reprobe ASF enable state. */
5271 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5272 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5273 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5274 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5275 u32 nic_cfg;
5276
5277 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5278 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5279 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 5280 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
5281 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5282 }
5283 }
5284
5285 return 0;
5286}
5287
5288/* tp->lock is held. */
5289static void tg3_stop_fw(struct tg3 *tp)
5290{
0d3031d9
MC
5291 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5292 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
1da177e4
LT
5293 u32 val;
5294 int i;
5295
5296 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5297 val = tr32(GRC_RX_CPU_EVENT);
5298 val |= (1 << 14);
5299 tw32(GRC_RX_CPU_EVENT, val);
5300
5301 /* Wait for RX cpu to ACK the event. */
5302 for (i = 0; i < 100; i++) {
5303 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5304 break;
5305 udelay(1);
5306 }
5307 }
5308}
5309
/* Bring the chip down: stop firmware, write the pre-reset signature,
 * abort pending DMA, reset the chip, then re-post the driver state
 * signatures.  tp->lock is held.
 *
 * Returns 0 on success, or the error code from tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int reset_err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	reset_err = tg3_chip_reset(tp);

	/* Signatures are posted even if the reset failed, so firmware
	 * always sees a consistent driver state.
	 */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return reset_err;
}
5330
/* Version and in-core memory layout of the 5701 A0 bug-fix firmware
 * image (text/rodata/data/bss segments in the chip CPU address space).
 * NOTE(review): "RELASE" in TG3_FW_RELASE_MINOR is a historical typo;
 * the name is kept as-is because other code may reference it.
 */
5331#define TG3_FW_RELEASE_MAJOR	0x0
5332#define TG3_FW_RELASE_MINOR	0x0
5333#define TG3_FW_RELEASE_FIX	0x0
5334#define TG3_FW_START_ADDR	0x08000000
5335#define TG3_FW_TEXT_ADDR	0x08000000
5336#define TG3_FW_TEXT_LEN	0x9c0
5337#define TG3_FW_RODATA_ADDR	0x080009c0
5338#define TG3_FW_RODATA_LEN	0x60
5339#define TG3_FW_DATA_ADDR	0x08000a40
5340#define TG3_FW_DATA_LEN	0x20
5341#define TG3_FW_SBSS_ADDR	0x08000a60
5342#define TG3_FW_SBSS_LEN	0xc
5343#define TG3_FW_BSS_ADDR	0x08000a70
5344#define TG3_FW_BSS_LEN	0x10
5345
/* Text (code) segment of the 5701 A0 workaround firmware, loaded into
 * the RX CPU scratch memory by tg3_load_5701_a0_firmware_fix().
 * Opaque binary blob (MIPS machine code) — do not edit by hand.
 */
50da859d 5346static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5347	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5348	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5349	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5350	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5351	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5352	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5353	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5354	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5355	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5356	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5357	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5358	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5359	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5360	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5361	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5362	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5363	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5364	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5365	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5366	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5367	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5368	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5369	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5370	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5371	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5372	0, 0, 0, 0, 0, 0,
5373	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5374	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5375	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5376	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5377	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5378	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5379	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5380	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5381	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5382	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5383	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5384	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5385	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5386	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5387	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5388	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5389	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5390	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5391	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5392	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5393	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5394	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5395	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5396	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5397	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5398	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5399	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5400	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5401	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5402	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5403	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5404	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5405	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5406	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5407	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5408	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5409	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5410	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5411	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5412	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5413	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5414	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5415	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5416	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5417	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5418	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5419	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5420	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5421	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5422	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5423	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5424	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5425	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5426	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5427	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5428	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5429	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5430	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5431	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5432	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5433	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5434	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5435	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5436	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5437	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5438};
5439
/* Read-only data segment of the 5701 A0 workaround firmware.  The
 * words are little-endian ASCII tags (event/error name strings used by
 * the firmware).  Opaque blob — do not edit by hand.
 */
50da859d 5440static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5441	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5442	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5443	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5444	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5445	0x00000000
5446};
5447
/* The firmware data segment is all zeros, so it is compiled out and
 * the loader simply zero-fills that region (data_data == NULL).
 */
5448#if 0 /* All zeros, don't eat up space with it. */
5449u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5450	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5451	0x00000000, 0x00000000, 0x00000000, 0x00000000
5452};
5453#endif
5454
/* Scratch-memory windows for the on-chip RX and TX CPUs, used as the
 * load target by tg3_load_firmware_cpu().
 */
5455#define RX_CPU_SCRATCH_BASE	0x30000
5456#define RX_CPU_SCRATCH_SIZE	0x04000
5457#define TX_CPU_SCRATCH_BASE	0x34000
5458#define TX_CPU_SCRATCH_SIZE	0x04000
5459
5460/* tp->lock is held. */
5461static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5462{
5463 int i;
5464
5d9428de
ES
5465 BUG_ON(offset == TX_CPU_BASE &&
5466 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
1da177e4 5467
b5d3772c
MC
5468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5469 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5470
5471 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5472 return 0;
5473 }
1da177e4
LT
5474 if (offset == RX_CPU_BASE) {
5475 for (i = 0; i < 10000; i++) {
5476 tw32(offset + CPU_STATE, 0xffffffff);
5477 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5478 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5479 break;
5480 }
5481
5482 tw32(offset + CPU_STATE, 0xffffffff);
5483 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5484 udelay(10);
5485 } else {
5486 for (i = 0; i < 10000; i++) {
5487 tw32(offset + CPU_STATE, 0xffffffff);
5488 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5489 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5490 break;
5491 }
5492 }
5493
5494 if (i >= 10000) {
5495 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5496 "and %s CPU\n",
5497 tp->dev->name,
5498 (offset == RX_CPU_BASE ? "RX" : "TX"));
5499 return -ENODEV;
5500 }
ec41c7df
MC
5501
5502 /* Clear firmware's nvram arbitration. */
5503 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5504 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
1da177e4
LT
5505 return 0;
5506}
5507
/* Describes one firmware image to be loaded into a chip CPU: base
 * address and length of each segment plus a pointer to the segment
 * words.  A NULL *_data pointer means "zero-fill that segment".
 * Addresses are in the firmware's own (chip CPU) address space; only
 * the low 16 bits are used as an offset into scratch memory.
 */
5508struct fw_info {
5509	unsigned int text_base;
5510	unsigned int text_len;
50da859d 5511	const u32 *text_data;
1da177e4
LT
5512	unsigned int rodata_base;
5513	unsigned int rodata_len;
50da859d 5514	const u32 *rodata_data;
1da177e4
LT
5515	unsigned int data_base;
5516	unsigned int data_len;
50da859d 5517	const u32 *data_data;
1da177e4
LT
5518};
5519
5520/* tp->lock is held. */
5521static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5522 int cpu_scratch_size, struct fw_info *info)
5523{
ec41c7df 5524 int err, lock_err, i;
1da177e4
LT
5525 void (*write_op)(struct tg3 *, u32, u32);
5526
5527 if (cpu_base == TX_CPU_BASE &&
5528 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5529 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5530 "TX cpu firmware on %s which is 5705.\n",
5531 tp->dev->name);
5532 return -EINVAL;
5533 }
5534
5535 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5536 write_op = tg3_write_mem;
5537 else
5538 write_op = tg3_write_indirect_reg32;
5539
1b628151
MC
5540 /* It is possible that bootcode is still loading at this point.
5541 * Get the nvram lock first before halting the cpu.
5542 */
ec41c7df 5543 lock_err = tg3_nvram_lock(tp);
1da177e4 5544 err = tg3_halt_cpu(tp, cpu_base);
ec41c7df
MC
5545 if (!lock_err)
5546 tg3_nvram_unlock(tp);
1da177e4
LT
5547 if (err)
5548 goto out;
5549
5550 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5551 write_op(tp, cpu_scratch_base + i, 0);
5552 tw32(cpu_base + CPU_STATE, 0xffffffff);
5553 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5554 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5555 write_op(tp, (cpu_scratch_base +
5556 (info->text_base & 0xffff) +
5557 (i * sizeof(u32))),
5558 (info->text_data ?
5559 info->text_data[i] : 0));
5560 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5561 write_op(tp, (cpu_scratch_base +
5562 (info->rodata_base & 0xffff) +
5563 (i * sizeof(u32))),
5564 (info->rodata_data ?
5565 info->rodata_data[i] : 0));
5566 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5567 write_op(tp, (cpu_scratch_base +
5568 (info->data_base & 0xffff) +
5569 (i * sizeof(u32))),
5570 (info->data_data ?
5571 info->data_data[i] : 0));
5572
5573 err = 0;
5574
5575out:
1da177e4
LT
5576 return err;
5577}
5578
5579/* tp->lock is held. */
5580static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5581{
5582 struct fw_info info;
5583 int err, i;
5584
5585 info.text_base = TG3_FW_TEXT_ADDR;
5586 info.text_len = TG3_FW_TEXT_LEN;
5587 info.text_data = &tg3FwText[0];
5588 info.rodata_base = TG3_FW_RODATA_ADDR;
5589 info.rodata_len = TG3_FW_RODATA_LEN;
5590 info.rodata_data = &tg3FwRodata[0];
5591 info.data_base = TG3_FW_DATA_ADDR;
5592 info.data_len = TG3_FW_DATA_LEN;
5593 info.data_data = NULL;
5594
5595 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5596 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5597 &info);
5598 if (err)
5599 return err;
5600
5601 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5602 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5603 &info);
5604 if (err)
5605 return err;
5606
5607 /* Now startup only the RX cpu. */
5608 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5609 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5610
5611 for (i = 0; i < 5; i++) {
5612 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5613 break;
5614 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5615 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5616 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5617 udelay(1000);
5618 }
5619 if (i >= 5) {
5620 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5621 "to set RX CPU PC, is %08x should be %08x\n",
5622 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5623 TG3_FW_TEXT_ADDR);
5624 return -ENODEV;
5625 }
5626 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5627 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5628
5629 return 0;
5630}
5631
1da177e4
LT
5632
/* Version and in-core memory layout of the TSO offload firmware image.
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR mirrors the same
 * historical typo as TG3_FW_RELASE_MINOR; the name is kept as-is.
 */
5633#define TG3_TSO_FW_RELEASE_MAJOR	0x1
5634#define TG3_TSO_FW_RELASE_MINOR	0x6
5635#define TG3_TSO_FW_RELEASE_FIX	0x0
5636#define TG3_TSO_FW_START_ADDR		0x08000000
5637#define TG3_TSO_FW_TEXT_ADDR		0x08000000
5638#define TG3_TSO_FW_TEXT_LEN		0x1aa0
5639#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
5640#define TG3_TSO_FW_RODATA_LEN		0x60
5641#define TG3_TSO_FW_DATA_ADDR		0x08001b20
5642#define TG3_TSO_FW_DATA_LEN		0x30
5643#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
5644#define TG3_TSO_FW_SBSS_LEN		0x2c
5645#define TG3_TSO_FW_BSS_ADDR		0x08001b80
5646#define TG3_TSO_FW_BSS_LEN		0x894
5647
50da859d 5648static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5649 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5650 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5651 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5652 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5653 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5654 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5655 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5656 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5657 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5658 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5659 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5660 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5661 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5662 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5663 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5664 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5665 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5666 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5667 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5668 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5669 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5670 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5671 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5672 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5673 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5674 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5675 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5676 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5677 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5678 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5679 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5680 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5681 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5682 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5683 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5684 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5685 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5686 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5687 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5688 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5689 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5690 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5691 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5692 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5693 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5694 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5695 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5696 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5697 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5698 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5699 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5700 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5701 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5702 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5703 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5704 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5705 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5706 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5707 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5708 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5709 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5710 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5711 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5712 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5713 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5714 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5715 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5716 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5717 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5718 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5719 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5720 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5721 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5722 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5723 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5724 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5725 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5726 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5727 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5728 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5729 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5730 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5731 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5732 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5733 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5734 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5735 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5736 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5737 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5738 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5739 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5740 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5741 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5742 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5743 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5744 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5745 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5746 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5747 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5748 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5749 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5750 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5751 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5752 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5753 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5754 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5755 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5756 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5757 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5758 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5759 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5760 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5761 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5762 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5763 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5764 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5765 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5766 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5767 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5768 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5769 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5770 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5771 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5772 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5773 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5774 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5775 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5776 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5777 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5778 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5779 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5780 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5781 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5782 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5783 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5784 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5785 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5786 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5787 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5788 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5789 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5790 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5791 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5792 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5793 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5794 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5795 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5796 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5797 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5798 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5799 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5800 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5801 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5802 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5803 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5804 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5805 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5806 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5807 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5808 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5809 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5810 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5811 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5812 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5813 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5814 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5815 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5816 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5817 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5818 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5819 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5820 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5821 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5822 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5823 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5824 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5825 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5826 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5827 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5828 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5829 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5830 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5831 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5832 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5833 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5834 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5835 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5836 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5837 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5838 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5839 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5840 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5841 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5842 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5843 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5844 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5845 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5846 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5847 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5848 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5849 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5850 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5851 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5852 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5853 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5854 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5855 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5856 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5857 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5858 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5859 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5860 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5861 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5862 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5863 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5864 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5865 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5866 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5867 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5868 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5869 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5870 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5871 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5872 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5873 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5874 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5875 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5876 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5877 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5878 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5879 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5880 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5881 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5882 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5883 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5884 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5885 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5886 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5887 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5888 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5889 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5890 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5891 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5892 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5893 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5894 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5895 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5896 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5897 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5898 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5899 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5900 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5901 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5902 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5903 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5904 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5905 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5906 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5907 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5908 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5909 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5910 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5911 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5912 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5913 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5914 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5915 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5916 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5917 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5918 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5919 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5920 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5921 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5922 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5923 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5924 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5925 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5926 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5927 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5928 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5929 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5930 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5931 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5932 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5933};
5934
50da859d 5935static const u32 tg3TsoFwRodata[] = {
1da177e4
LT
5936 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5937 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5938 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5939 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5940 0x00000000,
5941};
5942
50da859d 5943static const u32 tg3TsoFwData[] = {
1da177e4
LT
5944 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5945 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5946 0x00000000,
5947};
5948
/* 5705 needs a special version of the TSO firmware.  The macros below
 * describe that image's layout in NIC SRAM: version, load addresses and
 * segment lengths for text/rodata/data plus the sbss/bss areas the
 * firmware zeroes at startup.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2	/* sic: historical misspelling kept; renaming would break references */
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5964
50da859d 5965static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5966 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5967 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5968 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5969 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5970 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5971 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5972 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5973 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5974 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5975 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5976 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5977 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5978 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5979 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5980 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5981 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5982 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5983 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5984 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5985 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5986 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5987 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5988 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5989 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5990 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5991 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5992 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5993 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5994 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5995 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5996 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5997 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5998 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5999 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6000 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6001 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6002 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6003 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6004 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6005 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6006 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6007 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6008 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6009 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6010 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6011 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6012 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6013 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6014 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6015 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6016 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6017 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6018 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6019 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6020 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6021 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6022 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6023 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6024 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6025 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6026 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6027 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6028 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6029 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6030 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6031 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6032 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6033 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6034 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6035 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6036 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6037 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6038 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6039 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6040 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6041 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6042 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6043 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6044 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6045 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6046 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6047 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6048 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6049 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6050 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6051 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6052 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6053 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6054 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6055 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6056 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6057 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6058 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6059 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6060 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6061 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6062 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6063 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6064 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6065 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6066 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6067 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6068 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6069 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6070 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6071 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6072 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6073 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6074 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6075 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6076 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6077 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6078 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6079 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6080 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6081 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6082 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6083 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6084 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6085 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6086 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6087 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6088 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6089 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6090 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6091 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6092 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6093 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6094 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6095 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6096 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6097 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6098 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6099 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6100 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6101 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6102 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6103 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6104 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6105 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6106 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6107 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6108 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6109 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6110 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6111 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6112 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6113 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6114 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6115 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6116 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6117 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6118 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6119 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6120 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6121 0x00000000, 0x00000000, 0x00000000,
6122};
6123
50da859d 6124static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
1da177e4
LT
6125 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6126 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6127 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6128 0x00000000, 0x00000000, 0x00000000,
6129};
6130
50da859d 6131static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
1da177e4
LT
6132 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6133 0x00000000, 0x00000000, 0x00000000,
6134};
6135
6136/* tp->lock is held. */
6137static int tg3_load_tso_firmware(struct tg3 *tp)
6138{
6139 struct fw_info info;
6140 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6141 int err, i;
6142
6143 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6144 return 0;
6145
6146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6147 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6148 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6149 info.text_data = &tg3Tso5FwText[0];
6150 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6151 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6152 info.rodata_data = &tg3Tso5FwRodata[0];
6153 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6154 info.data_len = TG3_TSO5_FW_DATA_LEN;
6155 info.data_data = &tg3Tso5FwData[0];
6156 cpu_base = RX_CPU_BASE;
6157 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6158 cpu_scratch_size = (info.text_len +
6159 info.rodata_len +
6160 info.data_len +
6161 TG3_TSO5_FW_SBSS_LEN +
6162 TG3_TSO5_FW_BSS_LEN);
6163 } else {
6164 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6165 info.text_len = TG3_TSO_FW_TEXT_LEN;
6166 info.text_data = &tg3TsoFwText[0];
6167 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6168 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6169 info.rodata_data = &tg3TsoFwRodata[0];
6170 info.data_base = TG3_TSO_FW_DATA_ADDR;
6171 info.data_len = TG3_TSO_FW_DATA_LEN;
6172 info.data_data = &tg3TsoFwData[0];
6173 cpu_base = TX_CPU_BASE;
6174 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6175 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6176 }
6177
6178 err = tg3_load_firmware_cpu(tp, cpu_base,
6179 cpu_scratch_base, cpu_scratch_size,
6180 &info);
6181 if (err)
6182 return err;
6183
6184 /* Now startup the cpu. */
6185 tw32(cpu_base + CPU_STATE, 0xffffffff);
6186 tw32_f(cpu_base + CPU_PC, info.text_base);
6187
6188 for (i = 0; i < 5; i++) {
6189 if (tr32(cpu_base + CPU_PC) == info.text_base)
6190 break;
6191 tw32(cpu_base + CPU_STATE, 0xffffffff);
6192 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6193 tw32_f(cpu_base + CPU_PC, info.text_base);
6194 udelay(1000);
6195 }
6196 if (i >= 5) {
6197 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6198 "to set CPU PC, is %08x should be %08x\n",
6199 tp->dev->name, tr32(cpu_base + CPU_PC),
6200 info.text_base);
6201 return -ENODEV;
6202 }
6203 tw32(cpu_base + CPU_STATE, 0xffffffff);
6204 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6205 return 0;
6206}
6207
1da177e4
LT
6208
6209/* tp->lock is held. */
986e0aeb 6210static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1da177e4
LT
6211{
6212 u32 addr_high, addr_low;
6213 int i;
6214
6215 addr_high = ((tp->dev->dev_addr[0] << 8) |
6216 tp->dev->dev_addr[1]);
6217 addr_low = ((tp->dev->dev_addr[2] << 24) |
6218 (tp->dev->dev_addr[3] << 16) |
6219 (tp->dev->dev_addr[4] << 8) |
6220 (tp->dev->dev_addr[5] << 0));
6221 for (i = 0; i < 4; i++) {
986e0aeb
MC
6222 if (i == 1 && skip_mac_1)
6223 continue;
1da177e4
LT
6224 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6225 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6226 }
6227
6228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6230 for (i = 0; i < 12; i++) {
6231 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6232 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6233 }
6234 }
6235
6236 addr_high = (tp->dev->dev_addr[0] +
6237 tp->dev->dev_addr[1] +
6238 tp->dev->dev_addr[2] +
6239 tp->dev->dev_addr[3] +
6240 tp->dev->dev_addr[4] +
6241 tp->dev->dev_addr[5]) &
6242 TX_BACKOFF_SEED_MASK;
6243 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6244}
6245
6246static int tg3_set_mac_addr(struct net_device *dev, void *p)
6247{
6248 struct tg3 *tp = netdev_priv(dev);
6249 struct sockaddr *addr = p;
986e0aeb 6250 int err = 0, skip_mac_1 = 0;
1da177e4 6251
f9804ddb
MC
6252 if (!is_valid_ether_addr(addr->sa_data))
6253 return -EINVAL;
6254
1da177e4
LT
6255 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6256
e75f7c90
MC
6257 if (!netif_running(dev))
6258 return 0;
6259
58712ef9 6260 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
986e0aeb 6261 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 6262
986e0aeb
MC
6263 addr0_high = tr32(MAC_ADDR_0_HIGH);
6264 addr0_low = tr32(MAC_ADDR_0_LOW);
6265 addr1_high = tr32(MAC_ADDR_1_HIGH);
6266 addr1_low = tr32(MAC_ADDR_1_LOW);
6267
6268 /* Skip MAC addr 1 if ASF is using it. */
6269 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6270 !(addr1_high == 0 && addr1_low == 0))
6271 skip_mac_1 = 1;
58712ef9 6272 }
986e0aeb
MC
6273 spin_lock_bh(&tp->lock);
6274 __tg3_set_mac_addr(tp, skip_mac_1);
6275 spin_unlock_bh(&tp->lock);
1da177e4 6276
b9ec6c1b 6277 return err;
1da177e4
LT
6278}
6279
6280/* tp->lock is held. */
6281static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6282 dma_addr_t mapping, u32 maxlen_flags,
6283 u32 nic_addr)
6284{
6285 tg3_write_mem(tp,
6286 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6287 ((u64) mapping >> 32));
6288 tg3_write_mem(tp,
6289 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6290 ((u64) mapping & 0xffffffff));
6291 tg3_write_mem(tp,
6292 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6293 maxlen_flags);
6294
6295 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6296 tg3_write_mem(tp,
6297 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6298 nic_addr);
6299}
6300
6301static void __tg3_set_rx_mode(struct net_device *);
d244c892 6302static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
6303{
6304 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6305 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6306 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6307 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6308 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6309 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6310 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6311 }
6312 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6313 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6314 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6315 u32 val = ec->stats_block_coalesce_usecs;
6316
6317 if (!netif_carrier_ok(tp->dev))
6318 val = 0;
6319
6320 tw32(HOSTCC_STAT_COAL_TICKS, val);
6321 }
6322}
1da177e4
LT
6323
6324/* tp->lock is held. */
8e7a22e3 6325static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6326{
6327 u32 val, rdmac_mode;
6328 int i, err, limit;
6329
6330 tg3_disable_ints(tp);
6331
6332 tg3_stop_fw(tp);
6333
6334 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6335
6336 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 6337 tg3_abort_hw(tp, 1);
1da177e4
LT
6338 }
6339
36da4d86 6340 if (reset_phy)
d4d2c558
MC
6341 tg3_phy_reset(tp);
6342
1da177e4
LT
6343 err = tg3_chip_reset(tp);
6344 if (err)
6345 return err;
6346
6347 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6348
d30cdd28
MC
6349 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6350 val = tr32(TG3_CPMU_CTRL);
6351 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6352 tw32(TG3_CPMU_CTRL, val);
6353 }
6354
1da177e4
LT
6355 /* This works around an issue with Athlon chipsets on
6356 * B3 tigon3 silicon. This bit has no effect on any
6357 * other revision. But do not set this on PCI Express
795d01c5 6358 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 6359 */
795d01c5
MC
6360 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6361 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6362 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6363 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6364 }
1da177e4
LT
6365
6366 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6367 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6368 val = tr32(TG3PCI_PCISTATE);
6369 val |= PCISTATE_RETRY_SAME_DMA;
6370 tw32(TG3PCI_PCISTATE, val);
6371 }
6372
0d3031d9
MC
6373 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6374 /* Allow reads and writes to the
6375 * APE register and memory space.
6376 */
6377 val = tr32(TG3PCI_PCISTATE);
6378 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6379 PCISTATE_ALLOW_APE_SHMEM_WR;
6380 tw32(TG3PCI_PCISTATE, val);
6381 }
6382
1da177e4
LT
6383 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6384 /* Enable some hw fixes. */
6385 val = tr32(TG3PCI_MSI_DATA);
6386 val |= (1 << 26) | (1 << 28) | (1 << 29);
6387 tw32(TG3PCI_MSI_DATA, val);
6388 }
6389
6390 /* Descriptor ring init may make accesses to the
6391 * NIC SRAM area to setup the TX descriptors, so we
6392 * can only do this after the hardware has been
6393 * successfully reset.
6394 */
32d8c572
MC
6395 err = tg3_init_rings(tp);
6396 if (err)
6397 return err;
1da177e4 6398
9936bcf6
MC
6399 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6400 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
6401 /* This value is determined during the probe time DMA
6402 * engine test, tg3_test_dma.
6403 */
6404 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6405 }
1da177e4
LT
6406
6407 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6408 GRC_MODE_4X_NIC_SEND_RINGS |
6409 GRC_MODE_NO_TX_PHDR_CSUM |
6410 GRC_MODE_NO_RX_PHDR_CSUM);
6411 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
6412
6413 /* Pseudo-header checksum is done by hardware logic and not
6414 * the offload processers, so make the chip do the pseudo-
6415 * header checksums on receive. For transmit it is more
6416 * convenient to do the pseudo-header checksum in software
6417 * as Linux does that on transmit for us in all cases.
6418 */
6419 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
6420
6421 tw32(GRC_MODE,
6422 tp->grc_mode |
6423 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6424
6425 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6426 val = tr32(GRC_MISC_CFG);
6427 val &= ~0xff;
6428 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6429 tw32(GRC_MISC_CFG, val);
6430
6431 /* Initialize MBUF/DESC pool. */
cbf46853 6432 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
6433 /* Do nothing. */
6434 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6435 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6437 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6438 else
6439 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6440 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6441 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6442 }
1da177e4
LT
6443 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6444 int fw_len;
6445
6446 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6447 TG3_TSO5_FW_RODATA_LEN +
6448 TG3_TSO5_FW_DATA_LEN +
6449 TG3_TSO5_FW_SBSS_LEN +
6450 TG3_TSO5_FW_BSS_LEN);
6451 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6452 tw32(BUFMGR_MB_POOL_ADDR,
6453 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6454 tw32(BUFMGR_MB_POOL_SIZE,
6455 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6456 }
1da177e4 6457
0f893dc6 6458 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
6459 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6460 tp->bufmgr_config.mbuf_read_dma_low_water);
6461 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6462 tp->bufmgr_config.mbuf_mac_rx_low_water);
6463 tw32(BUFMGR_MB_HIGH_WATER,
6464 tp->bufmgr_config.mbuf_high_water);
6465 } else {
6466 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6467 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6468 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6469 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6470 tw32(BUFMGR_MB_HIGH_WATER,
6471 tp->bufmgr_config.mbuf_high_water_jumbo);
6472 }
6473 tw32(BUFMGR_DMA_LOW_WATER,
6474 tp->bufmgr_config.dma_low_water);
6475 tw32(BUFMGR_DMA_HIGH_WATER,
6476 tp->bufmgr_config.dma_high_water);
6477
6478 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6479 for (i = 0; i < 2000; i++) {
6480 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6481 break;
6482 udelay(10);
6483 }
6484 if (i >= 2000) {
6485 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6486 tp->dev->name);
6487 return -ENODEV;
6488 }
6489
6490 /* Setup replenish threshold. */
f92905de
MC
6491 val = tp->rx_pending / 8;
6492 if (val == 0)
6493 val = 1;
6494 else if (val > tp->rx_std_max_post)
6495 val = tp->rx_std_max_post;
b5d3772c
MC
6496 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6497 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6498 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6499
6500 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6501 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6502 }
f92905de
MC
6503
6504 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6505
6506 /* Initialize TG3_BDINFO's at:
6507 * RCVDBDI_STD_BD: standard eth size rx ring
6508 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6509 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6510 *
6511 * like so:
6512 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6513 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6514 * ring attribute flags
6515 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6516 *
6517 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6518 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6519 *
6520 * The size of each ring is fixed in the firmware, but the location is
6521 * configurable.
6522 */
6523 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6524 ((u64) tp->rx_std_mapping >> 32));
6525 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6526 ((u64) tp->rx_std_mapping & 0xffffffff));
6527 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6528 NIC_SRAM_RX_BUFFER_DESC);
6529
6530 /* Don't even try to program the JUMBO/MINI buffer descriptor
6531 * configs on 5705.
6532 */
6533 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6534 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6535 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6536 } else {
6537 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6538 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6539
6540 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6541 BDINFO_FLAGS_DISABLED);
6542
6543 /* Setup replenish threshold. */
6544 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6545
0f893dc6 6546 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6547 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6548 ((u64) tp->rx_jumbo_mapping >> 32));
6549 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6550 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6551 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6552 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6553 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6554 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6555 } else {
6556 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6557 BDINFO_FLAGS_DISABLED);
6558 }
6559
6560 }
6561
6562 /* There is only one send ring on 5705/5750, no need to explicitly
6563 * disable the others.
6564 */
6565 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6566 /* Clear out send RCB ring in SRAM. */
6567 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6568 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6569 BDINFO_FLAGS_DISABLED);
6570 }
6571
6572 tp->tx_prod = 0;
6573 tp->tx_cons = 0;
6574 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6575 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6576
6577 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6578 tp->tx_desc_mapping,
6579 (TG3_TX_RING_SIZE <<
6580 BDINFO_FLAGS_MAXLEN_SHIFT),
6581 NIC_SRAM_TX_BUFFER_DESC);
6582
6583 /* There is only one receive return ring on 5705/5750, no need
6584 * to explicitly disable the others.
6585 */
6586 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6587 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6588 i += TG3_BDINFO_SIZE) {
6589 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6590 BDINFO_FLAGS_DISABLED);
6591 }
6592 }
6593
6594 tp->rx_rcb_ptr = 0;
6595 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6596
6597 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6598 tp->rx_rcb_mapping,
6599 (TG3_RX_RCB_RING_SIZE(tp) <<
6600 BDINFO_FLAGS_MAXLEN_SHIFT),
6601 0);
6602
6603 tp->rx_std_ptr = tp->rx_pending;
6604 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6605 tp->rx_std_ptr);
6606
0f893dc6 6607 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6608 tp->rx_jumbo_pending : 0;
6609 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6610 tp->rx_jumbo_ptr);
6611
6612 /* Initialize MAC address and backoff seed. */
986e0aeb 6613 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
6614
6615 /* MTU + ethernet header + FCS + optional VLAN tag */
6616 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6617
6618 /* The slot time is changed by tg3_setup_phy if we
6619 * run at gigabit with half duplex.
6620 */
6621 tw32(MAC_TX_LENGTHS,
6622 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6623 (6 << TX_LENGTHS_IPG_SHIFT) |
6624 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6625
6626 /* Receive rules. */
6627 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6628 tw32(RCVLPC_CONFIG, 0x0181);
6629
6630 /* Calculate RDMAC_MODE setting early, we need it to determine
6631 * the RCVLPC_STATE_ENABLE mask.
6632 */
6633 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6634 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6635 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6636 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6637 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 6638
d30cdd28
MC
6639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6640 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6641 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6642 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6643
85e94ced
MC
6644 /* If statement applies to 5705 and 5750 PCI devices only */
6645 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6646 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6647 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4 6648 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
c13e3713 6649 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
6650 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6651 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6652 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6653 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6654 }
6655 }
6656
85e94ced
MC
6657 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6658 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6659
1da177e4
LT
6660 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6661 rdmac_mode |= (1 << 27);
1da177e4
LT
6662
6663 /* Receive/send statistics. */
1661394e
MC
6664 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6665 val = tr32(RCVLPC_STATS_ENABLE);
6666 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6667 tw32(RCVLPC_STATS_ENABLE, val);
6668 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6669 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
1da177e4
LT
6670 val = tr32(RCVLPC_STATS_ENABLE);
6671 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6672 tw32(RCVLPC_STATS_ENABLE, val);
6673 } else {
6674 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6675 }
6676 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6677 tw32(SNDDATAI_STATSENAB, 0xffffff);
6678 tw32(SNDDATAI_STATSCTRL,
6679 (SNDDATAI_SCTRL_ENABLE |
6680 SNDDATAI_SCTRL_FASTUPD));
6681
6682 /* Setup host coalescing engine. */
6683 tw32(HOSTCC_MODE, 0);
6684 for (i = 0; i < 2000; i++) {
6685 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6686 break;
6687 udelay(10);
6688 }
6689
d244c892 6690 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6691
6692 /* set status block DMA address */
6693 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6694 ((u64) tp->status_mapping >> 32));
6695 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6696 ((u64) tp->status_mapping & 0xffffffff));
6697
6698 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6699 /* Status/statistics block address. See tg3_timer,
6700 * the tg3_periodic_fetch_stats call there, and
6701 * tg3_get_stats to see how this works for 5705/5750 chips.
6702 */
1da177e4
LT
6703 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6704 ((u64) tp->stats_mapping >> 32));
6705 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6706 ((u64) tp->stats_mapping & 0xffffffff));
6707 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6708 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6709 }
6710
6711 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6712
6713 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6714 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6715 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6716 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6717
6718 /* Clear statistics/status block in chip, and status block in ram. */
6719 for (i = NIC_SRAM_STATS_BLK;
6720 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6721 i += sizeof(u32)) {
6722 tg3_write_mem(tp, i, 0);
6723 udelay(40);
6724 }
6725 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6726
c94e3941
MC
6727 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6728 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6729 /* reset to prevent losing 1st rx packet intermittently */
6730 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6731 udelay(10);
6732 }
6733
1da177e4
LT
6734 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6735 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
e8f3f6ca
MC
6736 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6737 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6738 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6739 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
6740 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6741 udelay(40);
6742
314fba34 6743 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9d26e213 6744 * If TG3_FLG2_IS_NIC is zero, we should read the
314fba34
MC
6745 * register to preserve the GPIO settings for LOMs. The GPIOs,
6746 * whether used as inputs or outputs, are set by boot code after
6747 * reset.
6748 */
9d26e213 6749 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
314fba34
MC
6750 u32 gpio_mask;
6751
9d26e213
MC
6752 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6753 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6754 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
6755
6756 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6757 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6758 GRC_LCLCTRL_GPIO_OUTPUT3;
6759
af36e6b6
MC
6760 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6761 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6762
aaf84465 6763 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
6764 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6765
6766 /* GPIO1 must be driven high for eeprom write protect */
9d26e213
MC
6767 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6768 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6769 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 6770 }
1da177e4
LT
6771 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6772 udelay(100);
6773
09ee929c 6774 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 6775 tp->last_tag = 0;
1da177e4
LT
6776
6777 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6778 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6779 udelay(40);
6780 }
6781
6782 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6783 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6784 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6785 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6786 WDMAC_MODE_LNGREAD_ENAB);
6787
85e94ced
MC
6788 /* If statement applies to 5705 and 5750 PCI devices only */
6789 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6790 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6792 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6793 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6794 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6795 /* nothing */
6796 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6797 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6798 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6799 val |= WDMAC_MODE_RX_ACCEL;
6800 }
6801 }
6802
d9ab5ad1 6803 /* Enable host coalescing bug fix */
af36e6b6 6804 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
d30cdd28 6805 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
9936bcf6
MC
6806 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6807 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
d9ab5ad1
MC
6808 val |= (1 << 29);
6809
1da177e4
LT
6810 tw32_f(WDMAC_MODE, val);
6811 udelay(40);
6812
9974a356
MC
6813 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6814 u16 pcix_cmd;
6815
6816 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6817 &pcix_cmd);
1da177e4 6818 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
6819 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6820 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6821 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
6822 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6823 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6824 }
9974a356
MC
6825 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6826 pcix_cmd);
1da177e4
LT
6827 }
6828
6829 tw32_f(RDMAC_MODE, rdmac_mode);
6830 udelay(40);
6831
6832 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6833 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6834 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
6835
6836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6837 tw32(SNDDATAC_MODE,
6838 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6839 else
6840 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6841
1da177e4
LT
6842 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6843 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6844 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6845 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1da177e4
LT
6846 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6847 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
1da177e4
LT
6848 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6849 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6850
6851 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6852 err = tg3_load_5701_a0_firmware_fix(tp);
6853 if (err)
6854 return err;
6855 }
6856
1da177e4
LT
6857 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6858 err = tg3_load_tso_firmware(tp);
6859 if (err)
6860 return err;
6861 }
1da177e4
LT
6862
6863 tp->tx_mode = TX_MODE_ENABLE;
6864 tw32_f(MAC_TX_MODE, tp->tx_mode);
6865 udelay(100);
6866
6867 tp->rx_mode = RX_MODE_ENABLE;
9936bcf6
MC
6868 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
af36e6b6
MC
6870 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6871
1da177e4
LT
6872 tw32_f(MAC_RX_MODE, tp->rx_mode);
6873 udelay(10);
6874
6875 if (tp->link_config.phy_is_low_power) {
6876 tp->link_config.phy_is_low_power = 0;
6877 tp->link_config.speed = tp->link_config.orig_speed;
6878 tp->link_config.duplex = tp->link_config.orig_duplex;
6879 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6880 }
6881
6882 tp->mi_mode = MAC_MI_MODE_BASE;
6883 tw32_f(MAC_MI_MODE, tp->mi_mode);
6884 udelay(80);
6885
6886 tw32(MAC_LED_CTRL, tp->led_ctrl);
6887
6888 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6889 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6890 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6891 udelay(10);
6892 }
6893 tw32_f(MAC_RX_MODE, tp->rx_mode);
6894 udelay(10);
6895
6896 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6897 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6898 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6899 /* Set drive transmission level to 1.2V */
6900 /* only if the signal pre-emphasis bit is not set */
6901 val = tr32(MAC_SERDES_CFG);
6902 val &= 0xfffff000;
6903 val |= 0x880;
6904 tw32(MAC_SERDES_CFG, val);
6905 }
6906 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6907 tw32(MAC_SERDES_CFG, 0x616000);
6908 }
6909
6910 /* Prevent chip from dropping frames when flow control
6911 * is enabled.
6912 */
6913 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6914
6915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6916 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6917 /* Use hardware link auto-negotiation */
6918 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6919 }
6920
d4d2c558
MC
6921 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6922 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6923 u32 tmp;
6924
6925 tmp = tr32(SERDES_RX_CTRL);
6926 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6927 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6928 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6929 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6930 }
6931
36da4d86 6932 err = tg3_setup_phy(tp, 0);
1da177e4
LT
6933 if (err)
6934 return err;
6935
715116a1
MC
6936 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6937 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
1da177e4
LT
6938 u32 tmp;
6939
6940 /* Clear CRC stats. */
569a5df8
MC
6941 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6942 tg3_writephy(tp, MII_TG3_TEST1,
6943 tmp | MII_TG3_TEST1_CRC_EN);
1da177e4
LT
6944 tg3_readphy(tp, 0x14, &tmp);
6945 }
6946 }
6947
6948 __tg3_set_rx_mode(tp->dev);
6949
6950 /* Initialize receive rules. */
6951 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6952 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6953 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6954 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6955
4cf78e4f 6956 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6957 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6958 limit = 8;
6959 else
6960 limit = 16;
6961 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6962 limit -= 4;
6963 switch (limit) {
6964 case 16:
6965 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6966 case 15:
6967 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6968 case 14:
6969 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6970 case 13:
6971 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6972 case 12:
6973 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6974 case 11:
6975 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6976 case 10:
6977 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6978 case 9:
6979 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6980 case 8:
6981 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6982 case 7:
6983 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6984 case 6:
6985 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6986 case 5:
6987 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6988 case 4:
6989 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6990 case 3:
6991 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6992 case 2:
6993 case 1:
6994
6995 default:
6996 break;
6997 };
6998
9ce768ea
MC
6999 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7000 /* Write our heartbeat update interval to APE. */
7001 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7002 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 7003
1da177e4
LT
7004 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7005
1da177e4
LT
7006 return 0;
7007}
7008
7009/* Called at device open time to get the chip ready for
7010 * packet processing. Invoked with tp->lock held.
7011 */
8e7a22e3 7012static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
7013{
7014 int err;
7015
7016 /* Force the chip into D0. */
bc1c7567 7017 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
7018 if (err)
7019 goto out;
7020
7021 tg3_switch_clocks(tp);
7022
7023 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7024
8e7a22e3 7025 err = tg3_reset_hw(tp, reset_phy);
1da177e4
LT
7026
7027out:
7028 return err;
7029}
7030
/* Accumulate a 32-bit hardware statistics register (REG) into a 64-bit
 * software counter (PSTAT, which has .low/.high u32 halves).  If the
 * addition wraps the .low half (detected by the unsigned-compare trick
 * "sum < addend"), carry one into .high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7037
/* Fold the chip's 32-bit MAC and receive-list-placement statistics
 * registers into the 64-bit software counters in tp->hw_stats.
 * Called once per second from tg3_timer() on 5705-plus chips (see the
 * TG3_FLG2_5705_PLUS check there); skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Counters are only collected while the link is up. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* MAC transmit statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* MAC receive statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement statistics. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7078
/* Periodic driver timer: kicks stuck interrupts (non-tagged status),
 * gathers statistics and polls link state once per second, and sends
 * the ASF firmware heartbeat.  Re-arms itself at tp->timer_offset.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* If interrupt processing is being synchronized (irq_sync set),
	 * skip all hardware access and just re-arm the timer.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block pending: force an interrupt so it
			 * gets serviced.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise ask the coalescing engine to update
			 * the status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* If the write DMA engine dropped its enable bit, the
		 * chip has hung: schedule a full reset from process
		 * context and bail out (reset_task re-arms the timer
		 * via TG3_FLG2_RESTART_TIMER).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll link state via the MAC status register. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but the PCS reports a state change. */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* Link was down but sync/signal is now present. */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode to reset
					 * the SERDES link state machine.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the firmware's doorbell (bit 14). */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7198
81789ef5 7199static int tg3_request_irq(struct tg3 *tp)
fcfa0a32 7200{
7d12e780 7201 irq_handler_t fn;
fcfa0a32
MC
7202 unsigned long flags;
7203 struct net_device *dev = tp->dev;
7204
7205 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7206 fn = tg3_msi;
7207 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7208 fn = tg3_msi_1shot;
1fb9df5d 7209 flags = IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7210 } else {
7211 fn = tg3_interrupt;
7212 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7213 fn = tg3_interrupt_tagged;
1fb9df5d 7214 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7215 }
7216 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7217}
7218
7938109f
MC
7219static int tg3_test_interrupt(struct tg3 *tp)
7220{
7221 struct net_device *dev = tp->dev;
b16250e3 7222 int err, i, intr_ok = 0;
7938109f 7223
d4bc3927
MC
7224 if (!netif_running(dev))
7225 return -ENODEV;
7226
7938109f
MC
7227 tg3_disable_ints(tp);
7228
7229 free_irq(tp->pdev->irq, dev);
7230
7231 err = request_irq(tp->pdev->irq, tg3_test_isr,
1fb9df5d 7232 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7938109f
MC
7233 if (err)
7234 return err;
7235
38f3843e 7236 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
7237 tg3_enable_ints(tp);
7238
7239 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7240 HOSTCC_MODE_NOW);
7241
7242 for (i = 0; i < 5; i++) {
b16250e3
MC
7243 u32 int_mbox, misc_host_ctrl;
7244
09ee929c
MC
7245 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7246 TG3_64BIT_REG_LOW);
b16250e3
MC
7247 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7248
7249 if ((int_mbox != 0) ||
7250 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7251 intr_ok = 1;
7938109f 7252 break;
b16250e3
MC
7253 }
7254
7938109f
MC
7255 msleep(10);
7256 }
7257
7258 tg3_disable_ints(tp);
7259
7260 free_irq(tp->pdev->irq, dev);
6aa20a22 7261
fcfa0a32 7262 err = tg3_request_irq(tp);
7938109f
MC
7263
7264 if (err)
7265 return err;
7266
b16250e3 7267 if (intr_ok)
7938109f
MC
7268 return 0;
7269
7270 return -EIO;
7271}
7272
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test when MSI is not in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	/* Release the MSI vector and fall back to legacy INTx. */
	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* On reinit failure, drop the IRQ we just requested so the
	 * caller sees a consistent (unregistered) state.
	 */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7333
1da177e4
LT
/* ndo_open handler: power the chip up to D0, allocate the consistent
 * DMA rings, enable MSI when supported (with the INTx hardware-bug
 * workaround for 5780-class parts), bring the hardware up, start the
 * housekeeping timer and finally enable interrupts and the TX queue.
 * Every failure path unwinds whatever was set up before it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Hardware bug - MSI won't work if INTX disabled. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				pci_intx(tp->pdev, 1);

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo the MSI enable and ring allocation done above. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status lets the timer run at 1 HZ; otherwise poll
		 * ten times per second.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Verify MSI actually delivers; tg3_test_msi() falls back
		 * to INTx itself, so err here means even that failed.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7473
#if 0
/* Compiled-out debug helper: dumps PCI status, the mode/status
 * registers of every major MAC / DMA / host-coalescing block, the
 * BD rings held in NIC SRAM and the software status and statistics
 * blocks via printk.  Enabled by hand together with the #if 0 call
 * site in tg3_close().
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	/* NOTE(review): tp->regs is an ioremap'ed cookie; holding it in an
	 * unsigned long for readl() offsets is pre-__iomem style - confirm
	 * before re-enabling this dead code.
	 */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7701
7702static struct net_device_stats *tg3_get_stats(struct net_device *);
7703static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7704
/* ndo_stop handler: quiesce NAPI and the reset worker, stop the TX
 * queue and timer, halt the chip under the full lock, release the IRQ
 * (and MSI), snapshot the statistics so they survive the ring teardown,
 * free the DMA rings and drop the device into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Stop softirq processing and any pending reset work first so
	 * nothing races with the teardown below.
	 */
	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve the final counters; hw_stats goes away with the
	 * consistent memory below.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7748
7749static inline unsigned long get_stat64(tg3_stat64_t *val)
7750{
7751 unsigned long ret;
7752
7753#if (BITS_PER_LONG == 32)
7754 ret = val->low;
7755#else
7756 ret = ((u64)val->high << 32) | ((u64)val->low);
7757#endif
7758 return ret;
7759}
7760
/* Return the cumulative RX CRC error count.  5700/5701 copper parts
 * keep this counter in the PHY (reg 0x14 after enabling the counter
 * via MII_TG3_TEST1); the read clears it, so the running total is
 * accumulated in tp->phy_crc_errors.  All other chips report it in
 * the hardware statistics block.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* Serialize MII access against other PHY users. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7786
/* Add the live hardware counter for @member on top of the snapshot
 * saved at the last close.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)

/* Refresh and return the driver's ethtool statistics: each counter is
 * the pre-close snapshot plus the current hardware statistics block.
 * Falls back to the snapshot alone when hw_stats is not mapped (device
 * closed).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7878
/* ndo_get_stats handler: synthesize the standard netdev counters from
 * the hardware statistics block, added on top of the snapshot taken at
 * the last close.  Returns the snapshot alone when hw_stats is not
 * mapped (device closed).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may live in the PHY on 5700/5701 copper parts. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7938
7939static inline u32 calc_crc(unsigned char *buf, int len)
7940{
7941 u32 reg;
7942 u32 tmp;
7943 int j, k;
7944
7945 reg = 0xffffffff;
7946
7947 for (j = 0; j < len; j++) {
7948 reg ^= buf[j];
7949
7950 for (k = 0; k < 8; k++) {
7951 tmp = reg & 0x01;
7952
7953 reg >>= 1;
7954
7955 if (tmp) {
7956 reg ^= 0xedb88320;
7957 }
7958 }
7959 }
7960
7961 return ~reg;
7962}
7963
7964static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7965{
7966 /* accept or reject all multicast frames */
7967 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7968 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7969 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7970 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7971}
7972
/* Core rx-mode programming; caller must hold the full lock.  Computes
 * the promiscuous/VLAN-tag-strip bits for MAC_RX_MODE and programs the
 * multicast hash filter registers according to dev->flags and the
 * device's multicast list.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address: the top 7 bits of the inverted CRC
		 * select one of 128 filter bits (4 regs x 32 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when the value actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8036
/* ndo_set_rx_mode entry point: apply filter settings under the full
 * lock.  Nothing to do while the interface is down; tg3_open() will
 * program the filters when the device comes up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
8048
8049#define TG3_REGDUMP_LEN (32 * 1024)
8050
/* ethtool get_regs_len: the register dump is always a fixed 32KB
 * window (TG3_REGDUMP_LEN).
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8055
/* ethtool get_regs: copy selected register ranges into a zeroed 32KB
 * buffer, each value placed at its own register offset so unread gaps
 * stay zero.  Skipped entirely while the PHY is powered down, since
 * touching registers then is unsafe.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Helpers: position p at the register's offset within the dump buffer
 * and read one 32-bit register (or a range of them) from the chip.
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only meaningful when flash is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8128
/* ethtool get_eeprom_len: report the NVRAM size probed at init time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8135
8136static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
1820180b 8137static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
8138
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is read in 32-bit words, so an
 * unaligned head and tail are handled by reading whole words and
 * copying only the requested bytes.  eeprom->len tracks how much was
 * actually copied, even on partial failure.
 * NOTE(review): cpu_to_le32() applied to a plain u32 suggests the
 * words come back byte-swapped relative to the byte stream - confirm
 * against tg3_nvram_read() before changing.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8200
6aa20a22 8201static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
1da177e4
LT
8202
/* ethtool set_eeprom: write eeprom->len bytes to NVRAM at
 * eeprom->offset.  NVRAM writes are word-based, so for unaligned head
 * or tail bytes the surrounding words are read back first and merged
 * with the user data in a temporary bounce buffer.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge preserved head/tail words with the user bytes. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	/* Free the bounce buffer if one was allocated. */
	if (buf != data)
		kfree(buf);

	return ret;
}
8261
/* ethtool get_settings: report supported modes based on the PHY type
 * (copper vs. serdes/fibre, 10/100-only parts), the configured
 * advertisement mask, and - when the interface is up - the currently
 * negotiated speed and duplex.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Speed/duplex are only meaningful while the link is managed. */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
6aa20a22 8296
1da177e4
LT
8297static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8298{
8299 struct tg3 *tp = netdev_priv(dev);
6aa20a22
JG
8300
8301 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
8302 /* These are the only valid advertisement bits allowed. */
8303 if (cmd->autoneg == AUTONEG_ENABLE &&
8304 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8305 ADVERTISED_1000baseT_Full |
8306 ADVERTISED_Autoneg |
8307 ADVERTISED_FIBRE)))
8308 return -EINVAL;
37ff238d
MC
8309 /* Fiber can only do SPEED_1000. */
8310 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8311 (cmd->speed != SPEED_1000))
8312 return -EINVAL;
8313 /* Copper cannot force SPEED_1000. */
8314 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8315 (cmd->speed == SPEED_1000))
8316 return -EINVAL;
8317 else if ((cmd->speed == SPEED_1000) &&
8318 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8319 return -EINVAL;
1da177e4 8320
f47c11ee 8321 tg3_full_lock(tp, 0);
1da177e4
LT
8322
8323 tp->link_config.autoneg = cmd->autoneg;
8324 if (cmd->autoneg == AUTONEG_ENABLE) {
405d8e5c
AG
8325 tp->link_config.advertising = (cmd->advertising |
8326 ADVERTISED_Autoneg);
1da177e4
LT
8327 tp->link_config.speed = SPEED_INVALID;
8328 tp->link_config.duplex = DUPLEX_INVALID;
8329 } else {
8330 tp->link_config.advertising = 0;
8331 tp->link_config.speed = cmd->speed;
8332 tp->link_config.duplex = cmd->duplex;
8333 }
6aa20a22 8334
24fcad6b
MC
8335 tp->link_config.orig_speed = tp->link_config.speed;
8336 tp->link_config.orig_duplex = tp->link_config.duplex;
8337 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8338
1da177e4
LT
8339 if (netif_running(dev))
8340 tg3_setup_phy(tp, 1);
8341
f47c11ee 8342 tg3_full_unlock(tp);
6aa20a22 8343
1da177e4
LT
8344 return 0;
8345}
6aa20a22 8346
1da177e4
LT
8347static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8348{
8349 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8350
1da177e4
LT
8351 strcpy(info->driver, DRV_MODULE_NAME);
8352 strcpy(info->version, DRV_MODULE_VERSION);
c4e6575c 8353 strcpy(info->fw_version, tp->fw_ver);
1da177e4
LT
8354 strcpy(info->bus_info, pci_name(tp->pdev));
8355}
6aa20a22 8356
1da177e4
LT
8357static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8358{
8359 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8360
a85feb8c
GZ
8361 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8362 wol->supported = WAKE_MAGIC;
8363 else
8364 wol->supported = 0;
1da177e4
LT
8365 wol->wolopts = 0;
8366 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8367 wol->wolopts = WAKE_MAGIC;
8368 memset(&wol->sopass, 0, sizeof(wol->sopass));
8369}
6aa20a22 8370
1da177e4
LT
8371static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8372{
8373 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8374
1da177e4
LT
8375 if (wol->wolopts & ~WAKE_MAGIC)
8376 return -EINVAL;
8377 if ((wol->wolopts & WAKE_MAGIC) &&
a85feb8c 8378 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
1da177e4 8379 return -EINVAL;
6aa20a22 8380
f47c11ee 8381 spin_lock_bh(&tp->lock);
1da177e4
LT
8382 if (wol->wolopts & WAKE_MAGIC)
8383 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8384 else
8385 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 8386 spin_unlock_bh(&tp->lock);
6aa20a22 8387
1da177e4
LT
8388 return 0;
8389}
6aa20a22 8390
1da177e4
LT
8391static u32 tg3_get_msglevel(struct net_device *dev)
8392{
8393 struct tg3 *tp = netdev_priv(dev);
8394 return tp->msg_enable;
8395}
6aa20a22 8396
1da177e4
LT
8397static void tg3_set_msglevel(struct net_device *dev, u32 value)
8398{
8399 struct tg3 *tp = netdev_priv(dev);
8400 tp->msg_enable = value;
8401}
6aa20a22 8402
1da177e4
LT
/* ethtool ->set_tso: enable/disable TCP segmentation offload.
 *
 * Chips without TSO capability may only accept "off".  On HW_TSO_2
 * parts other than the 5906, IPv6 TSO (NETIF_F_TSO6) follows the
 * requested setting, and the 5761 additionally gains TSO-with-ECN.
 * The generic ethtool_op_set_tso() then flips NETIF_F_TSO itself.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;	/* can't enable what HW lacks */
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
6aa20a22 8423
1da177e4
LT
/* ethtool ->nway_reset: restart autonegotiation on the copper PHY.
 *
 * Returns -EAGAIN if the interface is down, -EINVAL for SERDES
 * devices (no MII PHY) or when autoneg is neither enabled nor in
 * parallel-detect mode, 0 on success.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is deliberately read twice here,
	 * presumably to flush a stale/latched value before testing
	 * the live one -- confirm against PHY errata before changing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Restart autoneg, forcing ANENABLE back on for the
		 * parallel-detect case.
		 */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
6aa20a22 8450
1da177e4
LT
8451static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8452{
8453 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8454
1da177e4
LT
8455 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8456 ering->rx_mini_max_pending = 0;
4f81c32b
MC
8457 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8458 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8459 else
8460 ering->rx_jumbo_max_pending = 0;
8461
8462 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
8463
8464 ering->rx_pending = tp->rx_pending;
8465 ering->rx_mini_pending = 0;
4f81c32b
MC
8466 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8467 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8468 else
8469 ering->rx_jumbo_pending = 0;
8470
1da177e4
LT
8471 ering->tx_pending = tp->tx_pending;
8472}
6aa20a22 8473
1da177e4
LT
/* ethtool ->set_ringparam: resize the RX/TX rings.
 *
 * tx_pending must exceed MAX_SKB_FRAGS so a maximally-fragmented
 * packet always fits, and three times that on TSO_BUG chips whose
 * workaround can expand a packet in the driver.  If the device is
 * up it is halted and restarted with the new sizes; returns the
 * tg3_restart_hw() error, if any.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;	/* have tg3_full_lock sync irqs too */
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Full chip reset so the new ring sizes take effect. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8513
1da177e4
LT
8514static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8515{
8516 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8517
1da177e4
LT
8518 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8519 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8520 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8521}
6aa20a22 8522
1da177e4
LT
/* ethtool ->set_pauseparam: configure flow control.
 *
 * Updates the pause-autoneg / RX-pause / TX-pause flag bits and, if
 * the device is running, performs a full halt + restart so the new
 * settings take effect.  Returns the tg3_restart_hw() error, if any.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;	/* have tg3_full_lock sync irqs too */
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {
		/* Full chip reset so the new pause config is applied. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8559
1da177e4
LT
8560static u32 tg3_get_rx_csum(struct net_device *dev)
8561{
8562 struct tg3 *tp = netdev_priv(dev);
8563 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8564}
6aa20a22 8565
1da177e4
LT
8566static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8567{
8568 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8569
1da177e4
LT
8570 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8571 if (data != 0)
8572 return -EINVAL;
8573 return 0;
8574 }
6aa20a22 8575
f47c11ee 8576 spin_lock_bh(&tp->lock);
1da177e4
LT
8577 if (data)
8578 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8579 else
8580 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 8581 spin_unlock_bh(&tp->lock);
6aa20a22 8582
1da177e4
LT
8583 return 0;
8584}
6aa20a22 8585
1da177e4
LT
8586static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8587{
8588 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8589
1da177e4
LT
8590 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8591 if (data != 0)
8592 return -EINVAL;
8593 return 0;
8594 }
6aa20a22 8595
af36e6b6 8596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 8597 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
8598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6460d948 8600 ethtool_op_set_tx_ipv6_csum(dev, data);
1da177e4 8601 else
9c27dbdf 8602 ethtool_op_set_tx_csum(dev, data);
1da177e4
LT
8603
8604 return 0;
8605}
8606
b9f2c044 8607static int tg3_get_sset_count (struct net_device *dev, int sset)
1da177e4 8608{
b9f2c044
JG
8609 switch (sset) {
8610 case ETH_SS_TEST:
8611 return TG3_NUM_TEST;
8612 case ETH_SS_STATS:
8613 return TG3_NUM_STATS;
8614 default:
8615 return -EOPNOTSUPP;
8616 }
4cafd3f5
MC
8617}
8618
1da177e4
LT
8619static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8620{
8621 switch (stringset) {
8622 case ETH_SS_STATS:
8623 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8624 break;
4cafd3f5
MC
8625 case ETH_SS_TEST:
8626 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8627 break;
1da177e4
LT
8628 default:
8629 WARN_ON(1); /* we need a WARN() */
8630 break;
8631 }
8632}
8633
/* ethtool ->phys_id: blink the port LEDs so the adapter can be found.
 *
 * Alternates all-LEDs-on / all-off at 500 ms intervals for 'data'
 * seconds (2 when 0 is passed), then restores the saved LED config.
 * Interruptible; returns -EAGAIN if the interface is down.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	/* Two half-second phases per one-second blink cycle. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			/* Force every LED on, with traffic LED blinking. */
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			/* Force every LED off. */
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	/* Restore normal LED behavior. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8665
/* ethtool ->get_ethtool_stats: refresh the driver's estats block via
 * tg3_get_estats() and copy it into the caller's u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8672
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* Self-test: validate the NVRAM contents.
 *
 * Detects the NVRAM layout from the magic word at offset 0, reads the
 * relevant region, and verifies it: a simple byte checksum for
 * firmware-selfboot images, a per-byte parity check for hardware-
 * selfboot images, or CRCs over the bootstrap and manufacturing
 * blocks for the standard layout.  Returns 0 on success, -EIO on a
 * read failure or checksum mismatch, -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Choose how much to read based on the detected format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;	/* unknown selfboot revision: skip */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;	/* short read */

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Whole image must sum to zero (mod 256). */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  Bytes
		 * 0, 8, and 16/17 hold packed parity bits for the
		 * surrounding data bytes and are skipped over (note
		 * the extra i++ inside the branches).
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte must have odd parity: popcount plus
		 * its parity bit must be odd.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8790
ca43007a
MC
8791#define TG3_SERDES_TIMEOUT_SEC 2
8792#define TG3_COPPER_TIMEOUT_SEC 6
8793
8794static int tg3_test_link(struct tg3 *tp)
8795{
8796 int i, max;
8797
8798 if (!netif_running(tp->dev))
8799 return -ENODEV;
8800
4c987487 8801 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
8802 max = TG3_SERDES_TIMEOUT_SEC;
8803 else
8804 max = TG3_COPPER_TIMEOUT_SEC;
8805
8806 for (i = 0; i < max; i++) {
8807 if (netif_carrier_ok(tp->dev))
8808 return 0;
8809
8810 if (msleep_interruptible(1000))
8811 break;
8812 }
8813
8814 return -EIO;
8815}
8816
/* Only test the commonly used registers */
/* Self-test: exercise the commonly used MAC/DMA registers.
 *
 * For each table entry (filtered by chip generation flags), the
 * register is written with all-zeros and then all-ones, verifying
 * after each write that the read-only bits kept their saved value
 * and the read/write bits took the written value.  The original
 * content is restored afterwards.  Returns 0, or -EIO on the first
 * mismatch (logging the offset when hw messages are enabled).
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that don't apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9037
7942e1db
MC
9038static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9039{
f71e1309 9040 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
9041 int i;
9042 u32 j;
9043
e9edda69 9044 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7942e1db
MC
9045 for (j = 0; j < len; j += 4) {
9046 u32 val;
9047
9048 tg3_write_mem(tp, offset + j, test_pattern[i]);
9049 tg3_read_mem(tp, offset + j, &val);
9050 if (val != test_pattern[i])
9051 return -EIO;
9052 }
9053 }
9054 return 0;
9055}
9056
/* Self-test: pattern-test the chip's internal memory regions.
 *
 * Each chip generation has its own table of {offset, length} regions
 * (terminated by offset 0xffffffff); every region is exercised with
 * tg3_do_mem_test().  Returns 0 or the first region's error.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table matching this ASIC generation. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			mem_tbl = mem_tbl_5755;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			mem_tbl = mem_tbl_5906;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
9114
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Self-test helper: send one frame through MAC- or PHY-level
 * loopback and verify it arrives intact on the RX ring.
 *
 * Configures the requested loopback mode, builds a 1514-byte frame
 * with a known byte pattern, pushes it through the TX ring, polls
 * the status block for completion, and then checks the received
 * descriptor and payload.  Returns 0 on success, -EIO on any
 * mismatch/timeout, -EINVAL for an unknown mode, -ENOMEM if the
 * skb allocation fails.  The received skb is left on the ring;
 * tg3_free_rings() will unmap and free it later.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: tweak a shadow register before enabling
			 * loopback (bit meaning undocumented here).
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our MAC as dest, counting byte pattern. */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the TX doorbell and flush with a readback. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;	/* TX never completed */

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;	/* frame never looped back */

	/* Validate the RX descriptor: standard ring, no errors,
	 * length matches (minus 4-byte FCS).
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9285
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED | \
					 TG3_PHY_LOOPBACK_FAILED)

/* Self-test: run MAC loopback and (for non-SERDES devices) PHY
 * loopback.  Returns a bitmask of TG3_*_LOOPBACK_FAILED flags,
 * or 0 if everything passed.
 *
 * On CPMU-equipped chips the CPMU hardware mutex is taken and
 * link-speed power management is disabled around the MAC loopback
 * test, then restored.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		cpmuctrl = tr32(TG3_CPMU_CTRL);

		/* Turn off power management based on link speed. */
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		/* Restore power management and release the mutex. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9344
/* ethtool ->self_test: run the device diagnostics.
 *
 * Online tests (always run): NVRAM integrity (data[0]) and link
 * status (data[1]).  Offline tests (ETH_TEST_FL_OFFLINE only):
 * registers (data[2]), memory (data[3]), loopback (data[4], the
 * raw bitmask from tg3_test_loopback), and interrupts (data[5]).
 * ETH_TEST_FL_FAILED is set in etest->flags on any failure.
 *
 * Offline testing halts the chip, so the device is fully restarted
 * afterwards; a low-power PHY is brought to D0 for the duration.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;	/* sync irqs in tg3_full_lock */
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it and stop the on-chip CPUs
		 * (no TX CPU on 5705-and-later parts), holding the
		 * NVRAM lock across the CPU halts.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test needs the lock dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the device back up after the offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9417
1da177e4
LT
9418static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9419{
9420 struct mii_ioctl_data *data = if_mii(ifr);
9421 struct tg3 *tp = netdev_priv(dev);
9422 int err;
9423
9424 switch(cmd) {
9425 case SIOCGMIIPHY:
9426 data->phy_id = PHY_ADDR;
9427
9428 /* fallthru */
9429 case SIOCGMIIREG: {
9430 u32 mii_regval;
9431
9432 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9433 break; /* We have no PHY */
9434
bc1c7567
MC
9435 if (tp->link_config.phy_is_low_power)
9436 return -EAGAIN;
9437
f47c11ee 9438 spin_lock_bh(&tp->lock);
1da177e4 9439 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 9440 spin_unlock_bh(&tp->lock);
1da177e4
LT
9441
9442 data->val_out = mii_regval;
9443
9444 return err;
9445 }
9446
9447 case SIOCSMIIREG:
9448 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9449 break; /* We have no PHY */
9450
9451 if (!capable(CAP_NET_ADMIN))
9452 return -EPERM;
9453
bc1c7567
MC
9454 if (tp->link_config.phy_is_low_power)
9455 return -EAGAIN;
9456
f47c11ee 9457 spin_lock_bh(&tp->lock);
1da177e4 9458 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 9459 spin_unlock_bh(&tp->lock);
1da177e4
LT
9460
9461 return err;
9462
9463 default:
9464 /* do nothing */
9465 break;
9466 }
9467 return -EOPNOTSUPP;
9468}
9469
#if TG3_VLAN_TAG_USED
/* Register a VLAN group with the device.
 *
 * Quiesces the NIC, records the group pointer, and updates the
 * RX_MODE_KEEP_VLAN_TAG setting so hardware VLAN stripping matches
 * whether a group is attached.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
9491
15f9850d
DM
9492static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9493{
9494 struct tg3 *tp = netdev_priv(dev);
9495
9496 memcpy(ec, &tp->coal, sizeof(*ec));
9497 return 0;
9498}
9499
d244c892
MC
9500static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9501{
9502 struct tg3 *tp = netdev_priv(dev);
9503 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9504 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9505
9506 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9507 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9508 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9509 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9510 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9511 }
9512
9513 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9514 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9515 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9516 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9517 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9518 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9519 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9520 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9521 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9522 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9523 return -EINVAL;
9524
9525 /* No rx interrupts will be generated if both are zero */
9526 if ((ec->rx_coalesce_usecs == 0) &&
9527 (ec->rx_max_coalesced_frames == 0))
9528 return -EINVAL;
9529
9530 /* No tx interrupts will be generated if both are zero */
9531 if ((ec->tx_coalesce_usecs == 0) &&
9532 (ec->tx_max_coalesced_frames == 0))
9533 return -EINVAL;
9534
9535 /* Only copy relevant parameters, ignore all others. */
9536 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9537 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9538 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9539 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9540 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9541 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9542 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9543 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9544 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9545
9546 if (netif_running(dev)) {
9547 tg3_full_lock(tp, 0);
9548 __tg3_set_coalesce(tp, &tp->coal);
9549 tg3_full_unlock(tp);
9550 }
9551 return 0;
9552}
9553
7282d491 9554static const struct ethtool_ops tg3_ethtool_ops = {
1da177e4
LT
9555 .get_settings = tg3_get_settings,
9556 .set_settings = tg3_set_settings,
9557 .get_drvinfo = tg3_get_drvinfo,
9558 .get_regs_len = tg3_get_regs_len,
9559 .get_regs = tg3_get_regs,
9560 .get_wol = tg3_get_wol,
9561 .set_wol = tg3_set_wol,
9562 .get_msglevel = tg3_get_msglevel,
9563 .set_msglevel = tg3_set_msglevel,
9564 .nway_reset = tg3_nway_reset,
9565 .get_link = ethtool_op_get_link,
9566 .get_eeprom_len = tg3_get_eeprom_len,
9567 .get_eeprom = tg3_get_eeprom,
9568 .set_eeprom = tg3_set_eeprom,
9569 .get_ringparam = tg3_get_ringparam,
9570 .set_ringparam = tg3_set_ringparam,
9571 .get_pauseparam = tg3_get_pauseparam,
9572 .set_pauseparam = tg3_set_pauseparam,
9573 .get_rx_csum = tg3_get_rx_csum,
9574 .set_rx_csum = tg3_set_rx_csum,
1da177e4 9575 .set_tx_csum = tg3_set_tx_csum,
1da177e4 9576 .set_sg = ethtool_op_set_sg,
1da177e4 9577 .set_tso = tg3_set_tso,
4cafd3f5 9578 .self_test = tg3_self_test,
1da177e4 9579 .get_strings = tg3_get_strings,
4009a93d 9580 .phys_id = tg3_phys_id,
1da177e4 9581 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 9582 .get_coalesce = tg3_get_coalesce,
d244c892 9583 .set_coalesce = tg3_set_coalesce,
b9f2c044 9584 .get_sset_count = tg3_get_sset_count,
1da177e4
LT
9585};
9586
9587static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9588{
1b27777a 9589 u32 cursize, val, magic;
1da177e4
LT
9590
9591 tp->nvram_size = EEPROM_CHIP_SIZE;
9592
1820180b 9593 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
1da177e4
LT
9594 return;
9595
b16250e3
MC
9596 if ((magic != TG3_EEPROM_MAGIC) &&
9597 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9598 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
1da177e4
LT
9599 return;
9600
9601 /*
9602 * Size the chip by reading offsets at increasing powers of two.
9603 * When we encounter our validation signature, we know the addressing
9604 * has wrapped around, and thus have our chip size.
9605 */
1b27777a 9606 cursize = 0x10;
1da177e4
LT
9607
9608 while (cursize < tp->nvram_size) {
1820180b 9609 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
1da177e4
LT
9610 return;
9611
1820180b 9612 if (val == magic)
1da177e4
LT
9613 break;
9614
9615 cursize <<= 1;
9616 }
9617
9618 tp->nvram_size = cursize;
9619}
6aa20a22 9620
1da177e4
LT
9621static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9622{
9623 u32 val;
9624
1820180b 9625 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
1b27777a
MC
9626 return;
9627
9628 /* Selfboot format */
1820180b 9629 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
9630 tg3_get_eeprom_size(tp);
9631 return;
9632 }
9633
1da177e4
LT
9634 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9635 if (val != 0) {
9636 tp->nvram_size = (val >> 16) * 1024;
9637 return;
9638 }
9639 }
989a9d23 9640 tp->nvram_size = 0x80000;
1da177e4
LT
9641}
9642
9643static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9644{
9645 u32 nvcfg1;
9646
9647 nvcfg1 = tr32(NVRAM_CFG1);
9648 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9649 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9650 }
9651 else {
9652 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9653 tw32(NVRAM_CFG1, nvcfg1);
9654 }
9655
4c987487 9656 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
a4e2b347 9657 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
9658 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9659 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9660 tp->nvram_jedecnum = JEDEC_ATMEL;
9661 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9662 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9663 break;
9664 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9665 tp->nvram_jedecnum = JEDEC_ATMEL;
9666 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9667 break;
9668 case FLASH_VENDOR_ATMEL_EEPROM:
9669 tp->nvram_jedecnum = JEDEC_ATMEL;
9670 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9671 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9672 break;
9673 case FLASH_VENDOR_ST:
9674 tp->nvram_jedecnum = JEDEC_ST;
9675 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9676 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9677 break;
9678 case FLASH_VENDOR_SAIFUN:
9679 tp->nvram_jedecnum = JEDEC_SAIFUN;
9680 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9681 break;
9682 case FLASH_VENDOR_SST_SMALL:
9683 case FLASH_VENDOR_SST_LARGE:
9684 tp->nvram_jedecnum = JEDEC_SST;
9685 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9686 break;
9687 }
9688 }
9689 else {
9690 tp->nvram_jedecnum = JEDEC_ATMEL;
9691 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9692 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9693 }
9694}
9695
361b4ac2
MC
9696static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9697{
9698 u32 nvcfg1;
9699
9700 nvcfg1 = tr32(NVRAM_CFG1);
9701
e6af301b
MC
9702 /* NVRAM protection for TPM */
9703 if (nvcfg1 & (1 << 27))
9704 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9705
361b4ac2
MC
9706 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9707 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9708 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9709 tp->nvram_jedecnum = JEDEC_ATMEL;
9710 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9711 break;
9712 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9713 tp->nvram_jedecnum = JEDEC_ATMEL;
9714 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9715 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9716 break;
9717 case FLASH_5752VENDOR_ST_M45PE10:
9718 case FLASH_5752VENDOR_ST_M45PE20:
9719 case FLASH_5752VENDOR_ST_M45PE40:
9720 tp->nvram_jedecnum = JEDEC_ST;
9721 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9722 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9723 break;
9724 }
9725
9726 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9727 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9728 case FLASH_5752PAGE_SIZE_256:
9729 tp->nvram_pagesize = 256;
9730 break;
9731 case FLASH_5752PAGE_SIZE_512:
9732 tp->nvram_pagesize = 512;
9733 break;
9734 case FLASH_5752PAGE_SIZE_1K:
9735 tp->nvram_pagesize = 1024;
9736 break;
9737 case FLASH_5752PAGE_SIZE_2K:
9738 tp->nvram_pagesize = 2048;
9739 break;
9740 case FLASH_5752PAGE_SIZE_4K:
9741 tp->nvram_pagesize = 4096;
9742 break;
9743 case FLASH_5752PAGE_SIZE_264:
9744 tp->nvram_pagesize = 264;
9745 break;
9746 }
9747 }
9748 else {
9749 /* For eeprom, set pagesize to maximum eeprom size */
9750 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9751
9752 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9753 tw32(NVRAM_CFG1, nvcfg1);
9754 }
9755}
9756
d3c7b886
MC
9757static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9758{
989a9d23 9759 u32 nvcfg1, protect = 0;
d3c7b886
MC
9760
9761 nvcfg1 = tr32(NVRAM_CFG1);
9762
9763 /* NVRAM protection for TPM */
989a9d23 9764 if (nvcfg1 & (1 << 27)) {
d3c7b886 9765 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
989a9d23
MC
9766 protect = 1;
9767 }
d3c7b886 9768
989a9d23
MC
9769 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9770 switch (nvcfg1) {
d3c7b886
MC
9771 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9772 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9773 case FLASH_5755VENDOR_ATMEL_FLASH_3:
70b65a2d 9774 case FLASH_5755VENDOR_ATMEL_FLASH_5:
d3c7b886
MC
9775 tp->nvram_jedecnum = JEDEC_ATMEL;
9776 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9777 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9778 tp->nvram_pagesize = 264;
70b65a2d
MC
9779 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9780 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
989a9d23
MC
9781 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9782 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9783 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9784 else
9785 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
d3c7b886
MC
9786 break;
9787 case FLASH_5752VENDOR_ST_M45PE10:
9788 case FLASH_5752VENDOR_ST_M45PE20:
9789 case FLASH_5752VENDOR_ST_M45PE40:
9790 tp->nvram_jedecnum = JEDEC_ST;
9791 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9792 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9793 tp->nvram_pagesize = 256;
989a9d23
MC
9794 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9795 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9796 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9797 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9798 else
9799 tp->nvram_size = (protect ? 0x20000 : 0x80000);
d3c7b886
MC
9800 break;
9801 }
9802}
9803
1b27777a
MC
9804static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9805{
9806 u32 nvcfg1;
9807
9808 nvcfg1 = tr32(NVRAM_CFG1);
9809
9810 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9811 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9812 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9813 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9814 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9815 tp->nvram_jedecnum = JEDEC_ATMEL;
9816 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9817 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9818
9819 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9820 tw32(NVRAM_CFG1, nvcfg1);
9821 break;
9822 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9823 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9824 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9825 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9826 tp->nvram_jedecnum = JEDEC_ATMEL;
9827 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9828 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9829 tp->nvram_pagesize = 264;
9830 break;
9831 case FLASH_5752VENDOR_ST_M45PE10:
9832 case FLASH_5752VENDOR_ST_M45PE20:
9833 case FLASH_5752VENDOR_ST_M45PE40:
9834 tp->nvram_jedecnum = JEDEC_ST;
9835 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9836 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9837 tp->nvram_pagesize = 256;
9838 break;
9839 }
9840}
9841
6b91fa02
MC
9842static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9843{
9844 u32 nvcfg1, protect = 0;
9845
9846 nvcfg1 = tr32(NVRAM_CFG1);
9847
9848 /* NVRAM protection for TPM */
9849 if (nvcfg1 & (1 << 27)) {
9850 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9851 protect = 1;
9852 }
9853
9854 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9855 switch (nvcfg1) {
9856 case FLASH_5761VENDOR_ATMEL_ADB021D:
9857 case FLASH_5761VENDOR_ATMEL_ADB041D:
9858 case FLASH_5761VENDOR_ATMEL_ADB081D:
9859 case FLASH_5761VENDOR_ATMEL_ADB161D:
9860 case FLASH_5761VENDOR_ATMEL_MDB021D:
9861 case FLASH_5761VENDOR_ATMEL_MDB041D:
9862 case FLASH_5761VENDOR_ATMEL_MDB081D:
9863 case FLASH_5761VENDOR_ATMEL_MDB161D:
9864 tp->nvram_jedecnum = JEDEC_ATMEL;
9865 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9866 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9867 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9868 tp->nvram_pagesize = 256;
9869 break;
9870 case FLASH_5761VENDOR_ST_A_M45PE20:
9871 case FLASH_5761VENDOR_ST_A_M45PE40:
9872 case FLASH_5761VENDOR_ST_A_M45PE80:
9873 case FLASH_5761VENDOR_ST_A_M45PE16:
9874 case FLASH_5761VENDOR_ST_M_M45PE20:
9875 case FLASH_5761VENDOR_ST_M_M45PE40:
9876 case FLASH_5761VENDOR_ST_M_M45PE80:
9877 case FLASH_5761VENDOR_ST_M_M45PE16:
9878 tp->nvram_jedecnum = JEDEC_ST;
9879 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9880 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9881 tp->nvram_pagesize = 256;
9882 break;
9883 }
9884
9885 if (protect) {
9886 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9887 } else {
9888 switch (nvcfg1) {
9889 case FLASH_5761VENDOR_ATMEL_ADB161D:
9890 case FLASH_5761VENDOR_ATMEL_MDB161D:
9891 case FLASH_5761VENDOR_ST_A_M45PE16:
9892 case FLASH_5761VENDOR_ST_M_M45PE16:
9893 tp->nvram_size = 0x100000;
9894 break;
9895 case FLASH_5761VENDOR_ATMEL_ADB081D:
9896 case FLASH_5761VENDOR_ATMEL_MDB081D:
9897 case FLASH_5761VENDOR_ST_A_M45PE80:
9898 case FLASH_5761VENDOR_ST_M_M45PE80:
9899 tp->nvram_size = 0x80000;
9900 break;
9901 case FLASH_5761VENDOR_ATMEL_ADB041D:
9902 case FLASH_5761VENDOR_ATMEL_MDB041D:
9903 case FLASH_5761VENDOR_ST_A_M45PE40:
9904 case FLASH_5761VENDOR_ST_M_M45PE40:
9905 tp->nvram_size = 0x40000;
9906 break;
9907 case FLASH_5761VENDOR_ATMEL_ADB021D:
9908 case FLASH_5761VENDOR_ATMEL_MDB021D:
9909 case FLASH_5761VENDOR_ST_A_M45PE20:
9910 case FLASH_5761VENDOR_ST_M_M45PE20:
9911 tp->nvram_size = 0x20000;
9912 break;
9913 }
9914 }
9915}
9916
b5d3772c
MC
9917static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9918{
9919 tp->nvram_jedecnum = JEDEC_ATMEL;
9920 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9921 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9922}
9923
1da177e4
LT
9924/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9925static void __devinit tg3_nvram_init(struct tg3 *tp)
9926{
1da177e4
LT
9927 tw32_f(GRC_EEPROM_ADDR,
9928 (EEPROM_ADDR_FSM_RESET |
9929 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9930 EEPROM_ADDR_CLKPERD_SHIFT)));
9931
9d57f01c 9932 msleep(1);
1da177e4
LT
9933
9934 /* Enable seeprom accesses. */
9935 tw32_f(GRC_LOCAL_CTRL,
9936 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9937 udelay(100);
9938
9939 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9940 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9941 tp->tg3_flags |= TG3_FLAG_NVRAM;
9942
ec41c7df
MC
9943 if (tg3_nvram_lock(tp)) {
9944 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9945 "tg3_nvram_init failed.\n", tp->dev->name);
9946 return;
9947 }
e6af301b 9948 tg3_enable_nvram_access(tp);
1da177e4 9949
989a9d23
MC
9950 tp->nvram_size = 0;
9951
361b4ac2
MC
9952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9953 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
9954 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9955 tg3_get_5755_nvram_info(tp);
d30cdd28
MC
9956 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
1b27777a 9958 tg3_get_5787_nvram_info(tp);
6b91fa02
MC
9959 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9960 tg3_get_5761_nvram_info(tp);
b5d3772c
MC
9961 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9962 tg3_get_5906_nvram_info(tp);
361b4ac2
MC
9963 else
9964 tg3_get_nvram_info(tp);
9965
989a9d23
MC
9966 if (tp->nvram_size == 0)
9967 tg3_get_nvram_size(tp);
1da177e4 9968
e6af301b 9969 tg3_disable_nvram_access(tp);
381291b7 9970 tg3_nvram_unlock(tp);
1da177e4
LT
9971
9972 } else {
9973 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9974
9975 tg3_get_eeprom_size(tp);
9976 }
9977}
9978
9979static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9980 u32 offset, u32 *val)
9981{
9982 u32 tmp;
9983 int i;
9984
9985 if (offset > EEPROM_ADDR_ADDR_MASK ||
9986 (offset % 4) != 0)
9987 return -EINVAL;
9988
9989 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9990 EEPROM_ADDR_DEVID_MASK |
9991 EEPROM_ADDR_READ);
9992 tw32(GRC_EEPROM_ADDR,
9993 tmp |
9994 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9995 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9996 EEPROM_ADDR_ADDR_MASK) |
9997 EEPROM_ADDR_READ | EEPROM_ADDR_START);
9998
9d57f01c 9999 for (i = 0; i < 1000; i++) {
1da177e4
LT
10000 tmp = tr32(GRC_EEPROM_ADDR);
10001
10002 if (tmp & EEPROM_ADDR_COMPLETE)
10003 break;
9d57f01c 10004 msleep(1);
1da177e4
LT
10005 }
10006 if (!(tmp & EEPROM_ADDR_COMPLETE))
10007 return -EBUSY;
10008
10009 *val = tr32(GRC_EEPROM_DATA);
10010 return 0;
10011}
10012
10013#define NVRAM_CMD_TIMEOUT 10000
10014
10015static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10016{
10017 int i;
10018
10019 tw32(NVRAM_CMD, nvram_cmd);
10020 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10021 udelay(10);
10022 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10023 udelay(10);
10024 break;
10025 }
10026 }
10027 if (i == NVRAM_CMD_TIMEOUT) {
10028 return -EBUSY;
10029 }
10030 return 0;
10031}
10032
1820180b
MC
10033static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10034{
10035 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10036 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10037 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10038 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
1820180b
MC
10039 (tp->nvram_jedecnum == JEDEC_ATMEL))
10040
10041 addr = ((addr / tp->nvram_pagesize) <<
10042 ATMEL_AT45DB0X1B_PAGE_POS) +
10043 (addr % tp->nvram_pagesize);
10044
10045 return addr;
10046}
10047
c4e6575c
MC
10048static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10049{
10050 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10051 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10052 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10053 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
c4e6575c
MC
10054 (tp->nvram_jedecnum == JEDEC_ATMEL))
10055
10056 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10057 tp->nvram_pagesize) +
10058 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10059
10060 return addr;
10061}
10062
1da177e4
LT
10063static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10064{
10065 int ret;
10066
1da177e4
LT
10067 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10068 return tg3_nvram_read_using_eeprom(tp, offset, val);
10069
1820180b 10070 offset = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
10071
10072 if (offset > NVRAM_ADDR_MSK)
10073 return -EINVAL;
10074
ec41c7df
MC
10075 ret = tg3_nvram_lock(tp);
10076 if (ret)
10077 return ret;
1da177e4 10078
e6af301b 10079 tg3_enable_nvram_access(tp);
1da177e4
LT
10080
10081 tw32(NVRAM_ADDR, offset);
10082 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10083 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10084
10085 if (ret == 0)
10086 *val = swab32(tr32(NVRAM_RDDATA));
10087
e6af301b 10088 tg3_disable_nvram_access(tp);
1da177e4 10089
381291b7
MC
10090 tg3_nvram_unlock(tp);
10091
1da177e4
LT
10092 return ret;
10093}
10094
1820180b
MC
10095static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10096{
10097 int err;
10098 u32 tmp;
10099
10100 err = tg3_nvram_read(tp, offset, &tmp);
10101 *val = swab32(tmp);
10102 return err;
10103}
10104
1da177e4
LT
10105static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10106 u32 offset, u32 len, u8 *buf)
10107{
10108 int i, j, rc = 0;
10109 u32 val;
10110
10111 for (i = 0; i < len; i += 4) {
10112 u32 addr, data;
10113
10114 addr = offset + i;
10115
10116 memcpy(&data, buf + i, 4);
10117
10118 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
10119
10120 val = tr32(GRC_EEPROM_ADDR);
10121 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10122
10123 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10124 EEPROM_ADDR_READ);
10125 tw32(GRC_EEPROM_ADDR, val |
10126 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10127 (addr & EEPROM_ADDR_ADDR_MASK) |
10128 EEPROM_ADDR_START |
10129 EEPROM_ADDR_WRITE);
6aa20a22 10130
9d57f01c 10131 for (j = 0; j < 1000; j++) {
1da177e4
LT
10132 val = tr32(GRC_EEPROM_ADDR);
10133
10134 if (val & EEPROM_ADDR_COMPLETE)
10135 break;
9d57f01c 10136 msleep(1);
1da177e4
LT
10137 }
10138 if (!(val & EEPROM_ADDR_COMPLETE)) {
10139 rc = -EBUSY;
10140 break;
10141 }
10142 }
10143
10144 return rc;
10145}
10146
10147/* offset and length are dword aligned */
10148static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10149 u8 *buf)
10150{
10151 int ret = 0;
10152 u32 pagesize = tp->nvram_pagesize;
10153 u32 pagemask = pagesize - 1;
10154 u32 nvram_cmd;
10155 u8 *tmp;
10156
10157 tmp = kmalloc(pagesize, GFP_KERNEL);
10158 if (tmp == NULL)
10159 return -ENOMEM;
10160
10161 while (len) {
10162 int j;
e6af301b 10163 u32 phy_addr, page_off, size;
1da177e4
LT
10164
10165 phy_addr = offset & ~pagemask;
6aa20a22 10166
1da177e4
LT
10167 for (j = 0; j < pagesize; j += 4) {
10168 if ((ret = tg3_nvram_read(tp, phy_addr + j,
10169 (u32 *) (tmp + j))))
10170 break;
10171 }
10172 if (ret)
10173 break;
10174
10175 page_off = offset & pagemask;
10176 size = pagesize;
10177 if (len < size)
10178 size = len;
10179
10180 len -= size;
10181
10182 memcpy(tmp + page_off, buf, size);
10183
10184 offset = offset + (pagesize - page_off);
10185
e6af301b 10186 tg3_enable_nvram_access(tp);
1da177e4
LT
10187
10188 /*
10189 * Before we can erase the flash page, we need
10190 * to issue a special "write enable" command.
10191 */
10192 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10193
10194 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10195 break;
10196
10197 /* Erase the target page */
10198 tw32(NVRAM_ADDR, phy_addr);
10199
10200 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10201 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10202
10203 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10204 break;
10205
10206 /* Issue another write enable to start the write. */
10207 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10208
10209 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10210 break;
10211
10212 for (j = 0; j < pagesize; j += 4) {
10213 u32 data;
10214
10215 data = *((u32 *) (tmp + j));
10216 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10217
10218 tw32(NVRAM_ADDR, phy_addr + j);
10219
10220 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10221 NVRAM_CMD_WR;
10222
10223 if (j == 0)
10224 nvram_cmd |= NVRAM_CMD_FIRST;
10225 else if (j == (pagesize - 4))
10226 nvram_cmd |= NVRAM_CMD_LAST;
10227
10228 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10229 break;
10230 }
10231 if (ret)
10232 break;
10233 }
10234
10235 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10236 tg3_nvram_exec_cmd(tp, nvram_cmd);
10237
10238 kfree(tmp);
10239
10240 return ret;
10241}
10242
10243/* offset and length are dword aligned */
10244static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10245 u8 *buf)
10246{
10247 int i, ret = 0;
10248
10249 for (i = 0; i < len; i += 4, offset += 4) {
10250 u32 data, page_off, phy_addr, nvram_cmd;
10251
10252 memcpy(&data, buf + i, 4);
10253 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10254
10255 page_off = offset % tp->nvram_pagesize;
10256
1820180b 10257 phy_addr = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
10258
10259 tw32(NVRAM_ADDR, phy_addr);
10260
10261 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10262
10263 if ((page_off == 0) || (i == 0))
10264 nvram_cmd |= NVRAM_CMD_FIRST;
f6d9a256 10265 if (page_off == (tp->nvram_pagesize - 4))
1da177e4
LT
10266 nvram_cmd |= NVRAM_CMD_LAST;
10267
10268 if (i == (len - 4))
10269 nvram_cmd |= NVRAM_CMD_LAST;
10270
4c987487 10271 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
af36e6b6 10272 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
1b27777a 10273 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
d30cdd28 10274 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
9936bcf6 10275 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
4c987487
MC
10276 (tp->nvram_jedecnum == JEDEC_ST) &&
10277 (nvram_cmd & NVRAM_CMD_FIRST)) {
1da177e4
LT
10278
10279 if ((ret = tg3_nvram_exec_cmd(tp,
10280 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10281 NVRAM_CMD_DONE)))
10282
10283 break;
10284 }
10285 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10286 /* We always do complete word writes to eeprom. */
10287 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10288 }
10289
10290 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10291 break;
10292 }
10293 return ret;
10294}
10295
10296/* offset and length are dword aligned */
10297static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10298{
10299 int ret;
10300
1da177e4 10301 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
10302 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10303 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
10304 udelay(40);
10305 }
10306
10307 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10308 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10309 }
10310 else {
10311 u32 grc_mode;
10312
ec41c7df
MC
10313 ret = tg3_nvram_lock(tp);
10314 if (ret)
10315 return ret;
1da177e4 10316
e6af301b
MC
10317 tg3_enable_nvram_access(tp);
10318 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10319 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 10320 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
10321
10322 grc_mode = tr32(GRC_MODE);
10323 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10324
10325 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10326 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10327
10328 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10329 buf);
10330 }
10331 else {
10332 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10333 buf);
10334 }
10335
10336 grc_mode = tr32(GRC_MODE);
10337 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10338
e6af301b 10339 tg3_disable_nvram_access(tp);
1da177e4
LT
10340 tg3_nvram_unlock(tp);
10341 }
10342
10343 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 10344 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
10345 udelay(40);
10346 }
10347
10348 return ret;
10349}
10350
10351struct subsys_tbl_ent {
10352 u16 subsys_vendor, subsys_devid;
10353 u32 phy_id;
10354};
10355
10356static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10357 /* Broadcom boards. */
10358 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10359 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10360 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10361 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10362 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10363 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10364 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10365 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10366 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10367 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10368 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10369
10370 /* 3com boards. */
10371 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10372 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10373 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10374 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10375 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10376
10377 /* DELL boards. */
10378 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10379 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10380 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10381 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10382
10383 /* Compaq boards. */
10384 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10385 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10386 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10387 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10388 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10389
10390 /* IBM boards. */
10391 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10392};
10393
10394static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10395{
10396 int i;
10397
10398 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10399 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10400 tp->pdev->subsystem_vendor) &&
10401 (subsys_id_to_phy_id[i].subsys_devid ==
10402 tp->pdev->subsystem_device))
10403 return &subsys_id_to_phy_id[i];
10404 }
10405 return NULL;
10406}
10407
/* Pull the board configuration that bootcode left in NIC SRAM (or, on
 * the 5906, in the VCPU config shadow register) and seed tp->phy_id,
 * tp->led_ctrl and the WOL/ASF/APE/serdes flag bits.  The function
 * itself forces the chip into D0 and re-enables register access and
 * the memory arbiter, since SRAM reads need all three.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults in case no usable config is found below. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906: config lives in the VCPU shadow register, not in the
	 * NIC SRAM area parsed below, so handle it and return early.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	/* Only trust the SRAM config area when its signature is valid. */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only present with a sane bootcode version and
		 * not on 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM PHY id word into the driver's internal
		 * PHY_ID layout (same packing as tg3_phy_probe()).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards override the LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Two Arima boards claim write protect but
			 * don't actually have it.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
		    nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}
	}
}
10595
/* Determine which PHY is attached and record it in tp->phy_id.
 * Preference order: the ID read live over MII (unless ASF/APE firmware
 * owns the PHY), then the ID tg3_get_eeprom_hw_cfg() found in SRAM,
 * then the hard-coded subsystem-ID table.  For copper PHYs not managed
 * by firmware, also resets the PHY and programs default autoneg
 * advertisement.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack MII_PHYSID1/2 into the driver's internal PHY_ID
		 * layout (same packing tg3_get_eeprom_hw_cfg() uses).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY setup — skipped when firmware (ASF/APE) manages
	 * the PHY or on serdes boards.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR link status is latched; read it twice so the
		 * second read reflects the current link state, and
		 * leave the PHY alone if link is already up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 must be forced to master mode. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Restart autoneg only if not already advertising
		 * everything we want.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): for BCM5401 the DSP init already ran just above
	 * (and returned on failure), so this second call looks redundant;
	 * it is kept as-is to preserve behavior — confirm before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Default advertisement mask for serdes / 10-100-only boards. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10723
/* Extract the board part number from the 256-byte VPD area into
 * tp->board_part_number.  The VPD bytes come either from NVRAM (when
 * the EEPROM carries the tg3 magic signature) or through the PCI VPD
 * capability registers.  On any failure falls back to "BCM95906" for
 * 5906 parts, "none" otherwise.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100; copy it out a
		 * 32-bit word at a time, least significant byte first.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* Read the VPD through the PCI VPD capability: write
		 * the address, poll bit 15 of PCI_VPD_ADDR for data
		 * ready (up to ~100 ms), then read the data register.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip the identifier-string (0x82) and VPD-W (0x91)
		 * resources; bytes i+1/i+2 hold their 16-bit
		 * little-endian payload length.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Only a VPD-R resource (tag 0x90) can hold "PN". */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword entries inside VPD-R looking for
		 * "PN"; each entry is 2 keyword bytes, 1 length byte,
		 * then the data.
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				/* Bound the copy to the 24-byte
				 * board_part_number field and the
				 * VPD buffer.
				 */
				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10823
c4e6575c
MC
/* Read the bootcode firmware version string (16 bytes) out of NVRAM
 * into tp->fw_ver.  Silently returns — leaving tp->fw_ver untouched —
 * on any NVRAM read failure, a missing EEPROM magic signature, or an
 * unexpected directory-entry layout.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	/* Only EEPROMs carrying the tg3 signature have a version block. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc points at the firmware directory entry, word 0x4 is
	 * the image start used to rebase the version-string offset.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* Expect directory entry type 0x0c in the top byte field. */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		/* Copy the 16-byte version string a word at a time,
		 * preserving NVRAM (little-endian) byte order.
		 */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10863
7544b097
MC
10864static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10865
1da177e4
LT
10866static int __devinit tg3_get_invariants(struct tg3 *tp)
10867{
10868 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
10869 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10870 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
c165b004
JL
10871 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10872 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
399de50b
MC
10873 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10874 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
10875 { },
10876 };
10877 u32 misc_ctrl_reg;
10878 u32 cacheline_sz_reg;
10879 u32 pci_state_reg, grc_misc_cfg;
10880 u32 val;
10881 u16 pci_cmd;
c7835a77 10882 int err, pcie_cap;
1da177e4 10883
1da177e4
LT
10884 /* Force memory write invalidate off. If we leave it on,
10885 * then on 5700_BX chips we have to enable a workaround.
10886 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10887 * to match the cacheline size. The Broadcom driver have this
10888 * workaround but turns MWI off all the times so never uses
10889 * it. This seems to suggest that the workaround is insufficient.
10890 */
10891 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10892 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10893 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10894
10895 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10896 * has the register indirect write enable bit set before
10897 * we try to access any of the MMIO registers. It is also
10898 * critical that the PCI-X hw workaround situation is decided
10899 * before that as well.
10900 */
10901 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10902 &misc_ctrl_reg);
10903
10904 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10905 MISC_HOST_CTRL_CHIPREV_SHIFT);
795d01c5
MC
10906 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10907 u32 prod_id_asic_rev;
10908
10909 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10910 &prod_id_asic_rev);
10911 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10912 }
1da177e4 10913
ff645bec
MC
10914 /* Wrong chip ID in 5752 A0. This code can be removed later
10915 * as A0 is not in production.
10916 */
10917 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10918 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10919
6892914f
MC
10920 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10921 * we need to disable memory and use config. cycles
10922 * only to access all registers. The 5702/03 chips
10923 * can mistakenly decode the special cycles from the
10924 * ICH chipsets as memory write cycles, causing corruption
10925 * of register and memory space. Only certain ICH bridges
10926 * will drive special cycles with non-zero data during the
10927 * address phase which can fall within the 5703's address
10928 * range. This is not an ICH bug as the PCI spec allows
10929 * non-zero address during special cycles. However, only
10930 * these ICH bridges are known to drive non-zero addresses
10931 * during special cycles.
10932 *
10933 * Since special cycles do not cross PCI bridges, we only
10934 * enable this workaround if the 5703 is on the secondary
10935 * bus of these ICH bridges.
10936 */
10937 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10938 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10939 static struct tg3_dev_id {
10940 u32 vendor;
10941 u32 device;
10942 u32 rev;
10943 } ich_chipsets[] = {
10944 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10945 PCI_ANY_ID },
10946 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10947 PCI_ANY_ID },
10948 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10949 0xa },
10950 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10951 PCI_ANY_ID },
10952 { },
10953 };
10954 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10955 struct pci_dev *bridge = NULL;
10956
10957 while (pci_id->vendor != 0) {
10958 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10959 bridge);
10960 if (!bridge) {
10961 pci_id++;
10962 continue;
10963 }
10964 if (pci_id->rev != PCI_ANY_ID) {
44c10138 10965 if (bridge->revision > pci_id->rev)
6892914f
MC
10966 continue;
10967 }
10968 if (bridge->subordinate &&
10969 (bridge->subordinate->number ==
10970 tp->pdev->bus->number)) {
10971
10972 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10973 pci_dev_put(bridge);
10974 break;
10975 }
10976 }
10977 }
10978
4a29cc2e
MC
10979 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10980 * DMA addresses > 40-bit. This bridge may have other additional
10981 * 57xx devices behind it in some 4-port NIC designs for example.
10982 * Any tg3 device found behind the bridge will also need the 40-bit
10983 * DMA workaround.
10984 */
a4e2b347
MC
10985 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10987 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 10988 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 10989 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 10990 }
4a29cc2e
MC
10991 else {
10992 struct pci_dev *bridge = NULL;
10993
10994 do {
10995 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10996 PCI_DEVICE_ID_SERVERWORKS_EPB,
10997 bridge);
10998 if (bridge && bridge->subordinate &&
10999 (bridge->subordinate->number <=
11000 tp->pdev->bus->number) &&
11001 (bridge->subordinate->subordinate >=
11002 tp->pdev->bus->number)) {
11003 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11004 pci_dev_put(bridge);
11005 break;
11006 }
11007 } while (bridge);
11008 }
4cf78e4f 11009
1da177e4
LT
11010 /* Initialize misc host control in PCI block. */
11011 tp->misc_host_ctrl |= (misc_ctrl_reg &
11012 MISC_HOST_CTRL_CHIPREV);
11013 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11014 tp->misc_host_ctrl);
11015
11016 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11017 &cacheline_sz_reg);
11018
11019 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11020 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11021 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11022 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11023
7544b097
MC
11024 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11025 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11026 tp->pdev_peer = tg3_find_peer(tp);
11027
6708e5cc 11028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 11029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 11030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 11031 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11032 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
a4e2b347 11035 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
11036 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11037
1b440c56
JL
11038 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11039 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11040 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11041
5a6f3074 11042 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7544b097
MC
11043 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11044 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11045 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11046 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11047 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11048 tp->pdev_peer == tp->pdev))
11049 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11050
af36e6b6 11051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 11052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11053 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5a6f3074 11056 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
fcfa0a32 11057 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
52c0fd83 11058 } else {
7f62ad5d 11059 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
52c0fd83
MC
11060 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11061 ASIC_REV_5750 &&
11062 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
7f62ad5d 11063 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
52c0fd83 11064 }
5a6f3074 11065 }
1da177e4 11066
0f893dc6
MC
11067 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11068 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
d9ab5ad1 11069 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
af36e6b6 11070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
b5d3772c 11071 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
d30cdd28 11072 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9936bcf6 11073 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
b5d3772c 11074 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
0f893dc6
MC
11075 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11076
c7835a77
MC
11077 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11078 if (pcie_cap != 0) {
1da177e4 11079 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
c7835a77
MC
11080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11081 u16 lnkctl;
11082
11083 pci_read_config_word(tp->pdev,
11084 pcie_cap + PCI_EXP_LNKCTL,
11085 &lnkctl);
11086 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11087 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11088 }
11089 }
1da177e4 11090
399de50b
MC
11091 /* If we have an AMD 762 or VIA K8T800 chipset, write
11092 * reordering to the mailbox registers done by the host
11093 * controller can cause major troubles. We read back from
11094 * every mailbox register write to force the writes to be
11095 * posted to the chip in order.
11096 */
11097 if (pci_dev_present(write_reorder_chipsets) &&
11098 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11099 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11100
1da177e4
LT
11101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11102 tp->pci_lat_timer < 64) {
11103 tp->pci_lat_timer = 64;
11104
11105 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11106 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11107 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11108 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11109
11110 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11111 cacheline_sz_reg);
11112 }
11113
9974a356
MC
11114 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11115 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11116 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11117 if (!tp->pcix_cap) {
11118 printk(KERN_ERR PFX "Cannot find PCI-X "
11119 "capability, aborting.\n");
11120 return -EIO;
11121 }
11122 }
11123
1da177e4
LT
11124 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11125 &pci_state_reg);
11126
9974a356 11127 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
1da177e4
LT
11128 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11129
11130 /* If this is a 5700 BX chipset, and we are in PCI-X
11131 * mode, enable register write workaround.
11132 *
11133 * The workaround is to use indirect register accesses
11134 * for all chip writes not to mailbox registers.
11135 */
11136 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11137 u32 pm_reg;
1da177e4
LT
11138
11139 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11140
11141 /* The chip can have it's power management PCI config
11142 * space registers clobbered due to this bug.
11143 * So explicitly force the chip into D0 here.
11144 */
9974a356
MC
11145 pci_read_config_dword(tp->pdev,
11146 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11147 &pm_reg);
11148 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11149 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
11150 pci_write_config_dword(tp->pdev,
11151 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11152 pm_reg);
11153
11154 /* Also, force SERR#/PERR# in PCI command. */
11155 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11156 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11157 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11158 }
11159 }
11160
087fe256
MC
11161 /* 5700 BX chips need to have their TX producer index mailboxes
11162 * written twice to workaround a bug.
11163 */
11164 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11165 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11166
1da177e4
LT
11167 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11168 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11169 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11170 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11171
11172 /* Chip-specific fixup from Broadcom driver */
11173 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11174 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11175 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11176 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11177 }
11178
1ee582d8 11179 /* Default fast path register access methods */
20094930 11180 tp->read32 = tg3_read32;
1ee582d8 11181 tp->write32 = tg3_write32;
09ee929c 11182 tp->read32_mbox = tg3_read32;
20094930 11183 tp->write32_mbox = tg3_write32;
1ee582d8
MC
11184 tp->write32_tx_mbox = tg3_write32;
11185 tp->write32_rx_mbox = tg3_write32;
11186
11187 /* Various workaround register access methods */
11188 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11189 tp->write32 = tg3_write_indirect_reg32;
98efd8a6
MC
11190 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11191 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11192 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11193 /*
11194 * Back to back register writes can cause problems on these
11195 * chips, the workaround is to read back all reg writes
11196 * except those to mailbox regs.
11197 *
11198 * See tg3_write_indirect_reg32().
11199 */
1ee582d8 11200 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
11201 }
11202
1ee582d8
MC
11203
11204 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11205 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11206 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11207 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11208 tp->write32_rx_mbox = tg3_write_flush_reg32;
11209 }
20094930 11210
6892914f
MC
11211 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11212 tp->read32 = tg3_read_indirect_reg32;
11213 tp->write32 = tg3_write_indirect_reg32;
11214 tp->read32_mbox = tg3_read_indirect_mbox;
11215 tp->write32_mbox = tg3_write_indirect_mbox;
11216 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11217 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11218
11219 iounmap(tp->regs);
22abe310 11220 tp->regs = NULL;
6892914f
MC
11221
11222 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11223 pci_cmd &= ~PCI_COMMAND_MEMORY;
11224 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11225 }
b5d3772c
MC
11226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11227 tp->read32_mbox = tg3_read32_mbox_5906;
11228 tp->write32_mbox = tg3_write32_mbox_5906;
11229 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11230 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11231 }
6892914f 11232
bbadf503
MC
11233 if (tp->write32 == tg3_write_indirect_reg32 ||
11234 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11235 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 11236 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
bbadf503
MC
11237 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11238
7d0c41ef 11239 /* Get eeprom hw config before calling tg3_set_power_state().
9d26e213 11240 * In particular, the TG3_FLG2_IS_NIC flag must be
7d0c41ef
MC
11241 * determined before calling tg3_set_power_state() so that
11242 * we know whether or not to switch out of Vaux power.
11243 * When the flag is set, it means that GPIO1 is used for eeprom
11244 * write protect and also implies that it is a LOM where GPIOs
11245 * are not used to switch power.
6aa20a22 11246 */
7d0c41ef
MC
11247 tg3_get_eeprom_hw_cfg(tp);
11248
0d3031d9
MC
11249 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11250 /* Allow reads and writes to the
11251 * APE register and memory space.
11252 */
11253 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11254 PCISTATE_ALLOW_APE_SHMEM_WR;
11255 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11256 pci_state_reg);
11257 }
11258
9936bcf6
MC
11259 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d30cdd28
MC
11261 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11262
314fba34
MC
11263 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11264 * GPIO1 driven high will bring 5700's external PHY out of reset.
11265 * It is also used as eeprom write protect on LOMs.
11266 */
11267 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11268 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11269 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11270 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11271 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
11272 /* Unused GPIO3 must be driven as output on 5752 because there
11273 * are no pull-up resistors on unused GPIO pins.
11274 */
11275 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11276 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 11277
af36e6b6
MC
11278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11279 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11280
1da177e4 11281 /* Force the chip into D0. */
bc1c7567 11282 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
11283 if (err) {
11284 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11285 pci_name(tp->pdev));
11286 return err;
11287 }
11288
11289 /* 5700 B0 chips do not support checksumming correctly due
11290 * to hardware bugs.
11291 */
11292 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11293 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11294
1da177e4
LT
11295 /* Derive initial jumbo mode from MTU assigned in
11296 * ether_setup() via the alloc_etherdev() call
11297 */
0f893dc6 11298 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 11299 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 11300 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
11301
11302 /* Determine WakeOnLan speed to use. */
11303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11304 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11305 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11306 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11307 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11308 } else {
11309 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11310 }
11311
11312 /* A few boards don't want Ethernet@WireSpeed phy feature */
11313 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11314 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11315 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 11316 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
b5d3772c 11317 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
747e8f8b 11318 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
11319 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11320
11321 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11322 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11323 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11324 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11325 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11326
c424cb24
MC
11327 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 11329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
11330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
d4011ada
MC
11332 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11333 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11334 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
c1d2a196
MC
11335 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11336 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11337 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
c424cb24
MC
11338 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11339 }
1da177e4 11340
1da177e4 11341 tp->coalesce_mode = 0;
1da177e4
LT
11342 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11343 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11344 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11345
11346 /* Initialize MAC MI mode, polling disabled. */
11347 tw32_f(MAC_MI_MODE, tp->mi_mode);
11348 udelay(80);
11349
11350 /* Initialize data/descriptor byte/word swapping. */
11351 val = tr32(GRC_MODE);
11352 val &= GRC_MODE_HOST_STACKUP;
11353 tw32(GRC_MODE, val | tp->grc_mode);
11354
11355 tg3_switch_clocks(tp);
11356
11357 /* Clear this out for sanity. */
11358 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11359
11360 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11361 &pci_state_reg);
11362 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11363 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11364 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11365
11366 if (chiprevid == CHIPREV_ID_5701_A0 ||
11367 chiprevid == CHIPREV_ID_5701_B0 ||
11368 chiprevid == CHIPREV_ID_5701_B2 ||
11369 chiprevid == CHIPREV_ID_5701_B5) {
11370 void __iomem *sram_base;
11371
11372 /* Write some dummy words into the SRAM status block
11373 * area, see if it reads back correctly. If the return
11374 * value is bad, force enable the PCIX workaround.
11375 */
11376 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11377
11378 writel(0x00000000, sram_base);
11379 writel(0x00000000, sram_base + 4);
11380 writel(0xffffffff, sram_base + 4);
11381 if (readl(sram_base) != 0x00000000)
11382 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11383 }
11384 }
11385
11386 udelay(50);
11387 tg3_nvram_init(tp);
11388
11389 grc_misc_cfg = tr32(GRC_MISC_CFG);
11390 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11391
1da177e4
LT
11392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11393 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11394 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11395 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11396
fac9b83e
DM
11397 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11398 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11399 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11400 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11401 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11402 HOSTCC_MODE_CLRTICK_TXBD);
11403
11404 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11405 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11406 tp->misc_host_ctrl);
11407 }
11408
1da177e4
LT
11409 /* these are limited to 10/100 only */
11410 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11411 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11412 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11413 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11414 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11415 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11416 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11417 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11418 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
676917d4
MC
11419 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11420 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
b5d3772c 11421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1da177e4
LT
11422 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11423
11424 err = tg3_phy_probe(tp);
11425 if (err) {
11426 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11427 pci_name(tp->pdev), err);
11428 /* ... but do not return immediately ... */
11429 }
11430
11431 tg3_read_partno(tp);
c4e6575c 11432 tg3_read_fw_ver(tp);
1da177e4
LT
11433
11434 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11435 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11436 } else {
11437 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11438 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11439 else
11440 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11441 }
11442
11443 /* 5700 {AX,BX} chips have a broken status block link
11444 * change bit implementation, so we must use the
11445 * status register in those cases.
11446 */
11447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11448 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11449 else
11450 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11451
11452 /* The led_ctrl is set during tg3_phy_probe, here we might
11453 * have to force the link status polling mechanism based
11454 * upon subsystem IDs.
11455 */
11456 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 11457 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
1da177e4
LT
11458 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11459 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11460 TG3_FLAG_USE_LINKCHG_REG);
11461 }
11462
11463 /* For all SERDES we poll the MAC status register. */
11464 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11465 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11466 else
11467 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11468
5a6f3074 11469 /* All chips before 5787 can get confused if TX buffers
1da177e4
LT
11470 * straddle the 4GB address boundary in some cases.
11471 */
af36e6b6 11472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 11473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
5a6f3074
MC
11477 tp->dev->hard_start_xmit = tg3_start_xmit;
11478 else
11479 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
1da177e4
LT
11480
11481 tp->rx_offset = 2;
11482 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11483 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11484 tp->rx_offset = 0;
11485
f92905de
MC
11486 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11487
11488 /* Increment the rx prod index on the rx std ring by at most
11489 * 8 for these chips to workaround hw errata.
11490 */
11491 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11494 tp->rx_std_max_post = 8;
11495
8ed5d97e
MC
11496 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11497 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11498 PCIE_PWR_MGMT_L1_THRESH_MSK;
11499
1da177e4
LT
11500 return err;
11501}
11502
49b6e95f 11503#ifdef CONFIG_SPARC
1da177e4
LT
11504static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11505{
11506 struct net_device *dev = tp->dev;
11507 struct pci_dev *pdev = tp->pdev;
49b6e95f 11508 struct device_node *dp = pci_device_to_OF_node(pdev);
374d4cac 11509 const unsigned char *addr;
49b6e95f
DM
11510 int len;
11511
11512 addr = of_get_property(dp, "local-mac-address", &len);
11513 if (addr && len == 6) {
11514 memcpy(dev->dev_addr, addr, 6);
11515 memcpy(dev->perm_addr, dev->dev_addr, 6);
11516 return 0;
1da177e4
LT
11517 }
11518 return -ENODEV;
11519}
11520
11521static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11522{
11523 struct net_device *dev = tp->dev;
11524
11525 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 11526 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
11527 return 0;
11528}
11529#endif
11530
11531static int __devinit tg3_get_device_address(struct tg3 *tp)
11532{
11533 struct net_device *dev = tp->dev;
11534 u32 hi, lo, mac_offset;
008652b3 11535 int addr_ok = 0;
1da177e4 11536
49b6e95f 11537#ifdef CONFIG_SPARC
1da177e4
LT
11538 if (!tg3_get_macaddr_sparc(tp))
11539 return 0;
11540#endif
11541
11542 mac_offset = 0x7c;
f49639e6 11543 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
a4e2b347 11544 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
11545 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11546 mac_offset = 0xcc;
11547 if (tg3_nvram_lock(tp))
11548 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11549 else
11550 tg3_nvram_unlock(tp);
11551 }
b5d3772c
MC
11552 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11553 mac_offset = 0x10;
1da177e4
LT
11554
11555 /* First try to get it from MAC address mailbox. */
11556 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11557 if ((hi >> 16) == 0x484b) {
11558 dev->dev_addr[0] = (hi >> 8) & 0xff;
11559 dev->dev_addr[1] = (hi >> 0) & 0xff;
11560
11561 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11562 dev->dev_addr[2] = (lo >> 24) & 0xff;
11563 dev->dev_addr[3] = (lo >> 16) & 0xff;
11564 dev->dev_addr[4] = (lo >> 8) & 0xff;
11565 dev->dev_addr[5] = (lo >> 0) & 0xff;
1da177e4 11566
008652b3
MC
11567 /* Some old bootcode may report a 0 MAC address in SRAM */
11568 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11569 }
11570 if (!addr_ok) {
11571 /* Next, try NVRAM. */
f49639e6 11572 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
008652b3
MC
11573 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11574 dev->dev_addr[0] = ((hi >> 16) & 0xff);
11575 dev->dev_addr[1] = ((hi >> 24) & 0xff);
11576 dev->dev_addr[2] = ((lo >> 0) & 0xff);
11577 dev->dev_addr[3] = ((lo >> 8) & 0xff);
11578 dev->dev_addr[4] = ((lo >> 16) & 0xff);
11579 dev->dev_addr[5] = ((lo >> 24) & 0xff);
11580 }
11581 /* Finally just fetch it out of the MAC control regs. */
11582 else {
11583 hi = tr32(MAC_ADDR_0_HIGH);
11584 lo = tr32(MAC_ADDR_0_LOW);
11585
11586 dev->dev_addr[5] = lo & 0xff;
11587 dev->dev_addr[4] = (lo >> 8) & 0xff;
11588 dev->dev_addr[3] = (lo >> 16) & 0xff;
11589 dev->dev_addr[2] = (lo >> 24) & 0xff;
11590 dev->dev_addr[1] = hi & 0xff;
11591 dev->dev_addr[0] = (hi >> 8) & 0xff;
11592 }
1da177e4
LT
11593 }
11594
11595 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11596#ifdef CONFIG_SPARC64
11597 if (!tg3_get_default_macaddr_sparc(tp))
11598 return 0;
11599#endif
11600 return -EINVAL;
11601 }
2ff43697 11602 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
11603 return 0;
11604}
11605
59e6b434
DM
11606#define BOUNDARY_SINGLE_CACHELINE 1
11607#define BOUNDARY_MULTI_CACHELINE 2
11608
11609static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11610{
11611 int cacheline_size;
11612 u8 byte;
11613 int goal;
11614
11615 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11616 if (byte == 0)
11617 cacheline_size = 1024;
11618 else
11619 cacheline_size = (int) byte * 4;
11620
11621 /* On 5703 and later chips, the boundary bits have no
11622 * effect.
11623 */
11624 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11625 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11626 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11627 goto out;
11628
11629#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11630 goal = BOUNDARY_MULTI_CACHELINE;
11631#else
11632#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11633 goal = BOUNDARY_SINGLE_CACHELINE;
11634#else
11635 goal = 0;
11636#endif
11637#endif
11638
11639 if (!goal)
11640 goto out;
11641
11642 /* PCI controllers on most RISC systems tend to disconnect
11643 * when a device tries to burst across a cache-line boundary.
11644 * Therefore, letting tg3 do so just wastes PCI bandwidth.
11645 *
11646 * Unfortunately, for PCI-E there are only limited
11647 * write-side controls for this, and thus for reads
11648 * we will still get the disconnects. We'll also waste
11649 * these PCI cycles for both read and write for chips
11650 * other than 5700 and 5701 which do not implement the
11651 * boundary bits.
11652 */
11653 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11654 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11655 switch (cacheline_size) {
11656 case 16:
11657 case 32:
11658 case 64:
11659 case 128:
11660 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11661 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11662 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11663 } else {
11664 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11665 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11666 }
11667 break;
11668
11669 case 256:
11670 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11671 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11672 break;
11673
11674 default:
11675 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11676 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11677 break;
11678 };
11679 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11680 switch (cacheline_size) {
11681 case 16:
11682 case 32:
11683 case 64:
11684 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11685 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11686 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11687 break;
11688 }
11689 /* fallthrough */
11690 case 128:
11691 default:
11692 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11693 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11694 break;
11695 };
11696 } else {
11697 switch (cacheline_size) {
11698 case 16:
11699 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11700 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11701 DMA_RWCTRL_WRITE_BNDRY_16);
11702 break;
11703 }
11704 /* fallthrough */
11705 case 32:
11706 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11707 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11708 DMA_RWCTRL_WRITE_BNDRY_32);
11709 break;
11710 }
11711 /* fallthrough */
11712 case 64:
11713 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11714 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11715 DMA_RWCTRL_WRITE_BNDRY_64);
11716 break;
11717 }
11718 /* fallthrough */
11719 case 128:
11720 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11721 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11722 DMA_RWCTRL_WRITE_BNDRY_128);
11723 break;
11724 }
11725 /* fallthrough */
11726 case 256:
11727 val |= (DMA_RWCTRL_READ_BNDRY_256 |
11728 DMA_RWCTRL_WRITE_BNDRY_256);
11729 break;
11730 case 512:
11731 val |= (DMA_RWCTRL_READ_BNDRY_512 |
11732 DMA_RWCTRL_WRITE_BNDRY_512);
11733 break;
11734 case 1024:
11735 default:
11736 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11737 DMA_RWCTRL_WRITE_BNDRY_1024);
11738 break;
11739 };
11740 }
11741
11742out:
11743 return val;
11744}
11745
1da177e4
LT
/* Exercise the chip's host<->NIC DMA engine with a single transfer of
 * @size bytes and poll for completion.
 *
 * @buf/@buf_dma: host test buffer (kernel virtual / DMA bus address)
 * @to_device:    nonzero = host-to-NIC (read DMA), zero = NIC-to-host
 *                (write DMA)
 *
 * Returns 0 if the completion FIFO echoes the descriptor within the
 * poll window (40 * 100us), -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA machinery before issuing the test descriptor. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer; 0x2100 is a NIC-local SRAM mbuf
	 * address used as the on-chip endpoint of the transfer.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI memory window registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for the descriptor address to come
	 * back (low 16 bits only).
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11826
ded7340d 11827#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
11828
/* Choose DMA read/write control settings for this chip/bus combination
 * and, on 5700/5701 only, run a live DMA loopback test to detect the
 * write-DMA boundary bug, tightening the boundary to 16 bytes if data
 * corruption is observed.
 *
 * Called once at probe time.  Returns 0 on success, -ENOMEM if the
 * test buffer cannot be allocated, or -ENODEV if DMA corruption
 * persists even at the safest boundary setting.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Chip/bus specific watermark bits.  The magic constants are
	 * Broadcom-recommended register values for each family.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: low nibble must be clear. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the live write-DMA bug test below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write a pattern to the NIC and read it back; on
	 * corruption, retry once with the 16-byte boundary before
	 * declaring the device broken.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: tighten the write
				 * boundary to 16 bytes and retry.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12015
12016static void __devinit tg3_init_link_config(struct tg3 *tp)
12017{
12018 tp->link_config.advertising =
12019 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12020 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12021 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12022 ADVERTISED_Autoneg | ADVERTISED_MII);
12023 tp->link_config.speed = SPEED_INVALID;
12024 tp->link_config.duplex = DUPLEX_INVALID;
12025 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
12026 tp->link_config.active_speed = SPEED_INVALID;
12027 tp->link_config.active_duplex = DUPLEX_INVALID;
12028 tp->link_config.phy_is_low_power = 0;
12029 tp->link_config.orig_speed = SPEED_INVALID;
12030 tp->link_config.orig_duplex = DUPLEX_INVALID;
12031 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12032}
12033
12034static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12035{
fdfec172
MC
12036 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12037 tp->bufmgr_config.mbuf_read_dma_low_water =
12038 DEFAULT_MB_RDMA_LOW_WATER_5705;
12039 tp->bufmgr_config.mbuf_mac_rx_low_water =
12040 DEFAULT_MB_MACRX_LOW_WATER_5705;
12041 tp->bufmgr_config.mbuf_high_water =
12042 DEFAULT_MB_HIGH_WATER_5705;
b5d3772c
MC
12043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12044 tp->bufmgr_config.mbuf_mac_rx_low_water =
12045 DEFAULT_MB_MACRX_LOW_WATER_5906;
12046 tp->bufmgr_config.mbuf_high_water =
12047 DEFAULT_MB_HIGH_WATER_5906;
12048 }
fdfec172
MC
12049
12050 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12051 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12052 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12053 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12054 tp->bufmgr_config.mbuf_high_water_jumbo =
12055 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12056 } else {
12057 tp->bufmgr_config.mbuf_read_dma_low_water =
12058 DEFAULT_MB_RDMA_LOW_WATER;
12059 tp->bufmgr_config.mbuf_mac_rx_low_water =
12060 DEFAULT_MB_MACRX_LOW_WATER;
12061 tp->bufmgr_config.mbuf_high_water =
12062 DEFAULT_MB_HIGH_WATER;
12063
12064 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12065 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12066 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12067 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12068 tp->bufmgr_config.mbuf_high_water_jumbo =
12069 DEFAULT_MB_HIGH_WATER_JUMBO;
12070 }
1da177e4
LT
12071
12072 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12073 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12074}
12075
12076static char * __devinit tg3_phy_string(struct tg3 *tp)
12077{
12078 switch (tp->phy_id & PHY_ID_MASK) {
12079 case PHY_ID_BCM5400: return "5400";
12080 case PHY_ID_BCM5401: return "5401";
12081 case PHY_ID_BCM5411: return "5411";
12082 case PHY_ID_BCM5701: return "5701";
12083 case PHY_ID_BCM5703: return "5703";
12084 case PHY_ID_BCM5704: return "5704";
12085 case PHY_ID_BCM5705: return "5705";
12086 case PHY_ID_BCM5750: return "5750";
85e94ced 12087 case PHY_ID_BCM5752: return "5752";
a4e2b347 12088 case PHY_ID_BCM5714: return "5714";
4cf78e4f 12089 case PHY_ID_BCM5780: return "5780";
af36e6b6 12090 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 12091 case PHY_ID_BCM5787: return "5787";
d30cdd28 12092 case PHY_ID_BCM5784: return "5784";
126a3368 12093 case PHY_ID_BCM5756: return "5722/5756";
b5d3772c 12094 case PHY_ID_BCM5906: return "5906";
9936bcf6 12095 case PHY_ID_BCM5761: return "5761";
1da177e4
LT
12096 case PHY_ID_BCM8002: return "8002/serdes";
12097 case 0: return "serdes";
12098 default: return "unknown";
12099 };
12100}
12101
f9804ddb
MC
12102static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12103{
12104 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12105 strcpy(str, "PCI Express");
12106 return str;
12107 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12108 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12109
12110 strcpy(str, "PCIX:");
12111
12112 if ((clock_ctrl == 7) ||
12113 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12114 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12115 strcat(str, "133MHz");
12116 else if (clock_ctrl == 0)
12117 strcat(str, "33MHz");
12118 else if (clock_ctrl == 2)
12119 strcat(str, "50MHz");
12120 else if (clock_ctrl == 4)
12121 strcat(str, "66MHz");
12122 else if (clock_ctrl == 6)
12123 strcat(str, "100MHz");
f9804ddb
MC
12124 } else {
12125 strcpy(str, "PCI:");
12126 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12127 strcat(str, "66MHz");
12128 else
12129 strcat(str, "33MHz");
12130 }
12131 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12132 strcat(str, ":32-bit");
12133 else
12134 strcat(str, ":64-bit");
12135 return str;
12136}
12137
8c2dc7e1 12138static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
1da177e4
LT
12139{
12140 struct pci_dev *peer;
12141 unsigned int func, devnr = tp->pdev->devfn & ~7;
12142
12143 for (func = 0; func < 8; func++) {
12144 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12145 if (peer && peer != tp->pdev)
12146 break;
12147 pci_dev_put(peer);
12148 }
16fe9d74
MC
12149 /* 5704 can be configured in single-port mode, set peer to
12150 * tp->pdev in that case.
12151 */
12152 if (!peer) {
12153 peer = tp->pdev;
12154 return peer;
12155 }
1da177e4
LT
12156
12157 /*
12158 * We don't need to keep the refcount elevated; there's no way
12159 * to remove one half of this device without removing the other
12160 */
12161 pci_dev_put(peer);
12162
12163 return peer;
12164}
12165
15f9850d
DM
12166static void __devinit tg3_init_coal(struct tg3 *tp)
12167{
12168 struct ethtool_coalesce *ec = &tp->coal;
12169
12170 memset(ec, 0, sizeof(*ec));
12171 ec->cmd = ETHTOOL_GCOALESCE;
12172 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12173 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12174 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12175 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12176 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12177 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12178 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12179 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12180 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12181
12182 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12183 HOSTCC_MODE_CLRTICK_TXBD)) {
12184 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12185 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12186 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12187 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12188 }
d244c892
MC
12189
12190 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12191 ec->rx_coalesce_usecs_irq = 0;
12192 ec->tx_coalesce_usecs_irq = 0;
12193 ec->stats_block_coalesce_usecs = 0;
12194 }
15f9850d
DM
12195}
12196
1da177e4
LT
12197static int __devinit tg3_init_one(struct pci_dev *pdev,
12198 const struct pci_device_id *ent)
12199{
12200 static int tg3_version_printed = 0;
12201 unsigned long tg3reg_base, tg3reg_len;
12202 struct net_device *dev;
12203 struct tg3 *tp;
72f2afb8 12204 int i, err, pm_cap;
f9804ddb 12205 char str[40];
72f2afb8 12206 u64 dma_mask, persist_dma_mask;
1da177e4
LT
12207
12208 if (tg3_version_printed++ == 0)
12209 printk(KERN_INFO "%s", version);
12210
12211 err = pci_enable_device(pdev);
12212 if (err) {
12213 printk(KERN_ERR PFX "Cannot enable PCI device, "
12214 "aborting.\n");
12215 return err;
12216 }
12217
12218 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12219 printk(KERN_ERR PFX "Cannot find proper PCI device "
12220 "base address, aborting.\n");
12221 err = -ENODEV;
12222 goto err_out_disable_pdev;
12223 }
12224
12225 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12226 if (err) {
12227 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12228 "aborting.\n");
12229 goto err_out_disable_pdev;
12230 }
12231
12232 pci_set_master(pdev);
12233
12234 /* Find power-management capability. */
12235 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12236 if (pm_cap == 0) {
12237 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12238 "aborting.\n");
12239 err = -EIO;
12240 goto err_out_free_res;
12241 }
12242
1da177e4
LT
12243 tg3reg_base = pci_resource_start(pdev, 0);
12244 tg3reg_len = pci_resource_len(pdev, 0);
12245
12246 dev = alloc_etherdev(sizeof(*tp));
12247 if (!dev) {
12248 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12249 err = -ENOMEM;
12250 goto err_out_free_res;
12251 }
12252
1da177e4
LT
12253 SET_NETDEV_DEV(dev, &pdev->dev);
12254
1da177e4
LT
12255#if TG3_VLAN_TAG_USED
12256 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12257 dev->vlan_rx_register = tg3_vlan_rx_register;
1da177e4
LT
12258#endif
12259
12260 tp = netdev_priv(dev);
12261 tp->pdev = pdev;
12262 tp->dev = dev;
12263 tp->pm_cap = pm_cap;
12264 tp->mac_mode = TG3_DEF_MAC_MODE;
12265 tp->rx_mode = TG3_DEF_RX_MODE;
12266 tp->tx_mode = TG3_DEF_TX_MODE;
12267 tp->mi_mode = MAC_MI_MODE_BASE;
12268 if (tg3_debug > 0)
12269 tp->msg_enable = tg3_debug;
12270 else
12271 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12272
12273 /* The word/byte swap controls here control register access byte
12274 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12275 * setting below.
12276 */
12277 tp->misc_host_ctrl =
12278 MISC_HOST_CTRL_MASK_PCI_INT |
12279 MISC_HOST_CTRL_WORD_SWAP |
12280 MISC_HOST_CTRL_INDIR_ACCESS |
12281 MISC_HOST_CTRL_PCISTATE_RW;
12282
12283 /* The NONFRM (non-frame) byte/word swap controls take effect
12284 * on descriptor entries, anything which isn't packet data.
12285 *
12286 * The StrongARM chips on the board (one for tx, one for rx)
12287 * are running in big-endian mode.
12288 */
12289 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12290 GRC_MODE_WSWAP_NONFRM_DATA);
12291#ifdef __BIG_ENDIAN
12292 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12293#endif
12294 spin_lock_init(&tp->lock);
1da177e4 12295 spin_lock_init(&tp->indirect_lock);
c4028958 12296 INIT_WORK(&tp->reset_task, tg3_reset_task);
1da177e4
LT
12297
12298 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
ab0049b4 12299 if (!tp->regs) {
1da177e4
LT
12300 printk(KERN_ERR PFX "Cannot map device registers, "
12301 "aborting.\n");
12302 err = -ENOMEM;
12303 goto err_out_free_dev;
12304 }
12305
12306 tg3_init_link_config(tp);
12307
1da177e4
LT
12308 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12309 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12310 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12311
12312 dev->open = tg3_open;
12313 dev->stop = tg3_close;
12314 dev->get_stats = tg3_get_stats;
12315 dev->set_multicast_list = tg3_set_rx_mode;
12316 dev->set_mac_address = tg3_set_mac_addr;
12317 dev->do_ioctl = tg3_ioctl;
12318 dev->tx_timeout = tg3_tx_timeout;
bea3348e 12319 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
1da177e4 12320 dev->ethtool_ops = &tg3_ethtool_ops;
1da177e4
LT
12321 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12322 dev->change_mtu = tg3_change_mtu;
12323 dev->irq = pdev->irq;
12324#ifdef CONFIG_NET_POLL_CONTROLLER
12325 dev->poll_controller = tg3_poll_controller;
12326#endif
12327
12328 err = tg3_get_invariants(tp);
12329 if (err) {
12330 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12331 "aborting.\n");
12332 goto err_out_iounmap;
12333 }
12334
4a29cc2e
MC
12335 /* The EPB bridge inside 5714, 5715, and 5780 and any
12336 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
12337 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12338 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12339 * do DMA address check in tg3_start_xmit().
12340 */
4a29cc2e
MC
12341 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12342 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12343 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
72f2afb8
MC
12344 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12345#ifdef CONFIG_HIGHMEM
12346 dma_mask = DMA_64BIT_MASK;
12347#endif
4a29cc2e 12348 } else
72f2afb8
MC
12349 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12350
12351 /* Configure DMA attributes. */
12352 if (dma_mask > DMA_32BIT_MASK) {
12353 err = pci_set_dma_mask(pdev, dma_mask);
12354 if (!err) {
12355 dev->features |= NETIF_F_HIGHDMA;
12356 err = pci_set_consistent_dma_mask(pdev,
12357 persist_dma_mask);
12358 if (err < 0) {
12359 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12360 "DMA for consistent allocations\n");
12361 goto err_out_iounmap;
12362 }
12363 }
12364 }
12365 if (err || dma_mask == DMA_32BIT_MASK) {
12366 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12367 if (err) {
12368 printk(KERN_ERR PFX "No usable DMA configuration, "
12369 "aborting.\n");
12370 goto err_out_iounmap;
12371 }
12372 }
12373
fdfec172 12374 tg3_init_bufmgr_config(tp);
1da177e4 12375
1da177e4
LT
12376 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12377 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12378 }
12379 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12380 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12381 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
c7835a77 12382 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
1da177e4
LT
12383 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12384 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12385 } else {
7f62ad5d 12386 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
1da177e4
LT
12387 }
12388
4e3a7aaa
MC
12389 /* TSO is on by default on chips that support hardware TSO.
12390 * Firmware TSO on older chips gives lower performance, so it
12391 * is off by default, but can be enabled using ethtool.
12392 */
b0026624 12393 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
1da177e4 12394 dev->features |= NETIF_F_TSO;
b5d3772c
MC
12395 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12396 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
b0026624 12397 dev->features |= NETIF_F_TSO6;
9936bcf6
MC
12398 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12399 dev->features |= NETIF_F_TSO_ECN;
b0026624 12400 }
1da177e4 12401
1da177e4
LT
12402
12403 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12404 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12405 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12406 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12407 tp->rx_pending = 63;
12408 }
12409
1da177e4
LT
12410 err = tg3_get_device_address(tp);
12411 if (err) {
12412 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12413 "aborting.\n");
12414 goto err_out_iounmap;
12415 }
12416
12417 /*
12418 * Reset chip in case UNDI or EFI driver did not shutdown
12419 * DMA self test will enable WDMAC and we'll see (spurious)
12420 * pending DMA on the PCI bus at that point.
12421 */
12422 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12423 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
1da177e4 12424 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 12425 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
12426 }
12427
12428 err = tg3_test_dma(tp);
12429 if (err) {
12430 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12431 goto err_out_iounmap;
12432 }
12433
12434 /* Tigon3 can do ipv4 only... and some chips have buggy
12435 * checksumming.
12436 */
12437 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
d212f87b 12438 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
af36e6b6 12439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 12440 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
12441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d212f87b
SH
12443 dev->features |= NETIF_F_IPV6_CSUM;
12444
1da177e4
LT
12445 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12446 } else
12447 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12448
1da177e4
LT
12449 /* flow control autonegotiation is default behavior */
12450 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12451
15f9850d
DM
12452 tg3_init_coal(tp);
12453
0d3031d9
MC
12454 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12455 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12456 printk(KERN_ERR PFX "Cannot find proper PCI device "
12457 "base address for APE, aborting.\n");
12458 err = -ENODEV;
12459 goto err_out_iounmap;
12460 }
12461
12462 tg3reg_base = pci_resource_start(pdev, 2);
12463 tg3reg_len = pci_resource_len(pdev, 2);
12464
12465 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12466 if (tp->aperegs == 0UL) {
12467 printk(KERN_ERR PFX "Cannot map APE registers, "
12468 "aborting.\n");
12469 err = -ENOMEM;
12470 goto err_out_iounmap;
12471 }
12472
12473 tg3_ape_lock_init(tp);
12474 }
12475
c49a1561
MC
12476 pci_set_drvdata(pdev, dev);
12477
1da177e4
LT
12478 err = register_netdev(dev);
12479 if (err) {
12480 printk(KERN_ERR PFX "Cannot register net device, "
12481 "aborting.\n");
0d3031d9 12482 goto err_out_apeunmap;
1da177e4
LT
12483 }
12484
cbb45d21 12485 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
1da177e4
LT
12486 dev->name,
12487 tp->board_part_number,
12488 tp->pci_chip_rev_id,
12489 tg3_phy_string(tp),
f9804ddb 12490 tg3_bus_string(tp, str),
cbb45d21
MC
12491 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12492 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12493 "10/100/1000Base-T")));
1da177e4
LT
12494
12495 for (i = 0; i < 6; i++)
12496 printk("%2.2x%c", dev->dev_addr[i],
12497 i == 5 ? '\n' : ':');
12498
12499 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
1c46ae05 12500 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
1da177e4
LT
12501 dev->name,
12502 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12503 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12504 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12505 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
1da177e4
LT
12506 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12507 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
4a29cc2e
MC
12508 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12509 dev->name, tp->dma_rwctrl,
12510 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12511 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
1da177e4
LT
12512
12513 return 0;
12514
0d3031d9
MC
12515err_out_apeunmap:
12516 if (tp->aperegs) {
12517 iounmap(tp->aperegs);
12518 tp->aperegs = NULL;
12519 }
12520
1da177e4 12521err_out_iounmap:
6892914f
MC
12522 if (tp->regs) {
12523 iounmap(tp->regs);
22abe310 12524 tp->regs = NULL;
6892914f 12525 }
1da177e4
LT
12526
12527err_out_free_dev:
12528 free_netdev(dev);
12529
12530err_out_free_res:
12531 pci_release_regions(pdev);
12532
12533err_out_disable_pdev:
12534 pci_disable_device(pdev);
12535 pci_set_drvdata(pdev, NULL);
12536 return err;
12537}
12538
12539static void __devexit tg3_remove_one(struct pci_dev *pdev)
12540{
12541 struct net_device *dev = pci_get_drvdata(pdev);
12542
12543 if (dev) {
12544 struct tg3 *tp = netdev_priv(dev);
12545
7faa006f 12546 flush_scheduled_work();
1da177e4 12547 unregister_netdev(dev);
0d3031d9
MC
12548 if (tp->aperegs) {
12549 iounmap(tp->aperegs);
12550 tp->aperegs = NULL;
12551 }
6892914f
MC
12552 if (tp->regs) {
12553 iounmap(tp->regs);
22abe310 12554 tp->regs = NULL;
6892914f 12555 }
1da177e4
LT
12556 free_netdev(dev);
12557 pci_release_regions(pdev);
12558 pci_disable_device(pdev);
12559 pci_set_drvdata(pdev, NULL);
12560 }
12561}
12562
/* Legacy PCI power-management suspend hook.
 *
 * Saves PCI config space unconditionally, then — only if the interface
 * is up — quiesces the driver (stop queues, kill the timer, mask
 * interrupts, detach the netdev, halt the chip) before dropping the
 * device to the requested power state.  If the power transition fails,
 * the hardware is restarted so the interface stays usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Let any queued reset_task finish before quiescing. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: restart the chip and re-attach
		 * so the device keeps working; 'err' is still returned.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12614
/* Legacy PCI power-management resume hook.
 *
 * Restores PCI config space; if the interface was running at suspend
 * time, returns the chip to D0, re-attaches the netdev, restarts the
 * hardware and re-arms the driver timer.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	/* Hardware bug - MSI won't work if INTX disabled. */
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		pci_intx(tp->pdev, 1);

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12654
/* PCI driver glue: probe/remove plus the legacy (pre-dev_pm_ops)
 * suspend/resume callbacks.
 */
static struct pci_driver tg3_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = tg3_pci_tbl,
	.probe = tg3_init_one,
	.remove = __devexit_p(tg3_remove_one),
	.suspend = tg3_suspend,
	.resume = tg3_resume
};
12663
/* Module load: register the tg3 driver with the PCI core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

/* Module unload: unregister the driver (invokes tg3_remove_one for
 * each bound device).
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);