]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Add 5761 support
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
65610fba 7 * Copyright (C) 2005-2007 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
14c85021 26#include <linux/in.h>
1da177e4
LT
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
f9a5f7d3 40#include <linux/dma-mapping.h>
1da177e4
LT
41
42#include <net/checksum.h>
c9bdd4b5 43#include <net/ip.h>
1da177e4
LT
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
49b6e95f 50#ifdef CONFIG_SPARC
1da177e4 51#include <asm/idprom.h>
49b6e95f 52#include <asm/prom.h>
1da177e4
LT
53#endif
54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56#define TG3_VLAN_TAG_USED 1
57#else
58#define TG3_VLAN_TAG_USED 0
59#endif
60
1da177e4 61#define TG3_TSO_SUPPORT 1
1da177e4
LT
62
63#include "tg3.h"
64
65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": "
182f6ed5
MC
67#define DRV_MODULE_VERSION "3.82"
68#define DRV_MODULE_RELDATE "October 5, 2007"
1da177e4
LT
69
70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0
72#define TG3_DEF_TX_MODE 0
73#define TG3_DEF_MSG_ENABLE \
74 (NETIF_MSG_DRV | \
75 NETIF_MSG_PROBE | \
76 NETIF_MSG_LINK | \
77 NETIF_MSG_TIMER | \
78 NETIF_MSG_IFDOWN | \
79 NETIF_MSG_IFUP | \
80 NETIF_MSG_RX_ERR | \
81 NETIF_MSG_TX_ERR)
82
83/* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
85 */
86#define TG3_TX_TIMEOUT (5 * HZ)
87
88/* hardware minimum and maximum for a single frame's data payload */
89#define TG3_MIN_MTU 60
90#define TG3_MAX_MTU(tp) \
0f893dc6 91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
92
93/* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
96 */
97#define TG3_RX_RING_SIZE 512
98#define TG3_DEF_RX_RING_PENDING 200
99#define TG3_RX_JUMBO_RING_SIZE 256
100#define TG3_DEF_RX_JUMBO_RING_PENDING 100
101
102/* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
107 */
108#define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
110
111#define TG3_TX_RING_SIZE 512
112#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113
114#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 TG3_RX_RING_SIZE)
116#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
121 TG3_TX_RING_SIZE)
1da177e4
LT
122#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
126
127/* minimum number of free TX descriptors required to wake up TX process */
42952231 128#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
1da177e4
LT
129
130/* number of ETHTOOL_GSTATS u64's */
131#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
4cafd3f5
MC
133#define TG3_NUM_TEST 6
134
1da177e4
LT
135static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140MODULE_LICENSE("GPL");
141MODULE_VERSION(DRV_MODULE_VERSION);
142
143static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144module_param(tg3_debug, int, 0);
145MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147static struct pci_device_id tg3_pci_tbl[] = {
13185217
HK
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217
HK
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
676917d4 191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
13185217
HK
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
9936bcf6
MC
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
13185217
HK
205 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
211 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
212 {}
1da177e4
LT
213};
214
215MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
216
50da859d 217static const struct {
1da177e4
LT
218 const char string[ETH_GSTRING_LEN];
219} ethtool_stats_keys[TG3_NUM_STATS] = {
220 { "rx_octets" },
221 { "rx_fragments" },
222 { "rx_ucast_packets" },
223 { "rx_mcast_packets" },
224 { "rx_bcast_packets" },
225 { "rx_fcs_errors" },
226 { "rx_align_errors" },
227 { "rx_xon_pause_rcvd" },
228 { "rx_xoff_pause_rcvd" },
229 { "rx_mac_ctrl_rcvd" },
230 { "rx_xoff_entered" },
231 { "rx_frame_too_long_errors" },
232 { "rx_jabbers" },
233 { "rx_undersize_packets" },
234 { "rx_in_length_errors" },
235 { "rx_out_length_errors" },
236 { "rx_64_or_less_octet_packets" },
237 { "rx_65_to_127_octet_packets" },
238 { "rx_128_to_255_octet_packets" },
239 { "rx_256_to_511_octet_packets" },
240 { "rx_512_to_1023_octet_packets" },
241 { "rx_1024_to_1522_octet_packets" },
242 { "rx_1523_to_2047_octet_packets" },
243 { "rx_2048_to_4095_octet_packets" },
244 { "rx_4096_to_8191_octet_packets" },
245 { "rx_8192_to_9022_octet_packets" },
246
247 { "tx_octets" },
248 { "tx_collisions" },
249
250 { "tx_xon_sent" },
251 { "tx_xoff_sent" },
252 { "tx_flow_control" },
253 { "tx_mac_errors" },
254 { "tx_single_collisions" },
255 { "tx_mult_collisions" },
256 { "tx_deferred" },
257 { "tx_excessive_collisions" },
258 { "tx_late_collisions" },
259 { "tx_collide_2times" },
260 { "tx_collide_3times" },
261 { "tx_collide_4times" },
262 { "tx_collide_5times" },
263 { "tx_collide_6times" },
264 { "tx_collide_7times" },
265 { "tx_collide_8times" },
266 { "tx_collide_9times" },
267 { "tx_collide_10times" },
268 { "tx_collide_11times" },
269 { "tx_collide_12times" },
270 { "tx_collide_13times" },
271 { "tx_collide_14times" },
272 { "tx_collide_15times" },
273 { "tx_ucast_packets" },
274 { "tx_mcast_packets" },
275 { "tx_bcast_packets" },
276 { "tx_carrier_sense_errors" },
277 { "tx_discards" },
278 { "tx_errors" },
279
280 { "dma_writeq_full" },
281 { "dma_write_prioq_full" },
282 { "rxbds_empty" },
283 { "rx_discards" },
284 { "rx_errors" },
285 { "rx_threshold_hit" },
286
287 { "dma_readq_full" },
288 { "dma_read_prioq_full" },
289 { "tx_comp_queue_full" },
290
291 { "ring_set_send_prod_index" },
292 { "ring_status_update" },
293 { "nic_irqs" },
294 { "nic_avoided_irqs" },
295 { "nic_tx_threshold_hit" }
296};
297
50da859d 298static const struct {
4cafd3f5
MC
299 const char string[ETH_GSTRING_LEN];
300} ethtool_test_keys[TG3_NUM_TEST] = {
301 { "nvram test (online) " },
302 { "link test (online) " },
303 { "register test (offline)" },
304 { "memory test (offline)" },
305 { "loopback test (offline)" },
306 { "interrupt test (offline)" },
307};
308
b401e9e2
MC
309static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
310{
311 writel(val, tp->regs + off);
312}
313
314static u32 tg3_read32(struct tg3 *tp, u32 off)
315{
6aa20a22 316 return (readl(tp->regs + off));
b401e9e2
MC
317}
318
0d3031d9
MC
319static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
320{
321 writel(val, tp->aperegs + off);
322}
323
324static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
325{
326 return (readl(tp->aperegs + off));
327}
328
1da177e4
LT
329static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
330{
6892914f
MC
331 unsigned long flags;
332
333 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
334 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
335 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 336 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
337}
338
339static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
340{
341 writel(val, tp->regs + off);
342 readl(tp->regs + off);
1da177e4
LT
343}
344
6892914f 345static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 346{
6892914f
MC
347 unsigned long flags;
348 u32 val;
349
350 spin_lock_irqsave(&tp->indirect_lock, flags);
351 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
352 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
353 spin_unlock_irqrestore(&tp->indirect_lock, flags);
354 return val;
355}
356
357static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
358{
359 unsigned long flags;
360
361 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
362 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
363 TG3_64BIT_REG_LOW, val);
364 return;
365 }
366 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
367 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
368 TG3_64BIT_REG_LOW, val);
369 return;
1da177e4 370 }
6892914f
MC
371
372 spin_lock_irqsave(&tp->indirect_lock, flags);
373 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
375 spin_unlock_irqrestore(&tp->indirect_lock, flags);
376
377 /* In indirect mode when disabling interrupts, we also need
378 * to clear the interrupt bit in the GRC local ctrl register.
379 */
380 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
381 (val == 0x1)) {
382 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
383 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
384 }
385}
386
387static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
388{
389 unsigned long flags;
390 u32 val;
391
392 spin_lock_irqsave(&tp->indirect_lock, flags);
393 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
394 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395 spin_unlock_irqrestore(&tp->indirect_lock, flags);
396 return val;
397}
398
b401e9e2
MC
399/* usec_wait specifies the wait time in usec when writing to certain registers
400 * where it is unsafe to read back the register without some delay.
401 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
402 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
403 */
404static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
6892914f 405{
b401e9e2
MC
406 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
407 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
408 /* Non-posted methods */
409 tp->write32(tp, off, val);
410 else {
411 /* Posted method */
412 tg3_write32(tp, off, val);
413 if (usec_wait)
414 udelay(usec_wait);
415 tp->read32(tp, off);
416 }
417 /* Wait again after the read for the posted method to guarantee that
418 * the wait time is met.
419 */
420 if (usec_wait)
421 udelay(usec_wait);
1da177e4
LT
422}
423
09ee929c
MC
424static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
425{
426 tp->write32_mbox(tp, off, val);
6892914f
MC
427 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
428 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
429 tp->read32_mbox(tp, off);
09ee929c
MC
430}
431
20094930 432static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
433{
434 void __iomem *mbox = tp->regs + off;
435 writel(val, mbox);
436 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
437 writel(val, mbox);
438 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
439 readl(mbox);
440}
441
b5d3772c
MC
442static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
443{
444 return (readl(tp->regs + off + GRCMBOX_BASE));
445}
446
447static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
448{
449 writel(val, tp->regs + off + GRCMBOX_BASE);
450}
451
20094930 452#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 453#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
454#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
455#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 456#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
457
458#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
459#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
460#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 461#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
462
463static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
464{
6892914f
MC
465 unsigned long flags;
466
b5d3772c
MC
467 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
468 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
469 return;
470
6892914f 471 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
472 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
473 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 475
bbadf503
MC
476 /* Always leave this as zero. */
477 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
478 } else {
479 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
480 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 481
bbadf503
MC
482 /* Always leave this as zero. */
483 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
484 }
485 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
486}
487
1da177e4
LT
488static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
489{
6892914f
MC
490 unsigned long flags;
491
b5d3772c
MC
492 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
493 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
494 *val = 0;
495 return;
496 }
497
6892914f 498 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
499 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
500 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
501 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 502
bbadf503
MC
503 /* Always leave this as zero. */
504 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
505 } else {
506 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
507 *val = tr32(TG3PCI_MEM_WIN_DATA);
508
509 /* Always leave this as zero. */
510 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
511 }
6892914f 512 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
513}
514
0d3031d9
MC
515static void tg3_ape_lock_init(struct tg3 *tp)
516{
517 int i;
518
519 /* Make sure the driver hasn't any stale locks. */
520 for (i = 0; i < 8; i++)
521 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
522 APE_LOCK_GRANT_DRIVER);
523}
524
525static int tg3_ape_lock(struct tg3 *tp, int locknum)
526{
527 int i, off;
528 int ret = 0;
529 u32 status;
530
531 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
532 return 0;
533
534 switch (locknum) {
535 case TG3_APE_LOCK_MEM:
536 break;
537 default:
538 return -EINVAL;
539 }
540
541 off = 4 * locknum;
542
543 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
544
545 /* Wait for up to 1 millisecond to acquire lock. */
546 for (i = 0; i < 100; i++) {
547 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
548 if (status == APE_LOCK_GRANT_DRIVER)
549 break;
550 udelay(10);
551 }
552
553 if (status != APE_LOCK_GRANT_DRIVER) {
554 /* Revoke the lock request. */
555 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
556 APE_LOCK_GRANT_DRIVER);
557
558 ret = -EBUSY;
559 }
560
561 return ret;
562}
563
564static void tg3_ape_unlock(struct tg3 *tp, int locknum)
565{
566 int off;
567
568 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
569 return;
570
571 switch (locknum) {
572 case TG3_APE_LOCK_MEM:
573 break;
574 default:
575 return;
576 }
577
578 off = 4 * locknum;
579 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
580}
581
1da177e4
LT
582static void tg3_disable_ints(struct tg3 *tp)
583{
584 tw32(TG3PCI_MISC_HOST_CTRL,
585 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c 586 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1da177e4
LT
587}
588
589static inline void tg3_cond_int(struct tg3 *tp)
590{
38f3843e
MC
591 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
592 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4 593 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
b5d3772c
MC
594 else
595 tw32(HOSTCC_MODE, tp->coalesce_mode |
596 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
597}
598
599static void tg3_enable_ints(struct tg3 *tp)
600{
bbe832c0
MC
601 tp->irq_sync = 0;
602 wmb();
603
1da177e4
LT
604 tw32(TG3PCI_MISC_HOST_CTRL,
605 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c
MC
606 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
607 (tp->last_tag << 24));
fcfa0a32
MC
608 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
609 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
610 (tp->last_tag << 24));
1da177e4
LT
611 tg3_cond_int(tp);
612}
613
04237ddd
MC
614static inline unsigned int tg3_has_work(struct tg3 *tp)
615{
616 struct tg3_hw_status *sblk = tp->hw_status;
617 unsigned int work_exists = 0;
618
619 /* check for phy events */
620 if (!(tp->tg3_flags &
621 (TG3_FLAG_USE_LINKCHG_REG |
622 TG3_FLAG_POLL_SERDES))) {
623 if (sblk->status & SD_STATUS_LINK_CHG)
624 work_exists = 1;
625 }
626 /* check for RX/TX work to do */
627 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
628 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
629 work_exists = 1;
630
631 return work_exists;
632}
633
1da177e4 634/* tg3_restart_ints
04237ddd
MC
635 * similar to tg3_enable_ints, but it accurately determines whether there
636 * is new work pending and can return without flushing the PIO write
6aa20a22 637 * which reenables interrupts
1da177e4
LT
638 */
639static void tg3_restart_ints(struct tg3 *tp)
640{
fac9b83e
DM
641 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
642 tp->last_tag << 24);
1da177e4
LT
643 mmiowb();
644
fac9b83e
DM
645 /* When doing tagged status, this work check is unnecessary.
646 * The last_tag we write above tells the chip which piece of
647 * work we've completed.
648 */
649 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
650 tg3_has_work(tp))
04237ddd
MC
651 tw32(HOSTCC_MODE, tp->coalesce_mode |
652 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
653}
654
655static inline void tg3_netif_stop(struct tg3 *tp)
656{
bbe832c0 657 tp->dev->trans_start = jiffies; /* prevent tx timeout */
bea3348e 658 napi_disable(&tp->napi);
1da177e4
LT
659 netif_tx_disable(tp->dev);
660}
661
662static inline void tg3_netif_start(struct tg3 *tp)
663{
664 netif_wake_queue(tp->dev);
665 /* NOTE: unconditional netif_wake_queue is only appropriate
666 * so long as all callers are assured to have free tx slots
667 * (such as after tg3_init_hw)
668 */
bea3348e 669 napi_enable(&tp->napi);
f47c11ee
DM
670 tp->hw_status->status |= SD_STATUS_UPDATED;
671 tg3_enable_ints(tp);
1da177e4
LT
672}
673
674static void tg3_switch_clocks(struct tg3 *tp)
675{
676 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
677 u32 orig_clock_ctrl;
678
795d01c5
MC
679 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
680 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
4cf78e4f
MC
681 return;
682
1da177e4
LT
683 orig_clock_ctrl = clock_ctrl;
684 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
685 CLOCK_CTRL_CLKRUN_OENABLE |
686 0x1f);
687 tp->pci_clock_ctrl = clock_ctrl;
688
689 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
690 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
b401e9e2
MC
691 tw32_wait_f(TG3PCI_CLOCK_CTRL,
692 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1da177e4
LT
693 }
694 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
b401e9e2
MC
695 tw32_wait_f(TG3PCI_CLOCK_CTRL,
696 clock_ctrl |
697 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
698 40);
699 tw32_wait_f(TG3PCI_CLOCK_CTRL,
700 clock_ctrl | (CLOCK_CTRL_ALTCLK),
701 40);
1da177e4 702 }
b401e9e2 703 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1da177e4
LT
704}
705
706#define PHY_BUSY_LOOPS 5000
707
708static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
709{
710 u32 frame_val;
711 unsigned int loops;
712 int ret;
713
714 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
715 tw32_f(MAC_MI_MODE,
716 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
717 udelay(80);
718 }
719
720 *val = 0x0;
721
722 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
723 MI_COM_PHY_ADDR_MASK);
724 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
725 MI_COM_REG_ADDR_MASK);
726 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
6aa20a22 727
1da177e4
LT
728 tw32_f(MAC_MI_COM, frame_val);
729
730 loops = PHY_BUSY_LOOPS;
731 while (loops != 0) {
732 udelay(10);
733 frame_val = tr32(MAC_MI_COM);
734
735 if ((frame_val & MI_COM_BUSY) == 0) {
736 udelay(5);
737 frame_val = tr32(MAC_MI_COM);
738 break;
739 }
740 loops -= 1;
741 }
742
743 ret = -EBUSY;
744 if (loops != 0) {
745 *val = frame_val & MI_COM_DATA_MASK;
746 ret = 0;
747 }
748
749 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
750 tw32_f(MAC_MI_MODE, tp->mi_mode);
751 udelay(80);
752 }
753
754 return ret;
755}
756
757static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
758{
759 u32 frame_val;
760 unsigned int loops;
761 int ret;
762
b5d3772c
MC
763 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
764 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
765 return 0;
766
1da177e4
LT
767 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
768 tw32_f(MAC_MI_MODE,
769 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
770 udelay(80);
771 }
772
773 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
774 MI_COM_PHY_ADDR_MASK);
775 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
776 MI_COM_REG_ADDR_MASK);
777 frame_val |= (val & MI_COM_DATA_MASK);
778 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
6aa20a22 779
1da177e4
LT
780 tw32_f(MAC_MI_COM, frame_val);
781
782 loops = PHY_BUSY_LOOPS;
783 while (loops != 0) {
784 udelay(10);
785 frame_val = tr32(MAC_MI_COM);
786 if ((frame_val & MI_COM_BUSY) == 0) {
787 udelay(5);
788 frame_val = tr32(MAC_MI_COM);
789 break;
790 }
791 loops -= 1;
792 }
793
794 ret = -EBUSY;
795 if (loops != 0)
796 ret = 0;
797
798 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
799 tw32_f(MAC_MI_MODE, tp->mi_mode);
800 udelay(80);
801 }
802
803 return ret;
804}
805
9ef8ca99
MC
806static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
807{
808 u32 phy;
809
810 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
811 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
812 return;
813
814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
815 u32 ephy;
816
817 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
818 tg3_writephy(tp, MII_TG3_EPHY_TEST,
819 ephy | MII_TG3_EPHY_SHADOW_EN);
820 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
821 if (enable)
822 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
823 else
824 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
825 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
826 }
827 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
828 }
829 } else {
830 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
831 MII_TG3_AUXCTL_SHDWSEL_MISC;
832 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
833 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
834 if (enable)
835 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
836 else
837 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
838 phy |= MII_TG3_AUXCTL_MISC_WREN;
839 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
840 }
841 }
842}
843
1da177e4
LT
844static void tg3_phy_set_wirespeed(struct tg3 *tp)
845{
846 u32 val;
847
848 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
849 return;
850
851 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
852 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
853 tg3_writephy(tp, MII_TG3_AUX_CTRL,
854 (val | (1 << 15) | (1 << 4)));
855}
856
857static int tg3_bmcr_reset(struct tg3 *tp)
858{
859 u32 phy_control;
860 int limit, err;
861
862 /* OK, reset it, and poll the BMCR_RESET bit until it
863 * clears or we time out.
864 */
865 phy_control = BMCR_RESET;
866 err = tg3_writephy(tp, MII_BMCR, phy_control);
867 if (err != 0)
868 return -EBUSY;
869
870 limit = 5000;
871 while (limit--) {
872 err = tg3_readphy(tp, MII_BMCR, &phy_control);
873 if (err != 0)
874 return -EBUSY;
875
876 if ((phy_control & BMCR_RESET) == 0) {
877 udelay(40);
878 break;
879 }
880 udelay(10);
881 }
882 if (limit <= 0)
883 return -EBUSY;
884
885 return 0;
886}
887
888static int tg3_wait_macro_done(struct tg3 *tp)
889{
890 int limit = 100;
891
892 while (limit--) {
893 u32 tmp32;
894
895 if (!tg3_readphy(tp, 0x16, &tmp32)) {
896 if ((tmp32 & 0x1000) == 0)
897 break;
898 }
899 }
900 if (limit <= 0)
901 return -EBUSY;
902
903 return 0;
904}
905
906static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
907{
908 static const u32 test_pat[4][6] = {
909 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
910 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
911 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
912 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
913 };
914 int chan;
915
916 for (chan = 0; chan < 4; chan++) {
917 int i;
918
919 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
920 (chan * 0x2000) | 0x0200);
921 tg3_writephy(tp, 0x16, 0x0002);
922
923 for (i = 0; i < 6; i++)
924 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
925 test_pat[chan][i]);
926
927 tg3_writephy(tp, 0x16, 0x0202);
928 if (tg3_wait_macro_done(tp)) {
929 *resetp = 1;
930 return -EBUSY;
931 }
932
933 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
934 (chan * 0x2000) | 0x0200);
935 tg3_writephy(tp, 0x16, 0x0082);
936 if (tg3_wait_macro_done(tp)) {
937 *resetp = 1;
938 return -EBUSY;
939 }
940
941 tg3_writephy(tp, 0x16, 0x0802);
942 if (tg3_wait_macro_done(tp)) {
943 *resetp = 1;
944 return -EBUSY;
945 }
946
947 for (i = 0; i < 6; i += 2) {
948 u32 low, high;
949
950 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
951 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
952 tg3_wait_macro_done(tp)) {
953 *resetp = 1;
954 return -EBUSY;
955 }
956 low &= 0x7fff;
957 high &= 0x000f;
958 if (low != test_pat[chan][i] ||
959 high != test_pat[chan][i+1]) {
960 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
961 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
962 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
963
964 return -EBUSY;
965 }
966 }
967 }
968
969 return 0;
970}
971
972static int tg3_phy_reset_chanpat(struct tg3 *tp)
973{
974 int chan;
975
976 for (chan = 0; chan < 4; chan++) {
977 int i;
978
979 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
980 (chan * 0x2000) | 0x0200);
981 tg3_writephy(tp, 0x16, 0x0002);
982 for (i = 0; i < 6; i++)
983 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
984 tg3_writephy(tp, 0x16, 0x0202);
985 if (tg3_wait_macro_done(tp))
986 return -EBUSY;
987 }
988
989 return 0;
990}
991
992static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
993{
994 u32 reg32, phy9_orig;
995 int retries, do_phy_reset, err;
996
997 retries = 10;
998 do_phy_reset = 1;
999 do {
1000 if (do_phy_reset) {
1001 err = tg3_bmcr_reset(tp);
1002 if (err)
1003 return err;
1004 do_phy_reset = 0;
1005 }
1006
1007 /* Disable transmitter and interrupt. */
1008 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1009 continue;
1010
1011 reg32 |= 0x3000;
1012 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1013
1014 /* Set full-duplex, 1000 mbps. */
1015 tg3_writephy(tp, MII_BMCR,
1016 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1017
1018 /* Set to master mode. */
1019 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1020 continue;
1021
1022 tg3_writephy(tp, MII_TG3_CTRL,
1023 (MII_TG3_CTRL_AS_MASTER |
1024 MII_TG3_CTRL_ENABLE_AS_MASTER));
1025
1026 /* Enable SM_DSP_CLOCK and 6dB. */
1027 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1028
1029 /* Block the PHY control access. */
1030 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1031 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1032
1033 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1034 if (!err)
1035 break;
1036 } while (--retries);
1037
1038 err = tg3_phy_reset_chanpat(tp);
1039 if (err)
1040 return err;
1041
1042 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1043 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1044
1045 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1046 tg3_writephy(tp, 0x16, 0x0000);
1047
1048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1050 /* Set Extended packet length bit for jumbo frames */
1051 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1052 }
1053 else {
1054 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1055 }
1056
1057 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1058
1059 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1060 reg32 &= ~0x3000;
1061 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1062 } else if (!err)
1063 err = -EBUSY;
1064
1065 return err;
1066}
1067
c8e1e82b
MC
1068static void tg3_link_report(struct tg3 *);
1069
1da177e4
LT
1070/* This will reset the tigon3 PHY if there is no valid
1071 * link unless the FORCE argument is non-zero.
1072 */
1073static int tg3_phy_reset(struct tg3 *tp)
1074{
1075 u32 phy_status;
1076 int err;
1077
60189ddf
MC
1078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1079 u32 val;
1080
1081 val = tr32(GRC_MISC_CFG);
1082 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1083 udelay(40);
1084 }
1da177e4
LT
1085 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1086 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1087 if (err != 0)
1088 return -EBUSY;
1089
c8e1e82b
MC
1090 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1091 netif_carrier_off(tp->dev);
1092 tg3_link_report(tp);
1093 }
1094
1da177e4
LT
1095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1096 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1098 err = tg3_phy_reset_5703_4_5(tp);
1099 if (err)
1100 return err;
1101 goto out;
1102 }
1103
1104 err = tg3_bmcr_reset(tp);
1105 if (err)
1106 return err;
1107
1108out:
1109 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1110 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1111 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1112 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1113 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1114 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1115 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1116 }
1117 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1118 tg3_writephy(tp, 0x1c, 0x8d68);
1119 tg3_writephy(tp, 0x1c, 0x8d68);
1120 }
1121 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1122 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1123 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1124 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1125 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1126 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1127 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1128 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1129 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1130 }
c424cb24
MC
1131 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1132 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1133 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
c1d2a196
MC
1134 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1135 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1136 tg3_writephy(tp, MII_TG3_TEST1,
1137 MII_TG3_TEST1_TRIM_EN | 0x4);
1138 } else
1139 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
c424cb24
MC
1140 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1141 }
1da177e4
LT
1142 /* Set Extended packet length bit (bit 14) on all chips that */
1143 /* support jumbo frames */
1144 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1145 /* Cannot do read-modify-write on 5401 */
1146 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
0f893dc6 1147 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1148 u32 phy_reg;
1149
1150 /* Set bit 14 with read-modify-write to preserve other bits */
1151 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1152 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1153 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1154 }
1155
1156 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1157 * jumbo frames transmission.
1158 */
0f893dc6 1159 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1160 u32 phy_reg;
1161
1162 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1163 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1164 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1165 }
1166
715116a1 1167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1
MC
1168 /* adjust output voltage */
1169 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
715116a1
MC
1170 }
1171
9ef8ca99 1172 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
1173 tg3_phy_set_wirespeed(tp);
1174 return 0;
1175}
1176
1177static void tg3_frob_aux_power(struct tg3 *tp)
1178{
1179 struct tg3 *tp_peer = tp;
1180
9d26e213 1181 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1da177e4
LT
1182 return;
1183
8c2dc7e1
MC
1184 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1185 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1186 struct net_device *dev_peer;
1187
1188 dev_peer = pci_get_drvdata(tp->pdev_peer);
bc1c7567 1189 /* remove_one() may have been run on the peer. */
8c2dc7e1 1190 if (!dev_peer)
bc1c7567
MC
1191 tp_peer = tp;
1192 else
1193 tp_peer = netdev_priv(dev_peer);
1da177e4
LT
1194 }
1195
1da177e4 1196 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
6921d201
MC
1197 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1198 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1199 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1da177e4
LT
1200 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1201 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
b401e9e2
MC
1202 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1203 (GRC_LCLCTRL_GPIO_OE0 |
1204 GRC_LCLCTRL_GPIO_OE1 |
1205 GRC_LCLCTRL_GPIO_OE2 |
1206 GRC_LCLCTRL_GPIO_OUTPUT0 |
1207 GRC_LCLCTRL_GPIO_OUTPUT1),
1208 100);
1da177e4
LT
1209 } else {
1210 u32 no_gpio2;
dc56b7d4 1211 u32 grc_local_ctrl = 0;
1da177e4
LT
1212
1213 if (tp_peer != tp &&
1214 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1215 return;
1216
dc56b7d4
MC
1217 /* Workaround to prevent overdrawing Amps. */
1218 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1219 ASIC_REV_5714) {
1220 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
b401e9e2
MC
1221 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1222 grc_local_ctrl, 100);
dc56b7d4
MC
1223 }
1224
1da177e4
LT
1225 /* On 5753 and variants, GPIO2 cannot be used. */
1226 no_gpio2 = tp->nic_sram_data_cfg &
1227 NIC_SRAM_DATA_CFG_NO_GPIO2;
1228
dc56b7d4 1229 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1da177e4
LT
1230 GRC_LCLCTRL_GPIO_OE1 |
1231 GRC_LCLCTRL_GPIO_OE2 |
1232 GRC_LCLCTRL_GPIO_OUTPUT1 |
1233 GRC_LCLCTRL_GPIO_OUTPUT2;
1234 if (no_gpio2) {
1235 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1236 GRC_LCLCTRL_GPIO_OUTPUT2);
1237 }
b401e9e2
MC
1238 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1239 grc_local_ctrl, 100);
1da177e4
LT
1240
1241 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1242
b401e9e2
MC
1243 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1244 grc_local_ctrl, 100);
1da177e4
LT
1245
1246 if (!no_gpio2) {
1247 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
b401e9e2
MC
1248 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1249 grc_local_ctrl, 100);
1da177e4
LT
1250 }
1251 }
1252 } else {
1253 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1254 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1255 if (tp_peer != tp &&
1256 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1257 return;
1258
b401e9e2
MC
1259 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1260 (GRC_LCLCTRL_GPIO_OE1 |
1261 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4 1262
b401e9e2
MC
1263 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1264 GRC_LCLCTRL_GPIO_OE1, 100);
1da177e4 1265
b401e9e2
MC
1266 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1267 (GRC_LCLCTRL_GPIO_OE1 |
1268 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4
LT
1269 }
1270 }
1271}
1272
e8f3f6ca
MC
1273static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1274{
1275 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1276 return 1;
1277 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1278 if (speed != SPEED_10)
1279 return 1;
1280 } else if (speed == SPEED_10)
1281 return 1;
1282
1283 return 0;
1284}
1285
1da177e4
LT
1286static int tg3_setup_phy(struct tg3 *, int);
1287
1288#define RESET_KIND_SHUTDOWN 0
1289#define RESET_KIND_INIT 1
1290#define RESET_KIND_SUSPEND 2
1291
1292static void tg3_write_sig_post_reset(struct tg3 *, int);
1293static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1294static int tg3_nvram_lock(struct tg3 *);
1295static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1296
15c3b696
MC
1297static void tg3_power_down_phy(struct tg3 *tp)
1298{
5129724a
MC
1299 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1301 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1302 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1303
1304 sg_dig_ctrl |=
1305 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1306 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1307 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1308 }
3f7045c1 1309 return;
5129724a 1310 }
3f7045c1 1311
60189ddf
MC
1312 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1313 u32 val;
1314
1315 tg3_bmcr_reset(tp);
1316 val = tr32(GRC_MISC_CFG);
1317 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1318 udelay(40);
1319 return;
1320 } else {
715116a1
MC
1321 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1322 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1323 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1324 }
3f7045c1 1325
15c3b696
MC
1326 /* The PHY should not be powered down on some chips because
1327 * of bugs.
1328 */
1329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1331 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1332 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1333 return;
1334 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1335}
1336
bc1c7567 1337static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1338{
1339 u32 misc_host_ctrl;
1340 u16 power_control, power_caps;
1341 int pm = tp->pm_cap;
1342
1343 /* Make sure register accesses (indirect or otherwise)
1344 * will function correctly.
1345 */
1346 pci_write_config_dword(tp->pdev,
1347 TG3PCI_MISC_HOST_CTRL,
1348 tp->misc_host_ctrl);
1349
1350 pci_read_config_word(tp->pdev,
1351 pm + PCI_PM_CTRL,
1352 &power_control);
1353 power_control |= PCI_PM_CTRL_PME_STATUS;
1354 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1355 switch (state) {
bc1c7567 1356 case PCI_D0:
1da177e4
LT
1357 power_control |= 0;
1358 pci_write_config_word(tp->pdev,
1359 pm + PCI_PM_CTRL,
1360 power_control);
8c6bda1a
MC
1361 udelay(100); /* Delay after power state change */
1362
9d26e213
MC
1363 /* Switch out of Vaux if it is a NIC */
1364 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
b401e9e2 1365 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1366
1367 return 0;
1368
bc1c7567 1369 case PCI_D1:
1da177e4
LT
1370 power_control |= 1;
1371 break;
1372
bc1c7567 1373 case PCI_D2:
1da177e4
LT
1374 power_control |= 2;
1375 break;
1376
bc1c7567 1377 case PCI_D3hot:
1da177e4
LT
1378 power_control |= 3;
1379 break;
1380
1381 default:
1382 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1383 "requested.\n",
1384 tp->dev->name, state);
1385 return -EINVAL;
1386 };
1387
1388 power_control |= PCI_PM_CTRL_PME_ENABLE;
1389
1390 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1391 tw32(TG3PCI_MISC_HOST_CTRL,
1392 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1393
1394 if (tp->link_config.phy_is_low_power == 0) {
1395 tp->link_config.phy_is_low_power = 1;
1396 tp->link_config.orig_speed = tp->link_config.speed;
1397 tp->link_config.orig_duplex = tp->link_config.duplex;
1398 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1399 }
1400
747e8f8b 1401 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1402 tp->link_config.speed = SPEED_10;
1403 tp->link_config.duplex = DUPLEX_HALF;
1404 tp->link_config.autoneg = AUTONEG_ENABLE;
1405 tg3_setup_phy(tp, 0);
1406 }
1407
b5d3772c
MC
1408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1409 u32 val;
1410
1411 val = tr32(GRC_VCPU_EXT_CTRL);
1412 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1413 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
6921d201
MC
1414 int i;
1415 u32 val;
1416
1417 for (i = 0; i < 200; i++) {
1418 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1419 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1420 break;
1421 msleep(1);
1422 }
1423 }
a85feb8c
GZ
1424 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1425 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1426 WOL_DRV_STATE_SHUTDOWN |
1427 WOL_DRV_WOL |
1428 WOL_SET_MAGIC_PKT);
6921d201 1429
1da177e4
LT
1430 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1431
1432 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1433 u32 mac_mode;
1434
1435 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1436 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1437 udelay(40);
1438
3f7045c1
MC
1439 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1440 mac_mode = MAC_MODE_PORT_MODE_GMII;
1441 else
1442 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 1443
e8f3f6ca
MC
1444 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1445 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1446 ASIC_REV_5700) {
1447 u32 speed = (tp->tg3_flags &
1448 TG3_FLAG_WOL_SPEED_100MB) ?
1449 SPEED_100 : SPEED_10;
1450 if (tg3_5700_link_polarity(tp, speed))
1451 mac_mode |= MAC_MODE_LINK_POLARITY;
1452 else
1453 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1454 }
1da177e4
LT
1455 } else {
1456 mac_mode = MAC_MODE_PORT_MODE_TBI;
1457 }
1458
cbf46853 1459 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1460 tw32(MAC_LED_CTRL, tp->led_ctrl);
1461
1462 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1463 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1464 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1465
1466 tw32_f(MAC_MODE, mac_mode);
1467 udelay(100);
1468
1469 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1470 udelay(10);
1471 }
1472
1473 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1474 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1476 u32 base_val;
1477
1478 base_val = tp->pci_clock_ctrl;
1479 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1480 CLOCK_CTRL_TXCLK_DISABLE);
1481
b401e9e2
MC
1482 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1483 CLOCK_CTRL_PWRDOWN_PLL133, 40);
d7b0a857 1484 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
795d01c5 1485 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
d7b0a857 1486 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
4cf78e4f 1487 /* do nothing */
85e94ced 1488 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1489 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1490 u32 newbits1, newbits2;
1491
1492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1494 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1495 CLOCK_CTRL_TXCLK_DISABLE |
1496 CLOCK_CTRL_ALTCLK);
1497 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1498 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1499 newbits1 = CLOCK_CTRL_625_CORE;
1500 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1501 } else {
1502 newbits1 = CLOCK_CTRL_ALTCLK;
1503 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1504 }
1505
b401e9e2
MC
1506 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1507 40);
1da177e4 1508
b401e9e2
MC
1509 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1510 40);
1da177e4
LT
1511
1512 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1513 u32 newbits3;
1514
1515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1517 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1518 CLOCK_CTRL_TXCLK_DISABLE |
1519 CLOCK_CTRL_44MHZ_CORE);
1520 } else {
1521 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1522 }
1523
b401e9e2
MC
1524 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1525 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1526 }
1527 }
1528
6921d201 1529 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
0d3031d9
MC
1530 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1531 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
3f7045c1 1532 tg3_power_down_phy(tp);
6921d201 1533
1da177e4
LT
1534 tg3_frob_aux_power(tp);
1535
1536 /* Workaround for unstable PLL clock */
1537 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1538 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1539 u32 val = tr32(0x7d00);
1540
1541 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1542 tw32(0x7d00, val);
6921d201 1543 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1544 int err;
1545
1546 err = tg3_nvram_lock(tp);
1da177e4 1547 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1548 if (!err)
1549 tg3_nvram_unlock(tp);
6921d201 1550 }
1da177e4
LT
1551 }
1552
bbadf503
MC
1553 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1554
1da177e4
LT
1555 /* Finally, set the new power state. */
1556 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1557 udelay(100); /* Delay after power state change */
1da177e4 1558
1da177e4
LT
1559 return 0;
1560}
1561
1562static void tg3_link_report(struct tg3 *tp)
1563{
1564 if (!netif_carrier_ok(tp->dev)) {
9f88f29f
MC
1565 if (netif_msg_link(tp))
1566 printk(KERN_INFO PFX "%s: Link is down.\n",
1567 tp->dev->name);
1568 } else if (netif_msg_link(tp)) {
1da177e4
LT
1569 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1570 tp->dev->name,
1571 (tp->link_config.active_speed == SPEED_1000 ?
1572 1000 :
1573 (tp->link_config.active_speed == SPEED_100 ?
1574 100 : 10)),
1575 (tp->link_config.active_duplex == DUPLEX_FULL ?
1576 "full" : "half"));
1577
1578 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1579 "%s for RX.\n",
1580 tp->dev->name,
1581 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1582 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1583 }
1584}
1585
1586static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1587{
1588 u32 new_tg3_flags = 0;
1589 u32 old_rx_mode = tp->rx_mode;
1590 u32 old_tx_mode = tp->tx_mode;
1591
1592 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1593
1594 /* Convert 1000BaseX flow control bits to 1000BaseT
1595 * bits before resolving flow control.
1596 */
1597 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1598 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1599 ADVERTISE_PAUSE_ASYM);
1600 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1601
1602 if (local_adv & ADVERTISE_1000XPAUSE)
1603 local_adv |= ADVERTISE_PAUSE_CAP;
1604 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1605 local_adv |= ADVERTISE_PAUSE_ASYM;
1606 if (remote_adv & LPA_1000XPAUSE)
1607 remote_adv |= LPA_PAUSE_CAP;
1608 if (remote_adv & LPA_1000XPAUSE_ASYM)
1609 remote_adv |= LPA_PAUSE_ASYM;
1610 }
1611
1da177e4
LT
1612 if (local_adv & ADVERTISE_PAUSE_CAP) {
1613 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1614 if (remote_adv & LPA_PAUSE_CAP)
1615 new_tg3_flags |=
1616 (TG3_FLAG_RX_PAUSE |
1617 TG3_FLAG_TX_PAUSE);
1618 else if (remote_adv & LPA_PAUSE_ASYM)
1619 new_tg3_flags |=
1620 (TG3_FLAG_RX_PAUSE);
1621 } else {
1622 if (remote_adv & LPA_PAUSE_CAP)
1623 new_tg3_flags |=
1624 (TG3_FLAG_RX_PAUSE |
1625 TG3_FLAG_TX_PAUSE);
1626 }
1627 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1628 if ((remote_adv & LPA_PAUSE_CAP) &&
1629 (remote_adv & LPA_PAUSE_ASYM))
1630 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1631 }
1632
1633 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1634 tp->tg3_flags |= new_tg3_flags;
1635 } else {
1636 new_tg3_flags = tp->tg3_flags;
1637 }
1638
1639 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1640 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1641 else
1642 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1643
1644 if (old_rx_mode != tp->rx_mode) {
1645 tw32_f(MAC_RX_MODE, tp->rx_mode);
1646 }
6aa20a22 1647
1da177e4
LT
1648 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1649 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1650 else
1651 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1652
1653 if (old_tx_mode != tp->tx_mode) {
1654 tw32_f(MAC_TX_MODE, tp->tx_mode);
1655 }
1656}
1657
1658static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1659{
1660 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1661 case MII_TG3_AUX_STAT_10HALF:
1662 *speed = SPEED_10;
1663 *duplex = DUPLEX_HALF;
1664 break;
1665
1666 case MII_TG3_AUX_STAT_10FULL:
1667 *speed = SPEED_10;
1668 *duplex = DUPLEX_FULL;
1669 break;
1670
1671 case MII_TG3_AUX_STAT_100HALF:
1672 *speed = SPEED_100;
1673 *duplex = DUPLEX_HALF;
1674 break;
1675
1676 case MII_TG3_AUX_STAT_100FULL:
1677 *speed = SPEED_100;
1678 *duplex = DUPLEX_FULL;
1679 break;
1680
1681 case MII_TG3_AUX_STAT_1000HALF:
1682 *speed = SPEED_1000;
1683 *duplex = DUPLEX_HALF;
1684 break;
1685
1686 case MII_TG3_AUX_STAT_1000FULL:
1687 *speed = SPEED_1000;
1688 *duplex = DUPLEX_FULL;
1689 break;
1690
1691 default:
715116a1
MC
1692 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1693 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1694 SPEED_10;
1695 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1696 DUPLEX_HALF;
1697 break;
1698 }
1da177e4
LT
1699 *speed = SPEED_INVALID;
1700 *duplex = DUPLEX_INVALID;
1701 break;
1702 };
1703}
1704
1705static void tg3_phy_copper_begin(struct tg3 *tp)
1706{
1707 u32 new_adv;
1708 int i;
1709
1710 if (tp->link_config.phy_is_low_power) {
1711 /* Entering low power mode. Disable gigabit and
1712 * 100baseT advertisements.
1713 */
1714 tg3_writephy(tp, MII_TG3_CTRL, 0);
1715
1716 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1717 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1718 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1719 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1720
1721 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1722 } else if (tp->link_config.speed == SPEED_INVALID) {
1da177e4
LT
1723 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1724 tp->link_config.advertising &=
1725 ~(ADVERTISED_1000baseT_Half |
1726 ADVERTISED_1000baseT_Full);
1727
1728 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1729 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1730 new_adv |= ADVERTISE_10HALF;
1731 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1732 new_adv |= ADVERTISE_10FULL;
1733 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1734 new_adv |= ADVERTISE_100HALF;
1735 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1736 new_adv |= ADVERTISE_100FULL;
1737 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1738
1739 if (tp->link_config.advertising &
1740 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1741 new_adv = 0;
1742 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1743 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1744 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1745 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1746 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1747 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1748 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1749 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1750 MII_TG3_CTRL_ENABLE_AS_MASTER);
1751 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1752 } else {
1753 tg3_writephy(tp, MII_TG3_CTRL, 0);
1754 }
1755 } else {
1756 /* Asking for a specific link mode. */
1757 if (tp->link_config.speed == SPEED_1000) {
1758 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1759 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1760
1761 if (tp->link_config.duplex == DUPLEX_FULL)
1762 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1763 else
1764 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1765 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1766 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1767 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1768 MII_TG3_CTRL_ENABLE_AS_MASTER);
1769 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1770 } else {
1771 tg3_writephy(tp, MII_TG3_CTRL, 0);
1772
1773 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1774 if (tp->link_config.speed == SPEED_100) {
1775 if (tp->link_config.duplex == DUPLEX_FULL)
1776 new_adv |= ADVERTISE_100FULL;
1777 else
1778 new_adv |= ADVERTISE_100HALF;
1779 } else {
1780 if (tp->link_config.duplex == DUPLEX_FULL)
1781 new_adv |= ADVERTISE_10FULL;
1782 else
1783 new_adv |= ADVERTISE_10HALF;
1784 }
1785 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1786 }
1787 }
1788
1789 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1790 tp->link_config.speed != SPEED_INVALID) {
1791 u32 bmcr, orig_bmcr;
1792
1793 tp->link_config.active_speed = tp->link_config.speed;
1794 tp->link_config.active_duplex = tp->link_config.duplex;
1795
1796 bmcr = 0;
1797 switch (tp->link_config.speed) {
1798 default:
1799 case SPEED_10:
1800 break;
1801
1802 case SPEED_100:
1803 bmcr |= BMCR_SPEED100;
1804 break;
1805
1806 case SPEED_1000:
1807 bmcr |= TG3_BMCR_SPEED1000;
1808 break;
1809 };
1810
1811 if (tp->link_config.duplex == DUPLEX_FULL)
1812 bmcr |= BMCR_FULLDPLX;
1813
1814 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1815 (bmcr != orig_bmcr)) {
1816 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1817 for (i = 0; i < 1500; i++) {
1818 u32 tmp;
1819
1820 udelay(10);
1821 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1822 tg3_readphy(tp, MII_BMSR, &tmp))
1823 continue;
1824 if (!(tmp & BMSR_LSTATUS)) {
1825 udelay(40);
1826 break;
1827 }
1828 }
1829 tg3_writephy(tp, MII_BMCR, bmcr);
1830 udelay(40);
1831 }
1832 } else {
1833 tg3_writephy(tp, MII_BMCR,
1834 BMCR_ANENABLE | BMCR_ANRESTART);
1835 }
1836}
1837
1838static int tg3_init_5401phy_dsp(struct tg3 *tp)
1839{
1840 int err;
1841
1842 /* Turn off tap power management. */
1843 /* Set Extended packet length bit */
1844 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1845
1846 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1847 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1848
1849 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1850 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1851
1852 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1853 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1854
1855 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1856 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1857
1858 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1859 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1860
1861 udelay(40);
1862
1863 return err;
1864}
1865
3600d918 1866static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1da177e4 1867{
3600d918
MC
1868 u32 adv_reg, all_mask = 0;
1869
1870 if (mask & ADVERTISED_10baseT_Half)
1871 all_mask |= ADVERTISE_10HALF;
1872 if (mask & ADVERTISED_10baseT_Full)
1873 all_mask |= ADVERTISE_10FULL;
1874 if (mask & ADVERTISED_100baseT_Half)
1875 all_mask |= ADVERTISE_100HALF;
1876 if (mask & ADVERTISED_100baseT_Full)
1877 all_mask |= ADVERTISE_100FULL;
1da177e4
LT
1878
1879 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1880 return 0;
1881
1da177e4
LT
1882 if ((adv_reg & all_mask) != all_mask)
1883 return 0;
1884 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1885 u32 tg3_ctrl;
1886
3600d918
MC
1887 all_mask = 0;
1888 if (mask & ADVERTISED_1000baseT_Half)
1889 all_mask |= ADVERTISE_1000HALF;
1890 if (mask & ADVERTISED_1000baseT_Full)
1891 all_mask |= ADVERTISE_1000FULL;
1892
1da177e4
LT
1893 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1894 return 0;
1895
1da177e4
LT
1896 if ((tg3_ctrl & all_mask) != all_mask)
1897 return 0;
1898 }
1899 return 1;
1900}
1901
1902static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1903{
1904 int current_link_up;
1905 u32 bmsr, dummy;
1906 u16 current_speed;
1907 u8 current_duplex;
1908 int i, err;
1909
1910 tw32(MAC_EVENT, 0);
1911
1912 tw32_f(MAC_STATUS,
1913 (MAC_STATUS_SYNC_CHANGED |
1914 MAC_STATUS_CFG_CHANGED |
1915 MAC_STATUS_MI_COMPLETION |
1916 MAC_STATUS_LNKSTATE_CHANGED));
1917 udelay(40);
1918
1919 tp->mi_mode = MAC_MI_MODE_BASE;
1920 tw32_f(MAC_MI_MODE, tp->mi_mode);
1921 udelay(80);
1922
1923 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1924
1925 /* Some third-party PHYs need to be reset on link going
1926 * down.
1927 */
1928 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1929 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1931 netif_carrier_ok(tp->dev)) {
1932 tg3_readphy(tp, MII_BMSR, &bmsr);
1933 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1934 !(bmsr & BMSR_LSTATUS))
1935 force_reset = 1;
1936 }
1937 if (force_reset)
1938 tg3_phy_reset(tp);
1939
1940 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1941 tg3_readphy(tp, MII_BMSR, &bmsr);
1942 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1943 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1944 bmsr = 0;
1945
1946 if (!(bmsr & BMSR_LSTATUS)) {
1947 err = tg3_init_5401phy_dsp(tp);
1948 if (err)
1949 return err;
1950
1951 tg3_readphy(tp, MII_BMSR, &bmsr);
1952 for (i = 0; i < 1000; i++) {
1953 udelay(10);
1954 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1955 (bmsr & BMSR_LSTATUS)) {
1956 udelay(40);
1957 break;
1958 }
1959 }
1960
1961 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1962 !(bmsr & BMSR_LSTATUS) &&
1963 tp->link_config.active_speed == SPEED_1000) {
1964 err = tg3_phy_reset(tp);
1965 if (!err)
1966 err = tg3_init_5401phy_dsp(tp);
1967 if (err)
1968 return err;
1969 }
1970 }
1971 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1972 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1973 /* 5701 {A0,B0} CRC bug workaround */
1974 tg3_writephy(tp, 0x15, 0x0a75);
1975 tg3_writephy(tp, 0x1c, 0x8c68);
1976 tg3_writephy(tp, 0x1c, 0x8d68);
1977 tg3_writephy(tp, 0x1c, 0x8c68);
1978 }
1979
1980 /* Clear pending interrupts... */
1981 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1982 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1983
1984 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1985 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
715116a1 1986 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1da177e4
LT
1987 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1988
1989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1990 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1991 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1992 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1993 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1994 else
1995 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1996 }
1997
1998 current_link_up = 0;
1999 current_speed = SPEED_INVALID;
2000 current_duplex = DUPLEX_INVALID;
2001
2002 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2003 u32 val;
2004
2005 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2006 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2007 if (!(val & (1 << 10))) {
2008 val |= (1 << 10);
2009 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2010 goto relink;
2011 }
2012 }
2013
2014 bmsr = 0;
2015 for (i = 0; i < 100; i++) {
2016 tg3_readphy(tp, MII_BMSR, &bmsr);
2017 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2018 (bmsr & BMSR_LSTATUS))
2019 break;
2020 udelay(40);
2021 }
2022
2023 if (bmsr & BMSR_LSTATUS) {
2024 u32 aux_stat, bmcr;
2025
2026 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2027 for (i = 0; i < 2000; i++) {
2028 udelay(10);
2029 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2030 aux_stat)
2031 break;
2032 }
2033
2034 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2035 &current_speed,
2036 &current_duplex);
2037
2038 bmcr = 0;
2039 for (i = 0; i < 200; i++) {
2040 tg3_readphy(tp, MII_BMCR, &bmcr);
2041 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2042 continue;
2043 if (bmcr && bmcr != 0x7fff)
2044 break;
2045 udelay(10);
2046 }
2047
2048 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2049 if (bmcr & BMCR_ANENABLE) {
2050 current_link_up = 1;
2051
2052 /* Force autoneg restart if we are exiting
2053 * low power mode.
2054 */
3600d918
MC
2055 if (!tg3_copper_is_advertising_all(tp,
2056 tp->link_config.advertising))
1da177e4
LT
2057 current_link_up = 0;
2058 } else {
2059 current_link_up = 0;
2060 }
2061 } else {
2062 if (!(bmcr & BMCR_ANENABLE) &&
2063 tp->link_config.speed == current_speed &&
2064 tp->link_config.duplex == current_duplex) {
2065 current_link_up = 1;
2066 } else {
2067 current_link_up = 0;
2068 }
2069 }
2070
2071 tp->link_config.active_speed = current_speed;
2072 tp->link_config.active_duplex = current_duplex;
2073 }
2074
2075 if (current_link_up == 1 &&
2076 (tp->link_config.active_duplex == DUPLEX_FULL) &&
2077 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2078 u32 local_adv, remote_adv;
2079
2080 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2081 local_adv = 0;
2082 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2083
2084 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2085 remote_adv = 0;
2086
2087 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2088
2089 /* If we are not advertising full pause capability,
2090 * something is wrong. Bring the link down and reconfigure.
2091 */
2092 if (local_adv != ADVERTISE_PAUSE_CAP) {
2093 current_link_up = 0;
2094 } else {
2095 tg3_setup_flow_control(tp, local_adv, remote_adv);
2096 }
2097 }
2098relink:
6921d201 2099 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1da177e4
LT
2100 u32 tmp;
2101
2102 tg3_phy_copper_begin(tp);
2103
2104 tg3_readphy(tp, MII_BMSR, &tmp);
2105 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2106 (tmp & BMSR_LSTATUS))
2107 current_link_up = 1;
2108 }
2109
2110 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2111 if (current_link_up == 1) {
2112 if (tp->link_config.active_speed == SPEED_100 ||
2113 tp->link_config.active_speed == SPEED_10)
2114 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2115 else
2116 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2117 } else
2118 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2119
2120 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2121 if (tp->link_config.active_duplex == DUPLEX_HALF)
2122 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2123
1da177e4 2124 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
2125 if (current_link_up == 1 &&
2126 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 2127 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
2128 else
2129 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
2130 }
2131
2132 /* ??? Without this setting Netgear GA302T PHY does not
2133 * ??? send/receive packets...
2134 */
2135 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2136 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2137 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2138 tw32_f(MAC_MI_MODE, tp->mi_mode);
2139 udelay(80);
2140 }
2141
2142 tw32_f(MAC_MODE, tp->mac_mode);
2143 udelay(40);
2144
2145 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2146 /* Polled via timer. */
2147 tw32_f(MAC_EVENT, 0);
2148 } else {
2149 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2150 }
2151 udelay(40);
2152
2153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2154 current_link_up == 1 &&
2155 tp->link_config.active_speed == SPEED_1000 &&
2156 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2157 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2158 udelay(120);
2159 tw32_f(MAC_STATUS,
2160 (MAC_STATUS_SYNC_CHANGED |
2161 MAC_STATUS_CFG_CHANGED));
2162 udelay(40);
2163 tg3_write_mem(tp,
2164 NIC_SRAM_FIRMWARE_MBOX,
2165 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2166 }
2167
2168 if (current_link_up != netif_carrier_ok(tp->dev)) {
2169 if (current_link_up)
2170 netif_carrier_on(tp->dev);
2171 else
2172 netif_carrier_off(tp->dev);
2173 tg3_link_report(tp);
2174 }
2175
2176 return 0;
2177}
2178
2179struct tg3_fiber_aneginfo {
2180 int state;
2181#define ANEG_STATE_UNKNOWN 0
2182#define ANEG_STATE_AN_ENABLE 1
2183#define ANEG_STATE_RESTART_INIT 2
2184#define ANEG_STATE_RESTART 3
2185#define ANEG_STATE_DISABLE_LINK_OK 4
2186#define ANEG_STATE_ABILITY_DETECT_INIT 5
2187#define ANEG_STATE_ABILITY_DETECT 6
2188#define ANEG_STATE_ACK_DETECT_INIT 7
2189#define ANEG_STATE_ACK_DETECT 8
2190#define ANEG_STATE_COMPLETE_ACK_INIT 9
2191#define ANEG_STATE_COMPLETE_ACK 10
2192#define ANEG_STATE_IDLE_DETECT_INIT 11
2193#define ANEG_STATE_IDLE_DETECT 12
2194#define ANEG_STATE_LINK_OK 13
2195#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2196#define ANEG_STATE_NEXT_PAGE_WAIT 15
2197
2198 u32 flags;
2199#define MR_AN_ENABLE 0x00000001
2200#define MR_RESTART_AN 0x00000002
2201#define MR_AN_COMPLETE 0x00000004
2202#define MR_PAGE_RX 0x00000008
2203#define MR_NP_LOADED 0x00000010
2204#define MR_TOGGLE_TX 0x00000020
2205#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2206#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2207#define MR_LP_ADV_SYM_PAUSE 0x00000100
2208#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2209#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2210#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2211#define MR_LP_ADV_NEXT_PAGE 0x00001000
2212#define MR_TOGGLE_RX 0x00002000
2213#define MR_NP_RX 0x00004000
2214
2215#define MR_LINK_OK 0x80000000
2216
2217 unsigned long link_time, cur_time;
2218
2219 u32 ability_match_cfg;
2220 int ability_match_count;
2221
2222 char ability_match, idle_match, ack_match;
2223
2224 u32 txconfig, rxconfig;
2225#define ANEG_CFG_NP 0x00000080
2226#define ANEG_CFG_ACK 0x00000040
2227#define ANEG_CFG_RF2 0x00000020
2228#define ANEG_CFG_RF1 0x00000010
2229#define ANEG_CFG_PS2 0x00000001
2230#define ANEG_CFG_PS1 0x00008000
2231#define ANEG_CFG_HD 0x00004000
2232#define ANEG_CFG_FD 0x00002000
2233#define ANEG_CFG_INVAL 0x00001f06
2234
2235};
2236#define ANEG_OK 0
2237#define ANEG_DONE 1
2238#define ANEG_TIMER_ENAB 2
2239#define ANEG_FAILED -1
2240
2241#define ANEG_STATE_SETTLE_TIME 10000
2242
2243static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2244 struct tg3_fiber_aneginfo *ap)
2245{
2246 unsigned long delta;
2247 u32 rx_cfg_reg;
2248 int ret;
2249
2250 if (ap->state == ANEG_STATE_UNKNOWN) {
2251 ap->rxconfig = 0;
2252 ap->link_time = 0;
2253 ap->cur_time = 0;
2254 ap->ability_match_cfg = 0;
2255 ap->ability_match_count = 0;
2256 ap->ability_match = 0;
2257 ap->idle_match = 0;
2258 ap->ack_match = 0;
2259 }
2260 ap->cur_time++;
2261
2262 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2263 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2264
2265 if (rx_cfg_reg != ap->ability_match_cfg) {
2266 ap->ability_match_cfg = rx_cfg_reg;
2267 ap->ability_match = 0;
2268 ap->ability_match_count = 0;
2269 } else {
2270 if (++ap->ability_match_count > 1) {
2271 ap->ability_match = 1;
2272 ap->ability_match_cfg = rx_cfg_reg;
2273 }
2274 }
2275 if (rx_cfg_reg & ANEG_CFG_ACK)
2276 ap->ack_match = 1;
2277 else
2278 ap->ack_match = 0;
2279
2280 ap->idle_match = 0;
2281 } else {
2282 ap->idle_match = 1;
2283 ap->ability_match_cfg = 0;
2284 ap->ability_match_count = 0;
2285 ap->ability_match = 0;
2286 ap->ack_match = 0;
2287
2288 rx_cfg_reg = 0;
2289 }
2290
2291 ap->rxconfig = rx_cfg_reg;
2292 ret = ANEG_OK;
2293
2294 switch(ap->state) {
2295 case ANEG_STATE_UNKNOWN:
2296 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2297 ap->state = ANEG_STATE_AN_ENABLE;
2298
2299 /* fallthru */
2300 case ANEG_STATE_AN_ENABLE:
2301 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2302 if (ap->flags & MR_AN_ENABLE) {
2303 ap->link_time = 0;
2304 ap->cur_time = 0;
2305 ap->ability_match_cfg = 0;
2306 ap->ability_match_count = 0;
2307 ap->ability_match = 0;
2308 ap->idle_match = 0;
2309 ap->ack_match = 0;
2310
2311 ap->state = ANEG_STATE_RESTART_INIT;
2312 } else {
2313 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2314 }
2315 break;
2316
2317 case ANEG_STATE_RESTART_INIT:
2318 ap->link_time = ap->cur_time;
2319 ap->flags &= ~(MR_NP_LOADED);
2320 ap->txconfig = 0;
2321 tw32(MAC_TX_AUTO_NEG, 0);
2322 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2323 tw32_f(MAC_MODE, tp->mac_mode);
2324 udelay(40);
2325
2326 ret = ANEG_TIMER_ENAB;
2327 ap->state = ANEG_STATE_RESTART;
2328
2329 /* fallthru */
2330 case ANEG_STATE_RESTART:
2331 delta = ap->cur_time - ap->link_time;
2332 if (delta > ANEG_STATE_SETTLE_TIME) {
2333 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2334 } else {
2335 ret = ANEG_TIMER_ENAB;
2336 }
2337 break;
2338
2339 case ANEG_STATE_DISABLE_LINK_OK:
2340 ret = ANEG_DONE;
2341 break;
2342
2343 case ANEG_STATE_ABILITY_DETECT_INIT:
2344 ap->flags &= ~(MR_TOGGLE_TX);
2345 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2346 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2347 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2348 tw32_f(MAC_MODE, tp->mac_mode);
2349 udelay(40);
2350
2351 ap->state = ANEG_STATE_ABILITY_DETECT;
2352 break;
2353
2354 case ANEG_STATE_ABILITY_DETECT:
2355 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2356 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2357 }
2358 break;
2359
2360 case ANEG_STATE_ACK_DETECT_INIT:
2361 ap->txconfig |= ANEG_CFG_ACK;
2362 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2363 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2364 tw32_f(MAC_MODE, tp->mac_mode);
2365 udelay(40);
2366
2367 ap->state = ANEG_STATE_ACK_DETECT;
2368
2369 /* fallthru */
2370 case ANEG_STATE_ACK_DETECT:
2371 if (ap->ack_match != 0) {
2372 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2373 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2374 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2375 } else {
2376 ap->state = ANEG_STATE_AN_ENABLE;
2377 }
2378 } else if (ap->ability_match != 0 &&
2379 ap->rxconfig == 0) {
2380 ap->state = ANEG_STATE_AN_ENABLE;
2381 }
2382 break;
2383
2384 case ANEG_STATE_COMPLETE_ACK_INIT:
2385 if (ap->rxconfig & ANEG_CFG_INVAL) {
2386 ret = ANEG_FAILED;
2387 break;
2388 }
2389 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2390 MR_LP_ADV_HALF_DUPLEX |
2391 MR_LP_ADV_SYM_PAUSE |
2392 MR_LP_ADV_ASYM_PAUSE |
2393 MR_LP_ADV_REMOTE_FAULT1 |
2394 MR_LP_ADV_REMOTE_FAULT2 |
2395 MR_LP_ADV_NEXT_PAGE |
2396 MR_TOGGLE_RX |
2397 MR_NP_RX);
2398 if (ap->rxconfig & ANEG_CFG_FD)
2399 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2400 if (ap->rxconfig & ANEG_CFG_HD)
2401 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2402 if (ap->rxconfig & ANEG_CFG_PS1)
2403 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2404 if (ap->rxconfig & ANEG_CFG_PS2)
2405 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2406 if (ap->rxconfig & ANEG_CFG_RF1)
2407 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2408 if (ap->rxconfig & ANEG_CFG_RF2)
2409 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2410 if (ap->rxconfig & ANEG_CFG_NP)
2411 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2412
2413 ap->link_time = ap->cur_time;
2414
2415 ap->flags ^= (MR_TOGGLE_TX);
2416 if (ap->rxconfig & 0x0008)
2417 ap->flags |= MR_TOGGLE_RX;
2418 if (ap->rxconfig & ANEG_CFG_NP)
2419 ap->flags |= MR_NP_RX;
2420 ap->flags |= MR_PAGE_RX;
2421
2422 ap->state = ANEG_STATE_COMPLETE_ACK;
2423 ret = ANEG_TIMER_ENAB;
2424 break;
2425
2426 case ANEG_STATE_COMPLETE_ACK:
2427 if (ap->ability_match != 0 &&
2428 ap->rxconfig == 0) {
2429 ap->state = ANEG_STATE_AN_ENABLE;
2430 break;
2431 }
2432 delta = ap->cur_time - ap->link_time;
2433 if (delta > ANEG_STATE_SETTLE_TIME) {
2434 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2435 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2436 } else {
2437 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2438 !(ap->flags & MR_NP_RX)) {
2439 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2440 } else {
2441 ret = ANEG_FAILED;
2442 }
2443 }
2444 }
2445 break;
2446
2447 case ANEG_STATE_IDLE_DETECT_INIT:
2448 ap->link_time = ap->cur_time;
2449 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2450 tw32_f(MAC_MODE, tp->mac_mode);
2451 udelay(40);
2452
2453 ap->state = ANEG_STATE_IDLE_DETECT;
2454 ret = ANEG_TIMER_ENAB;
2455 break;
2456
2457 case ANEG_STATE_IDLE_DETECT:
2458 if (ap->ability_match != 0 &&
2459 ap->rxconfig == 0) {
2460 ap->state = ANEG_STATE_AN_ENABLE;
2461 break;
2462 }
2463 delta = ap->cur_time - ap->link_time;
2464 if (delta > ANEG_STATE_SETTLE_TIME) {
2465 /* XXX another gem from the Broadcom driver :( */
2466 ap->state = ANEG_STATE_LINK_OK;
2467 }
2468 break;
2469
2470 case ANEG_STATE_LINK_OK:
2471 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2472 ret = ANEG_DONE;
2473 break;
2474
2475 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2476 /* ??? unimplemented */
2477 break;
2478
2479 case ANEG_STATE_NEXT_PAGE_WAIT:
2480 /* ??? unimplemented */
2481 break;
2482
2483 default:
2484 ret = ANEG_FAILED;
2485 break;
2486 };
2487
2488 return ret;
2489}
2490
2491static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2492{
2493 int res = 0;
2494 struct tg3_fiber_aneginfo aninfo;
2495 int status = ANEG_FAILED;
2496 unsigned int tick;
2497 u32 tmp;
2498
2499 tw32_f(MAC_TX_AUTO_NEG, 0);
2500
2501 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2502 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2503 udelay(40);
2504
2505 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2506 udelay(40);
2507
2508 memset(&aninfo, 0, sizeof(aninfo));
2509 aninfo.flags |= MR_AN_ENABLE;
2510 aninfo.state = ANEG_STATE_UNKNOWN;
2511 aninfo.cur_time = 0;
2512 tick = 0;
2513 while (++tick < 195000) {
2514 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2515 if (status == ANEG_DONE || status == ANEG_FAILED)
2516 break;
2517
2518 udelay(1);
2519 }
2520
2521 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2522 tw32_f(MAC_MODE, tp->mac_mode);
2523 udelay(40);
2524
2525 *flags = aninfo.flags;
2526
2527 if (status == ANEG_DONE &&
2528 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2529 MR_LP_ADV_FULL_DUPLEX)))
2530 res = 1;
2531
2532 return res;
2533}
2534
2535static void tg3_init_bcm8002(struct tg3 *tp)
2536{
2537 u32 mac_status = tr32(MAC_STATUS);
2538 int i;
2539
2540 /* Reset when initting first time or we have a link. */
2541 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2542 !(mac_status & MAC_STATUS_PCS_SYNCED))
2543 return;
2544
2545 /* Set PLL lock range. */
2546 tg3_writephy(tp, 0x16, 0x8007);
2547
2548 /* SW reset */
2549 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2550
2551 /* Wait for reset to complete. */
2552 /* XXX schedule_timeout() ... */
2553 for (i = 0; i < 500; i++)
2554 udelay(10);
2555
2556 /* Config mode; select PMA/Ch 1 regs. */
2557 tg3_writephy(tp, 0x10, 0x8411);
2558
2559 /* Enable auto-lock and comdet, select txclk for tx. */
2560 tg3_writephy(tp, 0x11, 0x0a10);
2561
2562 tg3_writephy(tp, 0x18, 0x00a0);
2563 tg3_writephy(tp, 0x16, 0x41ff);
2564
2565 /* Assert and deassert POR. */
2566 tg3_writephy(tp, 0x13, 0x0400);
2567 udelay(40);
2568 tg3_writephy(tp, 0x13, 0x0000);
2569
2570 tg3_writephy(tp, 0x11, 0x0a50);
2571 udelay(40);
2572 tg3_writephy(tp, 0x11, 0x0a10);
2573
2574 /* Wait for signal to stabilize */
2575 /* XXX schedule_timeout() ... */
2576 for (i = 0; i < 15000; i++)
2577 udelay(10);
2578
2579 /* Deselect the channel register so we can read the PHYID
2580 * later.
2581 */
2582 tg3_writephy(tp, 0x10, 0x8011);
2583}
2584
2585static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2586{
2587 u32 sg_dig_ctrl, sg_dig_status;
2588 u32 serdes_cfg, expected_sg_dig_ctrl;
2589 int workaround, port_a;
2590 int current_link_up;
2591
2592 serdes_cfg = 0;
2593 expected_sg_dig_ctrl = 0;
2594 workaround = 0;
2595 port_a = 1;
2596 current_link_up = 0;
2597
2598 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2599 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2600 workaround = 1;
2601 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2602 port_a = 0;
2603
2604 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2605 /* preserve bits 20-23 for voltage regulator */
2606 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2607 }
2608
2609 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2610
2611 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2612 if (sg_dig_ctrl & (1 << 31)) {
2613 if (workaround) {
2614 u32 val = serdes_cfg;
2615
2616 if (port_a)
2617 val |= 0xc010000;
2618 else
2619 val |= 0x4010000;
2620 tw32_f(MAC_SERDES_CFG, val);
2621 }
2622 tw32_f(SG_DIG_CTRL, 0x01388400);
2623 }
2624 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2625 tg3_setup_flow_control(tp, 0, 0);
2626 current_link_up = 1;
2627 }
2628 goto out;
2629 }
2630
2631 /* Want auto-negotiation. */
2632 expected_sg_dig_ctrl = 0x81388400;
2633
2634 /* Pause capability */
2635 expected_sg_dig_ctrl |= (1 << 11);
2636
2637 /* Asymettric pause */
2638 expected_sg_dig_ctrl |= (1 << 12);
2639
2640 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3d3ebe74
MC
2641 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2642 tp->serdes_counter &&
2643 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2644 MAC_STATUS_RCVD_CFG)) ==
2645 MAC_STATUS_PCS_SYNCED)) {
2646 tp->serdes_counter--;
2647 current_link_up = 1;
2648 goto out;
2649 }
2650restart_autoneg:
1da177e4
LT
2651 if (workaround)
2652 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2653 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2654 udelay(5);
2655 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2656
3d3ebe74
MC
2657 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2658 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1da177e4
LT
2659 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2660 MAC_STATUS_SIGNAL_DET)) {
3d3ebe74 2661 sg_dig_status = tr32(SG_DIG_STATUS);
1da177e4
LT
2662 mac_status = tr32(MAC_STATUS);
2663
2664 if ((sg_dig_status & (1 << 1)) &&
2665 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2666 u32 local_adv, remote_adv;
2667
2668 local_adv = ADVERTISE_PAUSE_CAP;
2669 remote_adv = 0;
2670 if (sg_dig_status & (1 << 19))
2671 remote_adv |= LPA_PAUSE_CAP;
2672 if (sg_dig_status & (1 << 20))
2673 remote_adv |= LPA_PAUSE_ASYM;
2674
2675 tg3_setup_flow_control(tp, local_adv, remote_adv);
2676 current_link_up = 1;
3d3ebe74
MC
2677 tp->serdes_counter = 0;
2678 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1da177e4 2679 } else if (!(sg_dig_status & (1 << 1))) {
3d3ebe74
MC
2680 if (tp->serdes_counter)
2681 tp->serdes_counter--;
1da177e4
LT
2682 else {
2683 if (workaround) {
2684 u32 val = serdes_cfg;
2685
2686 if (port_a)
2687 val |= 0xc010000;
2688 else
2689 val |= 0x4010000;
2690
2691 tw32_f(MAC_SERDES_CFG, val);
2692 }
2693
2694 tw32_f(SG_DIG_CTRL, 0x01388400);
2695 udelay(40);
2696
2697 /* Link parallel detection - link is up */
2698 /* only if we have PCS_SYNC and not */
2699 /* receiving config code words */
2700 mac_status = tr32(MAC_STATUS);
2701 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2702 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2703 tg3_setup_flow_control(tp, 0, 0);
2704 current_link_up = 1;
3d3ebe74
MC
2705 tp->tg3_flags2 |=
2706 TG3_FLG2_PARALLEL_DETECT;
2707 tp->serdes_counter =
2708 SERDES_PARALLEL_DET_TIMEOUT;
2709 } else
2710 goto restart_autoneg;
1da177e4
LT
2711 }
2712 }
3d3ebe74
MC
2713 } else {
2714 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2715 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1da177e4
LT
2716 }
2717
2718out:
2719 return current_link_up;
2720}
2721
2722static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2723{
2724 int current_link_up = 0;
2725
5cf64b8a 2726 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 2727 goto out;
1da177e4
LT
2728
2729 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2730 u32 flags;
2731 int i;
6aa20a22 2732
1da177e4
LT
2733 if (fiber_autoneg(tp, &flags)) {
2734 u32 local_adv, remote_adv;
2735
2736 local_adv = ADVERTISE_PAUSE_CAP;
2737 remote_adv = 0;
2738 if (flags & MR_LP_ADV_SYM_PAUSE)
2739 remote_adv |= LPA_PAUSE_CAP;
2740 if (flags & MR_LP_ADV_ASYM_PAUSE)
2741 remote_adv |= LPA_PAUSE_ASYM;
2742
2743 tg3_setup_flow_control(tp, local_adv, remote_adv);
2744
1da177e4
LT
2745 current_link_up = 1;
2746 }
2747 for (i = 0; i < 30; i++) {
2748 udelay(20);
2749 tw32_f(MAC_STATUS,
2750 (MAC_STATUS_SYNC_CHANGED |
2751 MAC_STATUS_CFG_CHANGED));
2752 udelay(40);
2753 if ((tr32(MAC_STATUS) &
2754 (MAC_STATUS_SYNC_CHANGED |
2755 MAC_STATUS_CFG_CHANGED)) == 0)
2756 break;
2757 }
2758
2759 mac_status = tr32(MAC_STATUS);
2760 if (current_link_up == 0 &&
2761 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2762 !(mac_status & MAC_STATUS_RCVD_CFG))
2763 current_link_up = 1;
2764 } else {
2765 /* Forcing 1000FD link up. */
2766 current_link_up = 1;
1da177e4
LT
2767
2768 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2769 udelay(40);
e8f3f6ca
MC
2770
2771 tw32_f(MAC_MODE, tp->mac_mode);
2772 udelay(40);
1da177e4
LT
2773 }
2774
2775out:
2776 return current_link_up;
2777}
2778
2779static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2780{
2781 u32 orig_pause_cfg;
2782 u16 orig_active_speed;
2783 u8 orig_active_duplex;
2784 u32 mac_status;
2785 int current_link_up;
2786 int i;
2787
2788 orig_pause_cfg =
2789 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2790 TG3_FLAG_TX_PAUSE));
2791 orig_active_speed = tp->link_config.active_speed;
2792 orig_active_duplex = tp->link_config.active_duplex;
2793
2794 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2795 netif_carrier_ok(tp->dev) &&
2796 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2797 mac_status = tr32(MAC_STATUS);
2798 mac_status &= (MAC_STATUS_PCS_SYNCED |
2799 MAC_STATUS_SIGNAL_DET |
2800 MAC_STATUS_CFG_CHANGED |
2801 MAC_STATUS_RCVD_CFG);
2802 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2803 MAC_STATUS_SIGNAL_DET)) {
2804 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2805 MAC_STATUS_CFG_CHANGED));
2806 return 0;
2807 }
2808 }
2809
2810 tw32_f(MAC_TX_AUTO_NEG, 0);
2811
2812 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2813 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2814 tw32_f(MAC_MODE, tp->mac_mode);
2815 udelay(40);
2816
2817 if (tp->phy_id == PHY_ID_BCM8002)
2818 tg3_init_bcm8002(tp);
2819
2820 /* Enable link change event even when serdes polling. */
2821 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2822 udelay(40);
2823
2824 current_link_up = 0;
2825 mac_status = tr32(MAC_STATUS);
2826
2827 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2828 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2829 else
2830 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2831
1da177e4
LT
2832 tp->hw_status->status =
2833 (SD_STATUS_UPDATED |
2834 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2835
2836 for (i = 0; i < 100; i++) {
2837 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2838 MAC_STATUS_CFG_CHANGED));
2839 udelay(5);
2840 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3d3ebe74
MC
2841 MAC_STATUS_CFG_CHANGED |
2842 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
1da177e4
LT
2843 break;
2844 }
2845
2846 mac_status = tr32(MAC_STATUS);
2847 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2848 current_link_up = 0;
3d3ebe74
MC
2849 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2850 tp->serdes_counter == 0) {
1da177e4
LT
2851 tw32_f(MAC_MODE, (tp->mac_mode |
2852 MAC_MODE_SEND_CONFIGS));
2853 udelay(1);
2854 tw32_f(MAC_MODE, tp->mac_mode);
2855 }
2856 }
2857
2858 if (current_link_up == 1) {
2859 tp->link_config.active_speed = SPEED_1000;
2860 tp->link_config.active_duplex = DUPLEX_FULL;
2861 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2862 LED_CTRL_LNKLED_OVERRIDE |
2863 LED_CTRL_1000MBPS_ON));
2864 } else {
2865 tp->link_config.active_speed = SPEED_INVALID;
2866 tp->link_config.active_duplex = DUPLEX_INVALID;
2867 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2868 LED_CTRL_LNKLED_OVERRIDE |
2869 LED_CTRL_TRAFFIC_OVERRIDE));
2870 }
2871
2872 if (current_link_up != netif_carrier_ok(tp->dev)) {
2873 if (current_link_up)
2874 netif_carrier_on(tp->dev);
2875 else
2876 netif_carrier_off(tp->dev);
2877 tg3_link_report(tp);
2878 } else {
2879 u32 now_pause_cfg =
2880 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2881 TG3_FLAG_TX_PAUSE);
2882 if (orig_pause_cfg != now_pause_cfg ||
2883 orig_active_speed != tp->link_config.active_speed ||
2884 orig_active_duplex != tp->link_config.active_duplex)
2885 tg3_link_report(tp);
2886 }
2887
2888 return 0;
2889}
2890
747e8f8b
MC
2891static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2892{
2893 int current_link_up, err = 0;
2894 u32 bmsr, bmcr;
2895 u16 current_speed;
2896 u8 current_duplex;
2897
2898 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2899 tw32_f(MAC_MODE, tp->mac_mode);
2900 udelay(40);
2901
2902 tw32(MAC_EVENT, 0);
2903
2904 tw32_f(MAC_STATUS,
2905 (MAC_STATUS_SYNC_CHANGED |
2906 MAC_STATUS_CFG_CHANGED |
2907 MAC_STATUS_MI_COMPLETION |
2908 MAC_STATUS_LNKSTATE_CHANGED));
2909 udelay(40);
2910
2911 if (force_reset)
2912 tg3_phy_reset(tp);
2913
2914 current_link_up = 0;
2915 current_speed = SPEED_INVALID;
2916 current_duplex = DUPLEX_INVALID;
2917
2918 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2919 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2920 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2921 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2922 bmsr |= BMSR_LSTATUS;
2923 else
2924 bmsr &= ~BMSR_LSTATUS;
2925 }
747e8f8b
MC
2926
2927 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2928
2929 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2930 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2931 /* do nothing, just check for link up at the end */
2932 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2933 u32 adv, new_adv;
2934
2935 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2936 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2937 ADVERTISE_1000XPAUSE |
2938 ADVERTISE_1000XPSE_ASYM |
2939 ADVERTISE_SLCT);
2940
2941 /* Always advertise symmetric PAUSE just like copper */
2942 new_adv |= ADVERTISE_1000XPAUSE;
2943
2944 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2945 new_adv |= ADVERTISE_1000XHALF;
2946 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2947 new_adv |= ADVERTISE_1000XFULL;
2948
2949 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2950 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2951 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2952 tg3_writephy(tp, MII_BMCR, bmcr);
2953
2954 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 2955 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
747e8f8b
MC
2956 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2957
2958 return err;
2959 }
2960 } else {
2961 u32 new_bmcr;
2962
2963 bmcr &= ~BMCR_SPEED1000;
2964 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2965
2966 if (tp->link_config.duplex == DUPLEX_FULL)
2967 new_bmcr |= BMCR_FULLDPLX;
2968
2969 if (new_bmcr != bmcr) {
2970 /* BMCR_SPEED1000 is a reserved bit that needs
2971 * to be set on write.
2972 */
2973 new_bmcr |= BMCR_SPEED1000;
2974
2975 /* Force a linkdown */
2976 if (netif_carrier_ok(tp->dev)) {
2977 u32 adv;
2978
2979 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2980 adv &= ~(ADVERTISE_1000XFULL |
2981 ADVERTISE_1000XHALF |
2982 ADVERTISE_SLCT);
2983 tg3_writephy(tp, MII_ADVERTISE, adv);
2984 tg3_writephy(tp, MII_BMCR, bmcr |
2985 BMCR_ANRESTART |
2986 BMCR_ANENABLE);
2987 udelay(10);
2988 netif_carrier_off(tp->dev);
2989 }
2990 tg3_writephy(tp, MII_BMCR, new_bmcr);
2991 bmcr = new_bmcr;
2992 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2993 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2994 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2995 ASIC_REV_5714) {
2996 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2997 bmsr |= BMSR_LSTATUS;
2998 else
2999 bmsr &= ~BMSR_LSTATUS;
3000 }
747e8f8b
MC
3001 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3002 }
3003 }
3004
3005 if (bmsr & BMSR_LSTATUS) {
3006 current_speed = SPEED_1000;
3007 current_link_up = 1;
3008 if (bmcr & BMCR_FULLDPLX)
3009 current_duplex = DUPLEX_FULL;
3010 else
3011 current_duplex = DUPLEX_HALF;
3012
3013 if (bmcr & BMCR_ANENABLE) {
3014 u32 local_adv, remote_adv, common;
3015
3016 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3017 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3018 common = local_adv & remote_adv;
3019 if (common & (ADVERTISE_1000XHALF |
3020 ADVERTISE_1000XFULL)) {
3021 if (common & ADVERTISE_1000XFULL)
3022 current_duplex = DUPLEX_FULL;
3023 else
3024 current_duplex = DUPLEX_HALF;
3025
3026 tg3_setup_flow_control(tp, local_adv,
3027 remote_adv);
3028 }
3029 else
3030 current_link_up = 0;
3031 }
3032 }
3033
3034 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3035 if (tp->link_config.active_duplex == DUPLEX_HALF)
3036 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3037
3038 tw32_f(MAC_MODE, tp->mac_mode);
3039 udelay(40);
3040
3041 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3042
3043 tp->link_config.active_speed = current_speed;
3044 tp->link_config.active_duplex = current_duplex;
3045
3046 if (current_link_up != netif_carrier_ok(tp->dev)) {
3047 if (current_link_up)
3048 netif_carrier_on(tp->dev);
3049 else {
3050 netif_carrier_off(tp->dev);
3051 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3052 }
3053 tg3_link_report(tp);
3054 }
3055 return err;
3056}
3057
3058static void tg3_serdes_parallel_detect(struct tg3 *tp)
3059{
3d3ebe74 3060 if (tp->serdes_counter) {
747e8f8b 3061 /* Give autoneg time to complete. */
3d3ebe74 3062 tp->serdes_counter--;
747e8f8b
MC
3063 return;
3064 }
3065 if (!netif_carrier_ok(tp->dev) &&
3066 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3067 u32 bmcr;
3068
3069 tg3_readphy(tp, MII_BMCR, &bmcr);
3070 if (bmcr & BMCR_ANENABLE) {
3071 u32 phy1, phy2;
3072
3073 /* Select shadow register 0x1f */
3074 tg3_writephy(tp, 0x1c, 0x7c00);
3075 tg3_readphy(tp, 0x1c, &phy1);
3076
3077 /* Select expansion interrupt status register */
3078 tg3_writephy(tp, 0x17, 0x0f01);
3079 tg3_readphy(tp, 0x15, &phy2);
3080 tg3_readphy(tp, 0x15, &phy2);
3081
3082 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3083 /* We have signal detect and not receiving
3084 * config code words, link is up by parallel
3085 * detection.
3086 */
3087
3088 bmcr &= ~BMCR_ANENABLE;
3089 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3090 tg3_writephy(tp, MII_BMCR, bmcr);
3091 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3092 }
3093 }
3094 }
3095 else if (netif_carrier_ok(tp->dev) &&
3096 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3097 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3098 u32 phy2;
3099
3100 /* Select expansion interrupt status register */
3101 tg3_writephy(tp, 0x17, 0x0f01);
3102 tg3_readphy(tp, 0x15, &phy2);
3103 if (phy2 & 0x20) {
3104 u32 bmcr;
3105
3106 /* Config code words received, turn on autoneg. */
3107 tg3_readphy(tp, MII_BMCR, &bmcr);
3108 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3109
3110 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3111
3112 }
3113 }
3114}
3115
/* Bring the PHY into the state requested by tp->link_config and then
 * update the MAC registers that depend on the resulting link state:
 * TX inter-packet gap / slot time, statistics coalescing, and the
 * PCIe power-management threshold (ASPM workaround).
 *
 * Returns the error code from the PHY-type-specific setup helper.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	/* Exactly one PHY flavor applies: fiber serdes, MII serdes,
	 * or plain copper.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000/half requires the maximum slot time (0xff); every other
	 * speed/duplex combination uses the normal value of 32.
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: only coalesce statistics while carrier is up. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: while the link is down substitute the stored
	 * pwrmgmt_thresh for the L1 threshold field; with the link up,
	 * set the full threshold mask.
	 */
	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3161
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already in effect (flag set or
	 * indirect mailbox writes in use), this path should be
	 * unreachable.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* tg3_reset_task() tests this flag and installs the flushed
	 * mailbox-write workaround before reinitializing the chip.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3182
/* Number of free TX descriptors.  The smp_mb() pairs with the barrier
 * in tg3_tx() so that a producer that just stopped the queue observes
 * the freshest tx_cons before re-checking for space.
 */
static inline u32 tg3_tx_avail(struct tg3 *tp)
{
	smp_mb();
	return (tp->tx_pending -
		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
}
3189
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reclaim every descriptor the hardware has consumed. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion for an empty slot means completions are
		 * bogus (likely MMIO reordering); trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment; the following slots belong
		 * to this skb and must have a NULL skb pointer.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		/* Ring state looked inconsistent above; recover. */
		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		/* Re-check under the tx lock: tg3_start_xmit() may have
		 * stopped the queue again in the meantime.
		 */
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3257
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 *
 * @opaque_key selects the std or jumbo ring; if @src_idx >= 0 that
 * slot's skb pointer is cleared once the new buffer is committed;
 * @dest_idx_unmasked is reduced modulo the ring size.
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address fields of the descriptor need updating. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3329
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	/* Select the descriptor/buffer arrays for the ring in question. */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		return;
	};

	/* Move skb ownership, DMA mapping, and descriptor address from
	 * the source slot to the destination slot.
	 */
	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}
3370
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the VLAN acceleration path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3377
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie names the posting ring and the slot
		 * within it that supplied this buffer.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: post a replacement buffer and hand
			 * the original skb up the stack.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the original ring buffer.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* 0xffff hardware checksum result means "verified OK". */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about std buffers already
		 * posted so it never runs dry within one poll cycle.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3557
/* NAPI poll callback: handle PHY events, reap TX completions, process
 * up to @budget RX packets, then re-arm interrupts when idle.
 * Returns the number of RX packets processed.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	struct net_device *netdev = tp->dev;
	struct tg3_hw_status *sblk = tp->hw_status;
	int work_done = 0;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before acting on it. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() may request recovery; bail out and let the
		 * reset task rebuild the chip.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev, napi);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done = tg3_rx(tp, budget);

	/* Record the status tag (tagged mode) or clear the updated bit
	 * so tg3_has_work() sees a consistent view.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	if (!tg3_has_work(tp)) {
		netif_rx_complete(netdev, napi);
		tg3_restart_ints(tp);
	}

	return work_done;
}
3609
/* Tell the interrupt handlers to stand down (they test tg3_irq_sync())
 * and wait for any handler already running on another CPU to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3619
/* Nonzero while interrupts are quiesced via tg3_irq_quiesce(). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3624
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3636
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3641
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm up the status block and next RX completion entry. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while the device is being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3658
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3683
/* Legacy INTx interrupt handler (non-tagged status mode). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3732
/* Legacy INTx interrupt handler for tagged-status mode: a repeated
 * status_tag means this status block has already been processed.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3780
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * chip reports INTA asserted, then mask further interrupts so
	 * the test sees exactly one firing.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3795
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: shut the chip down and close the device.
		 * dev_close() cannot run under tp->lock, so drop the
		 * lock around the teardown and retake it before
		 * returning to the caller.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3820
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Drive the interrupt handler by hand for netpoll (e.g. netconsole). */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3829
/* Workqueue handler: stop traffic, halt and reinitialize the chip,
 * then restart the interface.  Scheduled from tg3_tx_timeout() and
 * from the TX-recovery path.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Device was taken down after the work was queued; nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* Retake the lock with IRQ quiescing for the actual reset. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* tg3_tx_recover() asked for the mailbox-write-reorder
	 * workaround; switch to flushed mailbox writes before the
	 * chip is reinitialized.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3870
/* Print a minimal MAC/DMA status snapshot for TX-timeout diagnosis. */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3878
/* net_device watchdog callback: optionally log chip state, then
 * schedule tg3_reset_task() to perform the actual recovery.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
3891
c58ec932
MC
3892/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3893static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3894{
3895 u32 base = (u32) mapping & 0xffffffff;
3896
3897 return ((base > 0xffffdcc0) &&
3898 (base + len + 8 < base));
3899}
3900
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only relevant on chips flagged with the 40-bit DMA bug, and
	 * only in configs where a dma_addr_t can exceed 40 bits.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3913
static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);

/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Replaces the already-queued descriptors for @skb (ring slots
 * [*start, last_plus_one)) with a single linearized copy that does not
 * trip the DMA bugs.  On success *start is advanced past the new
 * descriptor.  Returns 0 on success, -1 if the copy could not be
 * allocated or the copy itself still crosses a 4GB boundary (the
 * packet is dropped in that case).  The original skb is always freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		/* The first slot takes ownership of the new skb (or NULL
		 * if the workaround failed); the rest are cleared.
		 */
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3973
3974static void tg3_set_txd(struct tg3 *tp, int entry,
3975 dma_addr_t mapping, int len, u32 flags,
3976 u32 mss_and_is_end)
3977{
3978 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3979 int is_end = (mss_and_is_end & 0x1);
3980 u32 mss = (mss_and_is_end >> 1);
3981 u32 vlan_tag = 0;
3982
3983 if (is_end)
3984 flags |= TXD_FLAG_END;
3985 if (flags & TXD_FLAG_VLAN) {
3986 vlan_tag = flags >> 16;
3987 flags &= 0xffff;
3988 }
3989 vlan_tag |= (mss << TXD_MSS_SHIFT);
3990
3991 txd->addr_hi = ((u64) mapping >> 32);
3992 txd->addr_lo = ((u64) mapping & 0xffffffff);
3993 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3994 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3995}
3996
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either. Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		/* TSO path: prime the headers for hardware segmentation. */
		int tcp_opt_len, ip_tcp_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Encode the header length in the upper bits of mss:
		 * total header size for IPv6 TSO, IP+TCP header size
		 * (with the IP header pre-adjusted) for IPv4.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the first slot owns the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping: tg3_tx() may have freed space
		 * in the meantime (see the barrier in tg3_tx_avail()).
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4115
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments @skb in software and queues each resulting packet via
 * tg3_start_xmit_dma_bug().  The original skb is always freed.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping; wake again if space appeared. */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off the feature set. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
52c0fd83 4150
5a6f3074
MC
4151/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4152 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4153 */
4154static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
4155{
4156 struct tg3 *tp = netdev_priv(dev);
4157 dma_addr_t mapping;
1da177e4
LT
4158 u32 len, entry, base_flags, mss;
4159 int would_hit_hwbug;
1da177e4
LT
4160
4161 len = skb_headlen(skb);
4162
00b70504 4163 /* We are running in BH disabled context with netif_tx_lock
bea3348e 4164 * and TX reclaim runs via tp->napi.poll inside of a software
f47c11ee
DM
4165 * interrupt. Furthermore, IRQ processing runs lockless so we have
4166 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 4167 */
1b2a7205 4168 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1f064a87
SH
4169 if (!netif_queue_stopped(dev)) {
4170 netif_stop_queue(dev);
4171
4172 /* This is a hard error, log it. */
4173 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4174 "queue awake!\n", dev->name);
4175 }
1da177e4
LT
4176 return NETDEV_TX_BUSY;
4177 }
4178
4179 entry = tp->tx_prod;
4180 base_flags = 0;
84fa7933 4181 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4 4182 base_flags |= TXD_FLAG_TCPUDP_CSUM;
1da177e4 4183 mss = 0;
c13e3713 4184 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
eddc9ec5 4185 struct iphdr *iph;
52c0fd83 4186 int tcp_opt_len, ip_tcp_len, hdr_len;
1da177e4
LT
4187
4188 if (skb_header_cloned(skb) &&
4189 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4190 dev_kfree_skb(skb);
4191 goto out_unlock;
4192 }
4193
ab6a5bb6 4194 tcp_opt_len = tcp_optlen(skb);
c9bdd4b5 4195 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
1da177e4 4196
52c0fd83
MC
4197 hdr_len = ip_tcp_len + tcp_opt_len;
4198 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7f62ad5d 4199 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
52c0fd83
MC
4200 return (tg3_tso_bug(tp, skb));
4201
1da177e4
LT
4202 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4203 TXD_FLAG_CPU_POST_DMA);
4204
eddc9ec5
ACM
4205 iph = ip_hdr(skb);
4206 iph->check = 0;
4207 iph->tot_len = htons(mss + hdr_len);
1da177e4 4208 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
aa8223c7 4209 tcp_hdr(skb)->check = 0;
1da177e4 4210 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
aa8223c7
ACM
4211 } else
4212 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4213 iph->daddr, 0,
4214 IPPROTO_TCP,
4215 0);
1da177e4
LT
4216
4217 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4218 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
eddc9ec5 4219 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
4220 int tsflags;
4221
eddc9ec5 4222 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
4223 mss |= (tsflags << 11);
4224 }
4225 } else {
eddc9ec5 4226 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
4227 int tsflags;
4228
eddc9ec5 4229 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
4230 base_flags |= tsflags << 12;
4231 }
4232 }
4233 }
1da177e4
LT
4234#if TG3_VLAN_TAG_USED
4235 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4236 base_flags |= (TXD_FLAG_VLAN |
4237 (vlan_tx_tag_get(skb) << 16));
4238#endif
4239
4240 /* Queue skb data, a.k.a. the main skb fragment. */
4241 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4242
4243 tp->tx_buffers[entry].skb = skb;
4244 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4245
4246 would_hit_hwbug = 0;
4247
4248 if (tg3_4g_overflow_test(mapping, len))
c58ec932 4249 would_hit_hwbug = 1;
1da177e4
LT
4250
4251 tg3_set_txd(tp, entry, mapping, len, base_flags,
4252 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4253
4254 entry = NEXT_TX(entry);
4255
4256 /* Now loop through additional data fragments, and queue them. */
4257 if (skb_shinfo(skb)->nr_frags > 0) {
4258 unsigned int i, last;
4259
4260 last = skb_shinfo(skb)->nr_frags - 1;
4261 for (i = 0; i <= last; i++) {
4262 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4263
4264 len = frag->size;
4265 mapping = pci_map_page(tp->pdev,
4266 frag->page,
4267 frag->page_offset,
4268 len, PCI_DMA_TODEVICE);
4269
4270 tp->tx_buffers[entry].skb = NULL;
4271 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4272
c58ec932
MC
4273 if (tg3_4g_overflow_test(mapping, len))
4274 would_hit_hwbug = 1;
1da177e4 4275
72f2afb8
MC
4276 if (tg3_40bit_overflow_test(tp, mapping, len))
4277 would_hit_hwbug = 1;
4278
1da177e4
LT
4279 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4280 tg3_set_txd(tp, entry, mapping, len,
4281 base_flags, (i == last)|(mss << 1));
4282 else
4283 tg3_set_txd(tp, entry, mapping, len,
4284 base_flags, (i == last));
4285
4286 entry = NEXT_TX(entry);
4287 }
4288 }
4289
4290 if (would_hit_hwbug) {
4291 u32 last_plus_one = entry;
4292 u32 start;
1da177e4 4293
c58ec932
MC
4294 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4295 start &= (TG3_TX_RING_SIZE - 1);
1da177e4
LT
4296
4297 /* If the workaround fails due to memory/mapping
4298 * failure, silently drop this packet.
4299 */
72f2afb8 4300 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
c58ec932 4301 &start, base_flags, mss))
1da177e4
LT
4302 goto out_unlock;
4303
4304 entry = start;
4305 }
4306
4307 /* Packets are ready, update Tx producer idx local and on card. */
4308 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4309
4310 tp->tx_prod = entry;
1b2a7205 4311 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
1da177e4 4312 netif_stop_queue(dev);
42952231 4313 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
51b91468
MC
4314 netif_wake_queue(tp->dev);
4315 }
1da177e4
LT
4316
4317out_unlock:
4318 mmiowb();
1da177e4
LT
4319
4320 dev->trans_start = jiffies;
4321
4322 return NETDEV_TX_OK;
4323}
4324
4325static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4326 int new_mtu)
4327{
4328 dev->mtu = new_mtu;
4329
ef7f5ec0 4330 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4331 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4332 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4333 ethtool_op_set_tso(dev, 0);
4334 }
4335 else
4336 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4337 } else {
a4e2b347 4338 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4339 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4340 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4341 }
1da177e4
LT
4342}
4343
4344static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4345{
4346 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 4347 int err;
1da177e4
LT
4348
4349 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4350 return -EINVAL;
4351
4352 if (!netif_running(dev)) {
4353 /* We'll just catch it later when the
4354 * device is up'd.
4355 */
4356 tg3_set_mtu(dev, tp, new_mtu);
4357 return 0;
4358 }
4359
4360 tg3_netif_stop(tp);
f47c11ee
DM
4361
4362 tg3_full_lock(tp, 1);
1da177e4 4363
944d980e 4364 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
4365
4366 tg3_set_mtu(dev, tp, new_mtu);
4367
b9ec6c1b 4368 err = tg3_restart_hw(tp, 0);
1da177e4 4369
b9ec6c1b
MC
4370 if (!err)
4371 tg3_netif_start(tp);
1da177e4 4372
f47c11ee 4373 tg3_full_unlock(tp);
1da177e4 4374
b9ec6c1b 4375 return err;
1da177e4
LT
4376}
4377
4378/* Free up pending packets in all rx/tx rings.
4379 *
4380 * The chip has been shut down and the driver detached from
4381 * the networking, so no interrupts or new tx packets will
4382 * end up in the driver. tp->{tx,}lock is not held and we are not
4383 * in an interrupt context and thus may sleep.
4384 */
4385static void tg3_free_rings(struct tg3 *tp)
4386{
4387 struct ring_info *rxp;
4388 int i;
4389
4390 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4391 rxp = &tp->rx_std_buffers[i];
4392
4393 if (rxp->skb == NULL)
4394 continue;
4395 pci_unmap_single(tp->pdev,
4396 pci_unmap_addr(rxp, mapping),
7e72aad4 4397 tp->rx_pkt_buf_sz - tp->rx_offset,
1da177e4
LT
4398 PCI_DMA_FROMDEVICE);
4399 dev_kfree_skb_any(rxp->skb);
4400 rxp->skb = NULL;
4401 }
4402
4403 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4404 rxp = &tp->rx_jumbo_buffers[i];
4405
4406 if (rxp->skb == NULL)
4407 continue;
4408 pci_unmap_single(tp->pdev,
4409 pci_unmap_addr(rxp, mapping),
4410 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4411 PCI_DMA_FROMDEVICE);
4412 dev_kfree_skb_any(rxp->skb);
4413 rxp->skb = NULL;
4414 }
4415
4416 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4417 struct tx_ring_info *txp;
4418 struct sk_buff *skb;
4419 int j;
4420
4421 txp = &tp->tx_buffers[i];
4422 skb = txp->skb;
4423
4424 if (skb == NULL) {
4425 i++;
4426 continue;
4427 }
4428
4429 pci_unmap_single(tp->pdev,
4430 pci_unmap_addr(txp, mapping),
4431 skb_headlen(skb),
4432 PCI_DMA_TODEVICE);
4433 txp->skb = NULL;
4434
4435 i++;
4436
4437 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4438 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4439 pci_unmap_page(tp->pdev,
4440 pci_unmap_addr(txp, mapping),
4441 skb_shinfo(skb)->frags[j].size,
4442 PCI_DMA_TODEVICE);
4443 i++;
4444 }
4445
4446 dev_kfree_skb_any(skb);
4447 }
4448}
4449
4450/* Initialize tx/rx rings for packet processing.
4451 *
4452 * The chip has been shut down and the driver detached from
4453 * the networking, so no interrupts or new tx packets will
4454 * end up in the driver. tp->{tx,}lock are held and thus
4455 * we may not sleep.
4456 */
32d8c572 4457static int tg3_init_rings(struct tg3 *tp)
1da177e4
LT
4458{
4459 u32 i;
4460
4461 /* Free up all the SKBs. */
4462 tg3_free_rings(tp);
4463
4464 /* Zero out all descriptors. */
4465 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4466 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4467 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4468 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4469
7e72aad4 4470 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
a4e2b347 4471 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
7e72aad4
MC
4472 (tp->dev->mtu > ETH_DATA_LEN))
4473 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4474
1da177e4
LT
4475 /* Initialize invariants of the rings, we only set this
4476 * stuff once. This works because the card does not
4477 * write into the rx buffer posting rings.
4478 */
4479 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4480 struct tg3_rx_buffer_desc *rxd;
4481
4482 rxd = &tp->rx_std[i];
7e72aad4 4483 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
1da177e4
LT
4484 << RXD_LEN_SHIFT;
4485 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4486 rxd->opaque = (RXD_OPAQUE_RING_STD |
4487 (i << RXD_OPAQUE_INDEX_SHIFT));
4488 }
4489
0f893dc6 4490 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
4491 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4492 struct tg3_rx_buffer_desc *rxd;
4493
4494 rxd = &tp->rx_jumbo[i];
4495 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4496 << RXD_LEN_SHIFT;
4497 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4498 RXD_FLAG_JUMBO;
4499 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4500 (i << RXD_OPAQUE_INDEX_SHIFT));
4501 }
4502 }
4503
4504 /* Now allocate fresh SKBs for each rx ring. */
4505 for (i = 0; i < tp->rx_pending; i++) {
32d8c572
MC
4506 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4507 printk(KERN_WARNING PFX
4508 "%s: Using a smaller RX standard ring, "
4509 "only %d out of %d buffers were allocated "
4510 "successfully.\n",
4511 tp->dev->name, i, tp->rx_pending);
4512 if (i == 0)
4513 return -ENOMEM;
4514 tp->rx_pending = i;
1da177e4 4515 break;
32d8c572 4516 }
1da177e4
LT
4517 }
4518
0f893dc6 4519 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
4520 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4521 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
32d8c572
MC
4522 -1, i) < 0) {
4523 printk(KERN_WARNING PFX
4524 "%s: Using a smaller RX jumbo ring, "
4525 "only %d out of %d buffers were "
4526 "allocated successfully.\n",
4527 tp->dev->name, i, tp->rx_jumbo_pending);
4528 if (i == 0) {
4529 tg3_free_rings(tp);
4530 return -ENOMEM;
4531 }
4532 tp->rx_jumbo_pending = i;
1da177e4 4533 break;
32d8c572 4534 }
1da177e4
LT
4535 }
4536 }
32d8c572 4537 return 0;
1da177e4
LT
4538}
4539
4540/*
4541 * Must not be invoked with interrupt sources disabled and
4542 * the hardware shutdown down.
4543 */
4544static void tg3_free_consistent(struct tg3 *tp)
4545{
b4558ea9
JJ
4546 kfree(tp->rx_std_buffers);
4547 tp->rx_std_buffers = NULL;
1da177e4
LT
4548 if (tp->rx_std) {
4549 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4550 tp->rx_std, tp->rx_std_mapping);
4551 tp->rx_std = NULL;
4552 }
4553 if (tp->rx_jumbo) {
4554 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4555 tp->rx_jumbo, tp->rx_jumbo_mapping);
4556 tp->rx_jumbo = NULL;
4557 }
4558 if (tp->rx_rcb) {
4559 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4560 tp->rx_rcb, tp->rx_rcb_mapping);
4561 tp->rx_rcb = NULL;
4562 }
4563 if (tp->tx_ring) {
4564 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4565 tp->tx_ring, tp->tx_desc_mapping);
4566 tp->tx_ring = NULL;
4567 }
4568 if (tp->hw_status) {
4569 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4570 tp->hw_status, tp->status_mapping);
4571 tp->hw_status = NULL;
4572 }
4573 if (tp->hw_stats) {
4574 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4575 tp->hw_stats, tp->stats_mapping);
4576 tp->hw_stats = NULL;
4577 }
4578}
4579
4580/*
4581 * Must not be invoked with interrupt sources disabled and
4582 * the hardware shutdown down. Can sleep.
4583 */
4584static int tg3_alloc_consistent(struct tg3 *tp)
4585{
bd2b3343 4586 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
1da177e4
LT
4587 (TG3_RX_RING_SIZE +
4588 TG3_RX_JUMBO_RING_SIZE)) +
4589 (sizeof(struct tx_ring_info) *
4590 TG3_TX_RING_SIZE),
4591 GFP_KERNEL);
4592 if (!tp->rx_std_buffers)
4593 return -ENOMEM;
4594
1da177e4
LT
4595 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4596 tp->tx_buffers = (struct tx_ring_info *)
4597 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4598
4599 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4600 &tp->rx_std_mapping);
4601 if (!tp->rx_std)
4602 goto err_out;
4603
4604 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4605 &tp->rx_jumbo_mapping);
4606
4607 if (!tp->rx_jumbo)
4608 goto err_out;
4609
4610 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4611 &tp->rx_rcb_mapping);
4612 if (!tp->rx_rcb)
4613 goto err_out;
4614
4615 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4616 &tp->tx_desc_mapping);
4617 if (!tp->tx_ring)
4618 goto err_out;
4619
4620 tp->hw_status = pci_alloc_consistent(tp->pdev,
4621 TG3_HW_STATUS_SIZE,
4622 &tp->status_mapping);
4623 if (!tp->hw_status)
4624 goto err_out;
4625
4626 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4627 sizeof(struct tg3_hw_stats),
4628 &tp->stats_mapping);
4629 if (!tp->hw_stats)
4630 goto err_out;
4631
4632 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4633 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4634
4635 return 0;
4636
4637err_out:
4638 tg3_free_consistent(tp);
4639 return -ENOMEM;
4640}
4641
4642#define MAX_WAIT_CNT 1000
4643
4644/* To stop a block, clear the enable bit and poll till it
4645 * clears. tp->lock is held.
4646 */
b3b7d6be 4647static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4648{
4649 unsigned int i;
4650 u32 val;
4651
4652 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4653 switch (ofs) {
4654 case RCVLSC_MODE:
4655 case DMAC_MODE:
4656 case MBFREE_MODE:
4657 case BUFMGR_MODE:
4658 case MEMARB_MODE:
4659 /* We can't enable/disable these bits of the
4660 * 5705/5750, just say success.
4661 */
4662 return 0;
4663
4664 default:
4665 break;
4666 };
4667 }
4668
4669 val = tr32(ofs);
4670 val &= ~enable_bit;
4671 tw32_f(ofs, val);
4672
4673 for (i = 0; i < MAX_WAIT_CNT; i++) {
4674 udelay(100);
4675 val = tr32(ofs);
4676 if ((val & enable_bit) == 0)
4677 break;
4678 }
4679
b3b7d6be 4680 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4681 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4682 "ofs=%lx enable_bit=%x\n",
4683 ofs, enable_bit);
4684 return -ENODEV;
4685 }
4686
4687 return 0;
4688}
4689
4690/* tp->lock is held. */
b3b7d6be 4691static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
4692{
4693 int i, err;
4694
4695 tg3_disable_ints(tp);
4696
4697 tp->rx_mode &= ~RX_MODE_ENABLE;
4698 tw32_f(MAC_RX_MODE, tp->rx_mode);
4699 udelay(10);
4700
b3b7d6be
DM
4701 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4702 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4703 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4704 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4705 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4706 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4707
4708 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4709 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4710 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4711 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4712 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4713 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4714 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
4715
4716 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4717 tw32_f(MAC_MODE, tp->mac_mode);
4718 udelay(40);
4719
4720 tp->tx_mode &= ~TX_MODE_ENABLE;
4721 tw32_f(MAC_TX_MODE, tp->tx_mode);
4722
4723 for (i = 0; i < MAX_WAIT_CNT; i++) {
4724 udelay(100);
4725 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4726 break;
4727 }
4728 if (i >= MAX_WAIT_CNT) {
4729 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4730 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4731 tp->dev->name, tr32(MAC_TX_MODE));
e6de8ad1 4732 err |= -ENODEV;
1da177e4
LT
4733 }
4734
e6de8ad1 4735 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
4736 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4737 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
4738
4739 tw32(FTQ_RESET, 0xffffffff);
4740 tw32(FTQ_RESET, 0x00000000);
4741
b3b7d6be
DM
4742 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4743 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4
LT
4744
4745 if (tp->hw_status)
4746 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4747 if (tp->hw_stats)
4748 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4749
1da177e4
LT
4750 return err;
4751}
4752
4753/* tp->lock is held. */
4754static int tg3_nvram_lock(struct tg3 *tp)
4755{
4756 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4757 int i;
4758
ec41c7df
MC
4759 if (tp->nvram_lock_cnt == 0) {
4760 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4761 for (i = 0; i < 8000; i++) {
4762 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4763 break;
4764 udelay(20);
4765 }
4766 if (i == 8000) {
4767 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4768 return -ENODEV;
4769 }
1da177e4 4770 }
ec41c7df 4771 tp->nvram_lock_cnt++;
1da177e4
LT
4772 }
4773 return 0;
4774}
4775
4776/* tp->lock is held. */
4777static void tg3_nvram_unlock(struct tg3 *tp)
4778{
ec41c7df
MC
4779 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4780 if (tp->nvram_lock_cnt > 0)
4781 tp->nvram_lock_cnt--;
4782 if (tp->nvram_lock_cnt == 0)
4783 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4784 }
1da177e4
LT
4785}
4786
e6af301b
MC
4787/* tp->lock is held. */
4788static void tg3_enable_nvram_access(struct tg3 *tp)
4789{
4790 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4791 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4792 u32 nvaccess = tr32(NVRAM_ACCESS);
4793
4794 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4795 }
4796}
4797
4798/* tp->lock is held. */
4799static void tg3_disable_nvram_access(struct tg3 *tp)
4800{
4801 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4802 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4803 u32 nvaccess = tr32(NVRAM_ACCESS);
4804
4805 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4806 }
4807}
4808
0d3031d9
MC
4809static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4810{
4811 int i;
4812 u32 apedata;
4813
4814 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4815 if (apedata != APE_SEG_SIG_MAGIC)
4816 return;
4817
4818 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4819 if (apedata != APE_FW_STATUS_READY)
4820 return;
4821
4822 /* Wait for up to 1 millisecond for APE to service previous event. */
4823 for (i = 0; i < 10; i++) {
4824 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4825 return;
4826
4827 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4828
4829 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4830 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4831 event | APE_EVENT_STATUS_EVENT_PENDING);
4832
4833 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4834
4835 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4836 break;
4837
4838 udelay(100);
4839 }
4840
4841 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4842 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4843}
4844
4845static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4846{
4847 u32 event;
4848 u32 apedata;
4849
4850 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4851 return;
4852
4853 switch (kind) {
4854 case RESET_KIND_INIT:
4855 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4856 APE_HOST_SEG_SIG_MAGIC);
4857 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4858 APE_HOST_SEG_LEN_MAGIC);
4859 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4860 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4861 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4862 APE_HOST_DRIVER_ID_MAGIC);
4863 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4864 APE_HOST_BEHAV_NO_PHYLOCK);
4865
4866 event = APE_EVENT_STATUS_STATE_START;
4867 break;
4868 case RESET_KIND_SHUTDOWN:
4869 event = APE_EVENT_STATUS_STATE_UNLOAD;
4870 break;
4871 case RESET_KIND_SUSPEND:
4872 event = APE_EVENT_STATUS_STATE_SUSPEND;
4873 break;
4874 default:
4875 return;
4876 }
4877
4878 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4879
4880 tg3_ape_send_event(tp, event);
4881}
4882
1da177e4
LT
4883/* tp->lock is held. */
4884static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4885{
f49639e6
DM
4886 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4887 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4
LT
4888
4889 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4890 switch (kind) {
4891 case RESET_KIND_INIT:
4892 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4893 DRV_STATE_START);
4894 break;
4895
4896 case RESET_KIND_SHUTDOWN:
4897 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4898 DRV_STATE_UNLOAD);
4899 break;
4900
4901 case RESET_KIND_SUSPEND:
4902 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4903 DRV_STATE_SUSPEND);
4904 break;
4905
4906 default:
4907 break;
4908 };
4909 }
0d3031d9
MC
4910
4911 if (kind == RESET_KIND_INIT ||
4912 kind == RESET_KIND_SUSPEND)
4913 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
4914}
4915
4916/* tp->lock is held. */
4917static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4918{
4919 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4920 switch (kind) {
4921 case RESET_KIND_INIT:
4922 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4923 DRV_STATE_START_DONE);
4924 break;
4925
4926 case RESET_KIND_SHUTDOWN:
4927 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4928 DRV_STATE_UNLOAD_DONE);
4929 break;
4930
4931 default:
4932 break;
4933 };
4934 }
0d3031d9
MC
4935
4936 if (kind == RESET_KIND_SHUTDOWN)
4937 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
4938}
4939
4940/* tp->lock is held. */
4941static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4942{
4943 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4944 switch (kind) {
4945 case RESET_KIND_INIT:
4946 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4947 DRV_STATE_START);
4948 break;
4949
4950 case RESET_KIND_SHUTDOWN:
4951 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4952 DRV_STATE_UNLOAD);
4953 break;
4954
4955 case RESET_KIND_SUSPEND:
4956 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4957 DRV_STATE_SUSPEND);
4958 break;
4959
4960 default:
4961 break;
4962 };
4963 }
4964}
4965
7a6f4369
MC
4966static int tg3_poll_fw(struct tg3 *tp)
4967{
4968 int i;
4969 u32 val;
4970
b5d3772c 4971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
0ccead18
GZ
4972 /* Wait up to 20ms for init done. */
4973 for (i = 0; i < 200; i++) {
b5d3772c
MC
4974 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4975 return 0;
0ccead18 4976 udelay(100);
b5d3772c
MC
4977 }
4978 return -ENODEV;
4979 }
4980
7a6f4369
MC
4981 /* Wait for firmware initialization to complete. */
4982 for (i = 0; i < 100000; i++) {
4983 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4984 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4985 break;
4986 udelay(10);
4987 }
4988
4989 /* Chip might not be fitted with firmware. Some Sun onboard
4990 * parts are configured like that. So don't signal the timeout
4991 * of the above loop as an error, but do report the lack of
4992 * running firmware once.
4993 */
4994 if (i >= 100000 &&
4995 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4996 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4997
4998 printk(KERN_INFO PFX "%s: No firmware running.\n",
4999 tp->dev->name);
5000 }
5001
5002 return 0;
5003}
5004
ee6a99b5
MC
5005/* Save PCI command register before chip reset */
5006static void tg3_save_pci_state(struct tg3 *tp)
5007{
5008 u32 val;
5009
5010 pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5011 tp->pci_cmd = val;
5012}
5013
5014/* Restore PCI state after chip reset */
5015static void tg3_restore_pci_state(struct tg3 *tp)
5016{
5017 u32 val;
5018
5019 /* Re-enable indirect register accesses. */
5020 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5021 tp->misc_host_ctrl);
5022
5023 /* Set MAX PCI retry to zero. */
5024 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5025 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5026 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5027 val |= PCISTATE_RETRY_SAME_DMA;
0d3031d9
MC
5028 /* Allow reads and writes to the APE register and memory space. */
5029 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5030 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5031 PCISTATE_ALLOW_APE_SHMEM_WR;
ee6a99b5
MC
5032 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5033
5034 pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);
5035
5036 /* Make sure PCI-X relaxed ordering bit is clear. */
9974a356
MC
5037 if (tp->pcix_cap) {
5038 u16 pcix_cmd;
5039
5040 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5041 &pcix_cmd);
5042 pcix_cmd &= ~PCI_X_CMD_ERO;
5043 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5044 pcix_cmd);
5045 }
ee6a99b5
MC
5046
5047 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ee6a99b5
MC
5048
5049 /* Chip reset on 5780 will reset MSI enable bit,
5050 * so need to restore it.
5051 */
5052 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5053 u16 ctrl;
5054
5055 pci_read_config_word(tp->pdev,
5056 tp->msi_cap + PCI_MSI_FLAGS,
5057 &ctrl);
5058 pci_write_config_word(tp->pdev,
5059 tp->msi_cap + PCI_MSI_FLAGS,
5060 ctrl | PCI_MSI_FLAGS_ENABLE);
5061 val = tr32(MSGINT_MODE);
5062 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5063 }
5064 }
5065}
5066
1da177e4
LT
5067static void tg3_stop_fw(struct tg3 *);
5068
5069/* tp->lock is held. */
5070static int tg3_chip_reset(struct tg3 *tp)
5071{
5072 u32 val;
1ee582d8 5073 void (*write_op)(struct tg3 *, u32, u32);
7a6f4369 5074 int err;
1da177e4 5075
f49639e6
DM
5076 tg3_nvram_lock(tp);
5077
5078 /* No matching tg3_nvram_unlock() after this because
5079 * chip reset below will undo the nvram lock.
5080 */
5081 tp->nvram_lock_cnt = 0;
1da177e4 5082
ee6a99b5
MC
5083 /* GRC_MISC_CFG core clock reset will clear the memory
5084 * enable bit in PCI register 4 and the MSI enable bit
5085 * on some chips, so we save relevant registers here.
5086 */
5087 tg3_save_pci_state(tp);
5088
d9ab5ad1 5089 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 5090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 5091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
5092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d9ab5ad1
MC
5094 tw32(GRC_FASTBOOT_PC, 0);
5095
1da177e4
LT
5096 /*
5097 * We must avoid the readl() that normally takes place.
5098 * It locks machines, causes machine checks, and other
5099 * fun things. So, temporarily disable the 5701
5100 * hardware workaround, while we do the reset.
5101 */
1ee582d8
MC
5102 write_op = tp->write32;
5103 if (write_op == tg3_write_flush_reg32)
5104 tp->write32 = tg3_write32;
1da177e4 5105
d18edcb2
MC
5106 /* Prevent the irq handler from reading or writing PCI registers
5107 * during chip reset when the memory enable bit in the PCI command
5108 * register may be cleared. The chip does not generate interrupt
5109 * at this time, but the irq handler may still be called due to irq
5110 * sharing or irqpoll.
5111 */
5112 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
b8fa2f3a
MC
5113 if (tp->hw_status) {
5114 tp->hw_status->status = 0;
5115 tp->hw_status->status_tag = 0;
5116 }
d18edcb2
MC
5117 tp->last_tag = 0;
5118 smp_mb();
5119 synchronize_irq(tp->pdev->irq);
5120
1da177e4
LT
5121 /* do the reset */
5122 val = GRC_MISC_CFG_CORECLK_RESET;
5123
5124 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5125 if (tr32(0x7e2c) == 0x60) {
5126 tw32(0x7e2c, 0x20);
5127 }
5128 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5129 tw32(GRC_MISC_CFG, (1 << 29));
5130 val |= (1 << 29);
5131 }
5132 }
5133
b5d3772c
MC
5134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5135 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5136 tw32(GRC_VCPU_EXT_CTRL,
5137 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5138 }
5139
1da177e4
LT
5140 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5141 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5142 tw32(GRC_MISC_CFG, val);
5143
1ee582d8
MC
5144 /* restore 5701 hardware bug workaround write method */
5145 tp->write32 = write_op;
1da177e4
LT
5146
5147 /* Unfortunately, we have to delay before the PCI read back.
5148 * Some 575X chips even will not respond to a PCI cfg access
5149 * when the reset command is given to the chip.
5150 *
5151 * How do these hardware designers expect things to work
5152 * properly if the PCI write is posted for a long period
5153 * of time? It is always necessary to have some method by
5154 * which a register read back can occur to push the write
5155 * out which does the reset.
5156 *
5157 * For most tg3 variants the trick below was working.
5158 * Ho hum...
5159 */
5160 udelay(120);
5161
5162 /* Flush PCI posted writes. The normal MMIO registers
5163 * are inaccessible at this time so this is the only
5164 * way to make this reliably (actually, this is no longer
5165 * the case, see above). I tried to use indirect
5166 * register read/write but this upset some 5701 variants.
5167 */
5168 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5169
5170 udelay(120);
5171
5172 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5173 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5174 int i;
5175 u32 cfg_val;
5176
5177 /* Wait for link training to complete. */
5178 for (i = 0; i < 5000; i++)
5179 udelay(100);
5180
5181 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5182 pci_write_config_dword(tp->pdev, 0xc4,
5183 cfg_val | (1 << 15));
5184 }
5185 /* Set PCIE max payload size and clear error status. */
5186 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5187 }
5188
ee6a99b5 5189 tg3_restore_pci_state(tp);
1da177e4 5190
d18edcb2
MC
5191 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5192
ee6a99b5
MC
5193 val = 0;
5194 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4cf78e4f 5195 val = tr32(MEMARB_MODE);
ee6a99b5 5196 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1da177e4
LT
5197
5198 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5199 tg3_stop_fw(tp);
5200 tw32(0x5000, 0x400);
5201 }
5202
5203 tw32(GRC_MODE, tp->grc_mode);
5204
5205 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
ab0049b4 5206 val = tr32(0xc4);
1da177e4
LT
5207
5208 tw32(0xc4, val | (1 << 15));
5209 }
5210
5211 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5212 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5213 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5214 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5215 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5216 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5217 }
5218
5219 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5220 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5221 tw32_f(MAC_MODE, tp->mac_mode);
747e8f8b
MC
5222 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5223 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5224 tw32_f(MAC_MODE, tp->mac_mode);
1da177e4
LT
5225 } else
5226 tw32_f(MAC_MODE, 0);
5227 udelay(40);
5228
7a6f4369
MC
5229 err = tg3_poll_fw(tp);
5230 if (err)
5231 return err;
1da177e4
LT
5232
5233 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5234 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
ab0049b4 5235 val = tr32(0x7c00);
1da177e4
LT
5236
5237 tw32(0x7c00, val | (1 << 25));
5238 }
5239
5240 /* Reprobe ASF enable state. */
5241 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5242 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5243 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5244 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5245 u32 nic_cfg;
5246
5247 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5248 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5249 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 5250 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
5251 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5252 }
5253 }
5254
5255 return 0;
5256}
5257
5258/* tp->lock is held. */
5259static void tg3_stop_fw(struct tg3 *tp)
5260{
0d3031d9
MC
5261 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5262 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
1da177e4
LT
5263 u32 val;
5264 int i;
5265
5266 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5267 val = tr32(GRC_RX_CPU_EVENT);
5268 val |= (1 << 14);
5269 tw32(GRC_RX_CPU_EVENT, val);
5270
5271 /* Wait for RX cpu to ACK the event. */
5272 for (i = 0; i < 100; i++) {
5273 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5274 break;
5275 udelay(1);
5276 }
5277 }
5278}
5279
5280/* tp->lock is held. */
944d980e 5281static int tg3_halt(struct tg3 *tp, int kind, int silent)
1da177e4
LT
5282{
5283 int err;
5284
5285 tg3_stop_fw(tp);
5286
944d980e 5287 tg3_write_sig_pre_reset(tp, kind);
1da177e4 5288
b3b7d6be 5289 tg3_abort_hw(tp, silent);
1da177e4
LT
5290 err = tg3_chip_reset(tp);
5291
944d980e
MC
5292 tg3_write_sig_legacy(tp, kind);
5293 tg3_write_sig_post_reset(tp, kind);
1da177e4
LT
5294
5295 if (err)
5296 return err;
5297
5298 return 0;
5299}
5300
5301#define TG3_FW_RELEASE_MAJOR 0x0
5302#define TG3_FW_RELASE_MINOR 0x0
5303#define TG3_FW_RELEASE_FIX 0x0
5304#define TG3_FW_START_ADDR 0x08000000
5305#define TG3_FW_TEXT_ADDR 0x08000000
5306#define TG3_FW_TEXT_LEN 0x9c0
5307#define TG3_FW_RODATA_ADDR 0x080009c0
5308#define TG3_FW_RODATA_LEN 0x60
5309#define TG3_FW_DATA_ADDR 0x08000a40
5310#define TG3_FW_DATA_LEN 0x20
5311#define TG3_FW_SBSS_ADDR 0x08000a60
5312#define TG3_FW_SBSS_LEN 0xc
5313#define TG3_FW_BSS_ADDR 0x08000a70
5314#define TG3_FW_BSS_LEN 0x10
5315
50da859d 5316static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5317 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5318 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5319 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5320 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5321 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5322 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5323 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5324 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5325 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5326 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5327 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5328 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5329 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5330 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5331 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5332 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5333 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5334 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5335 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5336 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5337 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5338 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5339 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5340 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5341 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5342 0, 0, 0, 0, 0, 0,
5343 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5344 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5345 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5346 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5347 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5348 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5349 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5350 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5351 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5352 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5353 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5354 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5355 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5356 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5357 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5358 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5359 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5360 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5361 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5362 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5363 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5364 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5365 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5366 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5367 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5368 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5369 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5370 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5371 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5372 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5373 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5374 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5375 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5376 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5377 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5378 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5379 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5380 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5381 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5382 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5383 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5384 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5385 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5386 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5387 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5388 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5389 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5390 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5391 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5392 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5393 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5394 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5395 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5396 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5397 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5398 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5399 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5400 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5401 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5402 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5403 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5404 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5405 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5406 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5407 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5408};
5409
50da859d 5410static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5411 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5412 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5413 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5414 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5415 0x00000000
5416};
5417
5418#if 0 /* All zeros, don't eat up space with it. */
5419u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5420 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5421 0x00000000, 0x00000000, 0x00000000, 0x00000000
5422};
5423#endif
5424
5425#define RX_CPU_SCRATCH_BASE 0x30000
5426#define RX_CPU_SCRATCH_SIZE 0x04000
5427#define TX_CPU_SCRATCH_BASE 0x34000
5428#define TX_CPU_SCRATCH_SIZE 0x04000
5429
5430/* tp->lock is held. */
5431static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5432{
5433 int i;
5434
5d9428de
ES
5435 BUG_ON(offset == TX_CPU_BASE &&
5436 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
1da177e4 5437
b5d3772c
MC
5438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5439 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5440
5441 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5442 return 0;
5443 }
1da177e4
LT
5444 if (offset == RX_CPU_BASE) {
5445 for (i = 0; i < 10000; i++) {
5446 tw32(offset + CPU_STATE, 0xffffffff);
5447 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5448 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5449 break;
5450 }
5451
5452 tw32(offset + CPU_STATE, 0xffffffff);
5453 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5454 udelay(10);
5455 } else {
5456 for (i = 0; i < 10000; i++) {
5457 tw32(offset + CPU_STATE, 0xffffffff);
5458 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5459 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5460 break;
5461 }
5462 }
5463
5464 if (i >= 10000) {
5465 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5466 "and %s CPU\n",
5467 tp->dev->name,
5468 (offset == RX_CPU_BASE ? "RX" : "TX"));
5469 return -ENODEV;
5470 }
ec41c7df
MC
5471
5472 /* Clear firmware's nvram arbitration. */
5473 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5474 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
1da177e4
LT
5475 return 0;
5476}
5477
5478struct fw_info {
5479 unsigned int text_base;
5480 unsigned int text_len;
50da859d 5481 const u32 *text_data;
1da177e4
LT
5482 unsigned int rodata_base;
5483 unsigned int rodata_len;
50da859d 5484 const u32 *rodata_data;
1da177e4
LT
5485 unsigned int data_base;
5486 unsigned int data_len;
50da859d 5487 const u32 *data_data;
1da177e4
LT
5488};
5489
5490/* tp->lock is held. */
5491static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5492 int cpu_scratch_size, struct fw_info *info)
5493{
ec41c7df 5494 int err, lock_err, i;
1da177e4
LT
5495 void (*write_op)(struct tg3 *, u32, u32);
5496
5497 if (cpu_base == TX_CPU_BASE &&
5498 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5499 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5500 "TX cpu firmware on %s which is 5705.\n",
5501 tp->dev->name);
5502 return -EINVAL;
5503 }
5504
5505 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5506 write_op = tg3_write_mem;
5507 else
5508 write_op = tg3_write_indirect_reg32;
5509
1b628151
MC
5510 /* It is possible that bootcode is still loading at this point.
5511 * Get the nvram lock first before halting the cpu.
5512 */
ec41c7df 5513 lock_err = tg3_nvram_lock(tp);
1da177e4 5514 err = tg3_halt_cpu(tp, cpu_base);
ec41c7df
MC
5515 if (!lock_err)
5516 tg3_nvram_unlock(tp);
1da177e4
LT
5517 if (err)
5518 goto out;
5519
5520 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5521 write_op(tp, cpu_scratch_base + i, 0);
5522 tw32(cpu_base + CPU_STATE, 0xffffffff);
5523 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5524 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5525 write_op(tp, (cpu_scratch_base +
5526 (info->text_base & 0xffff) +
5527 (i * sizeof(u32))),
5528 (info->text_data ?
5529 info->text_data[i] : 0));
5530 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5531 write_op(tp, (cpu_scratch_base +
5532 (info->rodata_base & 0xffff) +
5533 (i * sizeof(u32))),
5534 (info->rodata_data ?
5535 info->rodata_data[i] : 0));
5536 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5537 write_op(tp, (cpu_scratch_base +
5538 (info->data_base & 0xffff) +
5539 (i * sizeof(u32))),
5540 (info->data_data ?
5541 info->data_data[i] : 0));
5542
5543 err = 0;
5544
5545out:
1da177e4
LT
5546 return err;
5547}
5548
5549/* tp->lock is held. */
5550static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5551{
5552 struct fw_info info;
5553 int err, i;
5554
5555 info.text_base = TG3_FW_TEXT_ADDR;
5556 info.text_len = TG3_FW_TEXT_LEN;
5557 info.text_data = &tg3FwText[0];
5558 info.rodata_base = TG3_FW_RODATA_ADDR;
5559 info.rodata_len = TG3_FW_RODATA_LEN;
5560 info.rodata_data = &tg3FwRodata[0];
5561 info.data_base = TG3_FW_DATA_ADDR;
5562 info.data_len = TG3_FW_DATA_LEN;
5563 info.data_data = NULL;
5564
5565 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5566 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5567 &info);
5568 if (err)
5569 return err;
5570
5571 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5572 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5573 &info);
5574 if (err)
5575 return err;
5576
5577 /* Now startup only the RX cpu. */
5578 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5579 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5580
5581 for (i = 0; i < 5; i++) {
5582 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5583 break;
5584 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5585 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5586 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5587 udelay(1000);
5588 }
5589 if (i >= 5) {
5590 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5591 "to set RX CPU PC, is %08x should be %08x\n",
5592 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5593 TG3_FW_TEXT_ADDR);
5594 return -ENODEV;
5595 }
5596 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5597 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5598
5599 return 0;
5600}
5601
1da177e4
LT
5602
5603#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5604#define TG3_TSO_FW_RELASE_MINOR 0x6
5605#define TG3_TSO_FW_RELEASE_FIX 0x0
5606#define TG3_TSO_FW_START_ADDR 0x08000000
5607#define TG3_TSO_FW_TEXT_ADDR 0x08000000
5608#define TG3_TSO_FW_TEXT_LEN 0x1aa0
5609#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5610#define TG3_TSO_FW_RODATA_LEN 0x60
5611#define TG3_TSO_FW_DATA_ADDR 0x08001b20
5612#define TG3_TSO_FW_DATA_LEN 0x30
5613#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5614#define TG3_TSO_FW_SBSS_LEN 0x2c
5615#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5616#define TG3_TSO_FW_BSS_LEN 0x894
5617
50da859d 5618static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5619 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5620 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5621 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5622 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5623 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5624 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5625 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5626 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5627 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5628 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5629 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5630 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5631 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5632 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5633 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5634 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5635 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5636 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5637 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5638 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5639 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5640 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5641 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5642 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5643 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5644 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5645 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5646 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5647 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5648 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5649 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5650 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5651 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5652 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5653 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5654 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5655 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5656 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5657 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5658 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5659 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5660 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5661 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5662 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5663 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5664 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5665 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5666 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5667 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5668 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5669 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5670 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5671 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5672 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5673 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5674 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5675 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5676 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5677 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5678 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5679 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5680 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5681 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5682 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5683 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5684 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5685 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5686 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5687 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5688 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5689 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5690 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5691 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5692 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5693 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5694 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5695 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5696 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5697 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5698 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5699 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5700 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5701 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5702 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5703 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5704 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5705 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5706 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5707 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5708 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5709 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5710 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5711 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5712 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5713 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5714 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5715 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5716 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5717 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5718 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5719 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5720 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5721 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5722 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5723 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5724 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5725 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5726 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5727 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5728 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5729 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5730 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5731 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5732 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5733 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5734 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5735 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5736 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5737 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5738 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5739 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5740 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5741 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5742 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5743 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5744 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5745 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5746 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5747 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5748 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5749 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5750 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5751 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5752 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5753 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5754 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5755 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5756 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5757 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5758 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5759 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5760 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5761 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5762 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5763 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5764 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5765 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5766 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5767 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5768 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5769 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5770 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5771 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5772 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5773 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5774 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5775 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5776 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5777 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5778 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5779 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5780 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5781 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5782 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5783 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5784 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5785 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5786 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5787 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5788 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5789 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5790 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5791 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5792 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5793 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5794 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5795 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5796 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5797 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5798 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5799 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5800 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5801 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5802 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5803 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5804 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5805 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5806 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5807 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5808 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5809 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5810 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5811 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5812 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5813 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5814 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5815 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5816 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5817 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5818 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5819 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5820 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5821 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5822 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5823 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5824 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5825 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5826 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5827 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5828 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5829 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5830 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5831 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5832 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5833 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5834 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5835 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5836 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5837 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5838 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5839 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5840 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5841 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5842 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5843 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5844 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5845 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5846 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5847 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5848 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5849 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5850 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5851 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5852 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5853 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5854 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5855 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5856 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5857 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5858 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5859 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5860 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5861 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5862 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5863 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5864 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5865 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5866 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5867 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5868 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5869 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5870 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5871 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5872 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5873 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5874 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5875 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5876 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5877 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5878 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5879 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5880 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5881 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5882 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5883 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5884 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5885 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5886 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5887 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5888 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5889 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5890 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5891 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5892 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5893 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5894 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5895 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5896 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5897 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5898 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5899 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5900 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5901 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5902 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5903};
5904
/* Read-only data segment of the generic TSO firmware.  Raw section
 * contents loaded verbatim into NIC memory by tg3_load_tso_firmware();
 * the words decode to ASCII tags such as "MainCpuB", "MainCpuA",
 * "stkoffldIn", "SwEvent0" and "fatalErr".  Generated data -- do not
 * edit by hand.
 */
static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
5912
/* Initialized-data segment of the generic TSO firmware.  Begins with
 * the ASCII version tag "stkoffld_v1.6.0".  Generated data -- do not
 * edit by hand.
 */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
5918
/* 5705 needs a special version of the TSO firmware.  The constants
 * below give that image's version and its section layout inside NIC
 * SRAM (text, rodata, data, sbss, bss), mirroring the generic
 * TG3_TSO_FW_* set used for the other chips.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
/* NOTE(review): "RELASE" is a historical typo; the name is likely
 * referenced elsewhere in this file -- do not rename without checking
 * all users.
 */
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90      /* = TEXT_ADDR + TEXT_LEN */
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20      /* = DATA_ADDR + DATA_LEN */
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
5934
/* Text (instruction) segment of the 5705-specific TSO firmware;
 * TG3_TSO5_FW_TEXT_LEN bytes, loaded at TG3_TSO5_FW_TEXT_ADDR onto the
 * RX CPU by tg3_load_tso_firmware().  Generated data -- do not edit by
 * hand.  (The words appear to be MIPS machine code for the on-chip
 * CPU -- verify against Broadcom documentation before relying on that.)
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
        0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
        0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
        0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
        0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
        0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
        0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
        0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
        0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
        0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
        0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
        0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
        0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
        0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
        0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
        0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
        0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
        0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
        0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
        0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
        0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
        0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
        0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
        0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
        0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
        0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
        0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
        0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
        0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
        0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
        0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
        0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
        0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
        0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
        0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
        0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
        0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
        0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
        0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
        0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
        0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
        0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
        0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
        0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
        0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
        0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
        0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
        0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
        0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
        0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
        0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
        0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
        0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
        0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
        0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
        0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
        0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
        0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
        0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
        0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
        0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
        0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
        0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
        0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
        0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
        0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
        0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
        0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
        0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
        0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
        0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
        0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
        0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
        0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
        0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
        0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
        0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
        0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
        0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
        0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
        0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
        0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
        0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
        0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
        0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
        0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
        0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
        0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
        0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
        0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
        0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
        0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
        0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
        0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
        0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
        0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
        0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
        0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
        0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
        0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
        0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
        0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
        0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
        0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
        0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
        0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
        0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
        0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
        0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
        0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
        0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
        0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
        0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
        0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
        0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
        0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
        0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
        0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
        0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
        0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
        0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
        0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
        0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
        0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
        0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
        0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
        0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
        0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0x00000000, 0x00000000, 0x00000000,
};
6093
/* Read-only data segment of the 5705 TSO firmware; the words decode to
 * ASCII tags such as "MainCpuB", "MainCpuA", "stkoffld" and "fatalErr".
 * Generated data -- do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
6100
/* Initialized-data segment of the 5705 TSO firmware; begins with the
 * ASCII version tag "stkoffld_v1.2.0".  Generated data -- do not edit
 * by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
6105
6106/* tp->lock is held. */
6107static int tg3_load_tso_firmware(struct tg3 *tp)
6108{
6109 struct fw_info info;
6110 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6111 int err, i;
6112
6113 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6114 return 0;
6115
6116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6117 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6118 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6119 info.text_data = &tg3Tso5FwText[0];
6120 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6121 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6122 info.rodata_data = &tg3Tso5FwRodata[0];
6123 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6124 info.data_len = TG3_TSO5_FW_DATA_LEN;
6125 info.data_data = &tg3Tso5FwData[0];
6126 cpu_base = RX_CPU_BASE;
6127 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6128 cpu_scratch_size = (info.text_len +
6129 info.rodata_len +
6130 info.data_len +
6131 TG3_TSO5_FW_SBSS_LEN +
6132 TG3_TSO5_FW_BSS_LEN);
6133 } else {
6134 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6135 info.text_len = TG3_TSO_FW_TEXT_LEN;
6136 info.text_data = &tg3TsoFwText[0];
6137 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6138 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6139 info.rodata_data = &tg3TsoFwRodata[0];
6140 info.data_base = TG3_TSO_FW_DATA_ADDR;
6141 info.data_len = TG3_TSO_FW_DATA_LEN;
6142 info.data_data = &tg3TsoFwData[0];
6143 cpu_base = TX_CPU_BASE;
6144 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6145 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6146 }
6147
6148 err = tg3_load_firmware_cpu(tp, cpu_base,
6149 cpu_scratch_base, cpu_scratch_size,
6150 &info);
6151 if (err)
6152 return err;
6153
6154 /* Now startup the cpu. */
6155 tw32(cpu_base + CPU_STATE, 0xffffffff);
6156 tw32_f(cpu_base + CPU_PC, info.text_base);
6157
6158 for (i = 0; i < 5; i++) {
6159 if (tr32(cpu_base + CPU_PC) == info.text_base)
6160 break;
6161 tw32(cpu_base + CPU_STATE, 0xffffffff);
6162 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6163 tw32_f(cpu_base + CPU_PC, info.text_base);
6164 udelay(1000);
6165 }
6166 if (i >= 5) {
6167 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6168 "to set CPU PC, is %08x should be %08x\n",
6169 tp->dev->name, tr32(cpu_base + CPU_PC),
6170 info.text_base);
6171 return -ENODEV;
6172 }
6173 tw32(cpu_base + CPU_STATE, 0xffffffff);
6174 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6175 return 0;
6176}
6177
1da177e4
LT
6178
6179/* tp->lock is held. */
986e0aeb 6180static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1da177e4
LT
6181{
6182 u32 addr_high, addr_low;
6183 int i;
6184
6185 addr_high = ((tp->dev->dev_addr[0] << 8) |
6186 tp->dev->dev_addr[1]);
6187 addr_low = ((tp->dev->dev_addr[2] << 24) |
6188 (tp->dev->dev_addr[3] << 16) |
6189 (tp->dev->dev_addr[4] << 8) |
6190 (tp->dev->dev_addr[5] << 0));
6191 for (i = 0; i < 4; i++) {
986e0aeb
MC
6192 if (i == 1 && skip_mac_1)
6193 continue;
1da177e4
LT
6194 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6195 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6196 }
6197
6198 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6200 for (i = 0; i < 12; i++) {
6201 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6202 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6203 }
6204 }
6205
6206 addr_high = (tp->dev->dev_addr[0] +
6207 tp->dev->dev_addr[1] +
6208 tp->dev->dev_addr[2] +
6209 tp->dev->dev_addr[3] +
6210 tp->dev->dev_addr[4] +
6211 tp->dev->dev_addr[5]) &
6212 TX_BACKOFF_SEED_MASK;
6213 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6214}
6215
6216static int tg3_set_mac_addr(struct net_device *dev, void *p)
6217{
6218 struct tg3 *tp = netdev_priv(dev);
6219 struct sockaddr *addr = p;
986e0aeb 6220 int err = 0, skip_mac_1 = 0;
1da177e4 6221
f9804ddb
MC
6222 if (!is_valid_ether_addr(addr->sa_data))
6223 return -EINVAL;
6224
1da177e4
LT
6225 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6226
e75f7c90
MC
6227 if (!netif_running(dev))
6228 return 0;
6229
58712ef9 6230 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
986e0aeb 6231 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 6232
986e0aeb
MC
6233 addr0_high = tr32(MAC_ADDR_0_HIGH);
6234 addr0_low = tr32(MAC_ADDR_0_LOW);
6235 addr1_high = tr32(MAC_ADDR_1_HIGH);
6236 addr1_low = tr32(MAC_ADDR_1_LOW);
6237
6238 /* Skip MAC addr 1 if ASF is using it. */
6239 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6240 !(addr1_high == 0 && addr1_low == 0))
6241 skip_mac_1 = 1;
58712ef9 6242 }
986e0aeb
MC
6243 spin_lock_bh(&tp->lock);
6244 __tg3_set_mac_addr(tp, skip_mac_1);
6245 spin_unlock_bh(&tp->lock);
1da177e4 6246
b9ec6c1b 6247 return err;
1da177e4
LT
6248}
6249
6250/* tp->lock is held. */
6251static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6252 dma_addr_t mapping, u32 maxlen_flags,
6253 u32 nic_addr)
6254{
6255 tg3_write_mem(tp,
6256 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6257 ((u64) mapping >> 32));
6258 tg3_write_mem(tp,
6259 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6260 ((u64) mapping & 0xffffffff));
6261 tg3_write_mem(tp,
6262 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6263 maxlen_flags);
6264
6265 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6266 tg3_write_mem(tp,
6267 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6268 nic_addr);
6269}
6270
6271static void __tg3_set_rx_mode(struct net_device *);
d244c892 6272static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
6273{
6274 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6275 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6276 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6277 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6278 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6279 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6280 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6281 }
6282 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6283 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6284 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6285 u32 val = ec->stats_block_coalesce_usecs;
6286
6287 if (!netif_carrier_ok(tp->dev))
6288 val = 0;
6289
6290 tw32(HOSTCC_STAT_COAL_TICKS, val);
6291 }
6292}
1da177e4
LT
6293
6294/* tp->lock is held. */
8e7a22e3 6295static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6296{
6297 u32 val, rdmac_mode;
6298 int i, err, limit;
6299
6300 tg3_disable_ints(tp);
6301
6302 tg3_stop_fw(tp);
6303
6304 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6305
6306 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 6307 tg3_abort_hw(tp, 1);
1da177e4
LT
6308 }
6309
36da4d86 6310 if (reset_phy)
d4d2c558
MC
6311 tg3_phy_reset(tp);
6312
1da177e4
LT
6313 err = tg3_chip_reset(tp);
6314 if (err)
6315 return err;
6316
6317 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6318
d30cdd28
MC
6319 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6320 val = tr32(TG3_CPMU_CTRL);
6321 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6322 tw32(TG3_CPMU_CTRL, val);
6323 }
6324
1da177e4
LT
6325 /* This works around an issue with Athlon chipsets on
6326 * B3 tigon3 silicon. This bit has no effect on any
6327 * other revision. But do not set this on PCI Express
795d01c5 6328 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 6329 */
795d01c5
MC
6330 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6331 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6332 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6333 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6334 }
1da177e4
LT
6335
6336 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6337 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6338 val = tr32(TG3PCI_PCISTATE);
6339 val |= PCISTATE_RETRY_SAME_DMA;
6340 tw32(TG3PCI_PCISTATE, val);
6341 }
6342
0d3031d9
MC
6343 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6344 /* Allow reads and writes to the
6345 * APE register and memory space.
6346 */
6347 val = tr32(TG3PCI_PCISTATE);
6348 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6349 PCISTATE_ALLOW_APE_SHMEM_WR;
6350 tw32(TG3PCI_PCISTATE, val);
6351 }
6352
1da177e4
LT
6353 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6354 /* Enable some hw fixes. */
6355 val = tr32(TG3PCI_MSI_DATA);
6356 val |= (1 << 26) | (1 << 28) | (1 << 29);
6357 tw32(TG3PCI_MSI_DATA, val);
6358 }
6359
6360 /* Descriptor ring init may make accesses to the
6361 * NIC SRAM area to setup the TX descriptors, so we
6362 * can only do this after the hardware has been
6363 * successfully reset.
6364 */
32d8c572
MC
6365 err = tg3_init_rings(tp);
6366 if (err)
6367 return err;
1da177e4 6368
9936bcf6
MC
6369 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6370 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
6371 /* This value is determined during the probe time DMA
6372 * engine test, tg3_test_dma.
6373 */
6374 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6375 }
1da177e4
LT
6376
6377 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6378 GRC_MODE_4X_NIC_SEND_RINGS |
6379 GRC_MODE_NO_TX_PHDR_CSUM |
6380 GRC_MODE_NO_RX_PHDR_CSUM);
6381 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
6382
6383 /* Pseudo-header checksum is done by hardware logic and not
6384 * the offload processers, so make the chip do the pseudo-
6385 * header checksums on receive. For transmit it is more
6386 * convenient to do the pseudo-header checksum in software
6387 * as Linux does that on transmit for us in all cases.
6388 */
6389 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
6390
6391 tw32(GRC_MODE,
6392 tp->grc_mode |
6393 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6394
6395 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6396 val = tr32(GRC_MISC_CFG);
6397 val &= ~0xff;
6398 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6399 tw32(GRC_MISC_CFG, val);
6400
6401 /* Initialize MBUF/DESC pool. */
cbf46853 6402 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
6403 /* Do nothing. */
6404 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6405 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6406 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6407 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6408 else
6409 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6410 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6411 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6412 }
1da177e4
LT
6413 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6414 int fw_len;
6415
6416 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6417 TG3_TSO5_FW_RODATA_LEN +
6418 TG3_TSO5_FW_DATA_LEN +
6419 TG3_TSO5_FW_SBSS_LEN +
6420 TG3_TSO5_FW_BSS_LEN);
6421 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6422 tw32(BUFMGR_MB_POOL_ADDR,
6423 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6424 tw32(BUFMGR_MB_POOL_SIZE,
6425 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6426 }
1da177e4 6427
0f893dc6 6428 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
6429 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6430 tp->bufmgr_config.mbuf_read_dma_low_water);
6431 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6432 tp->bufmgr_config.mbuf_mac_rx_low_water);
6433 tw32(BUFMGR_MB_HIGH_WATER,
6434 tp->bufmgr_config.mbuf_high_water);
6435 } else {
6436 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6437 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6438 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6439 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6440 tw32(BUFMGR_MB_HIGH_WATER,
6441 tp->bufmgr_config.mbuf_high_water_jumbo);
6442 }
6443 tw32(BUFMGR_DMA_LOW_WATER,
6444 tp->bufmgr_config.dma_low_water);
6445 tw32(BUFMGR_DMA_HIGH_WATER,
6446 tp->bufmgr_config.dma_high_water);
6447
6448 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6449 for (i = 0; i < 2000; i++) {
6450 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6451 break;
6452 udelay(10);
6453 }
6454 if (i >= 2000) {
6455 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6456 tp->dev->name);
6457 return -ENODEV;
6458 }
6459
6460 /* Setup replenish threshold. */
f92905de
MC
6461 val = tp->rx_pending / 8;
6462 if (val == 0)
6463 val = 1;
6464 else if (val > tp->rx_std_max_post)
6465 val = tp->rx_std_max_post;
b5d3772c
MC
6466 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6467 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6468 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6469
6470 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6471 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6472 }
f92905de
MC
6473
6474 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6475
6476 /* Initialize TG3_BDINFO's at:
6477 * RCVDBDI_STD_BD: standard eth size rx ring
6478 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6479 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6480 *
6481 * like so:
6482 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6483 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6484 * ring attribute flags
6485 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6486 *
6487 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6488 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6489 *
6490 * The size of each ring is fixed in the firmware, but the location is
6491 * configurable.
6492 */
6493 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6494 ((u64) tp->rx_std_mapping >> 32));
6495 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6496 ((u64) tp->rx_std_mapping & 0xffffffff));
6497 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6498 NIC_SRAM_RX_BUFFER_DESC);
6499
6500 /* Don't even try to program the JUMBO/MINI buffer descriptor
6501 * configs on 5705.
6502 */
6503 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6504 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6505 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6506 } else {
6507 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6508 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6509
6510 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6511 BDINFO_FLAGS_DISABLED);
6512
6513 /* Setup replenish threshold. */
6514 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6515
0f893dc6 6516 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6517 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6518 ((u64) tp->rx_jumbo_mapping >> 32));
6519 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6520 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6521 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6522 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6523 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6524 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6525 } else {
6526 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6527 BDINFO_FLAGS_DISABLED);
6528 }
6529
6530 }
6531
6532 /* There is only one send ring on 5705/5750, no need to explicitly
6533 * disable the others.
6534 */
6535 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6536 /* Clear out send RCB ring in SRAM. */
6537 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6538 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6539 BDINFO_FLAGS_DISABLED);
6540 }
6541
6542 tp->tx_prod = 0;
6543 tp->tx_cons = 0;
6544 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6545 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6546
6547 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6548 tp->tx_desc_mapping,
6549 (TG3_TX_RING_SIZE <<
6550 BDINFO_FLAGS_MAXLEN_SHIFT),
6551 NIC_SRAM_TX_BUFFER_DESC);
6552
6553 /* There is only one receive return ring on 5705/5750, no need
6554 * to explicitly disable the others.
6555 */
6556 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6557 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6558 i += TG3_BDINFO_SIZE) {
6559 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6560 BDINFO_FLAGS_DISABLED);
6561 }
6562 }
6563
6564 tp->rx_rcb_ptr = 0;
6565 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6566
6567 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6568 tp->rx_rcb_mapping,
6569 (TG3_RX_RCB_RING_SIZE(tp) <<
6570 BDINFO_FLAGS_MAXLEN_SHIFT),
6571 0);
6572
6573 tp->rx_std_ptr = tp->rx_pending;
6574 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6575 tp->rx_std_ptr);
6576
0f893dc6 6577 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6578 tp->rx_jumbo_pending : 0;
6579 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6580 tp->rx_jumbo_ptr);
6581
6582 /* Initialize MAC address and backoff seed. */
986e0aeb 6583 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
6584
6585 /* MTU + ethernet header + FCS + optional VLAN tag */
6586 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6587
6588 /* The slot time is changed by tg3_setup_phy if we
6589 * run at gigabit with half duplex.
6590 */
6591 tw32(MAC_TX_LENGTHS,
6592 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6593 (6 << TX_LENGTHS_IPG_SHIFT) |
6594 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6595
6596 /* Receive rules. */
6597 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6598 tw32(RCVLPC_CONFIG, 0x0181);
6599
6600 /* Calculate RDMAC_MODE setting early, we need it to determine
6601 * the RCVLPC_STATE_ENABLE mask.
6602 */
6603 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6604 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6605 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6606 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6607 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 6608
d30cdd28
MC
6609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6610 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6611 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6612 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6613
85e94ced
MC
6614 /* If statement applies to 5705 and 5750 PCI devices only */
6615 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6616 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6617 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4 6618 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
c13e3713 6619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
6620 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6621 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6622 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6623 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6624 }
6625 }
6626
85e94ced
MC
6627 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6628 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6629
1da177e4
LT
6630 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6631 rdmac_mode |= (1 << 27);
1da177e4
LT
6632
6633 /* Receive/send statistics. */
1661394e
MC
6634 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6635 val = tr32(RCVLPC_STATS_ENABLE);
6636 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6637 tw32(RCVLPC_STATS_ENABLE, val);
6638 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6639 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
1da177e4
LT
6640 val = tr32(RCVLPC_STATS_ENABLE);
6641 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6642 tw32(RCVLPC_STATS_ENABLE, val);
6643 } else {
6644 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6645 }
6646 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6647 tw32(SNDDATAI_STATSENAB, 0xffffff);
6648 tw32(SNDDATAI_STATSCTRL,
6649 (SNDDATAI_SCTRL_ENABLE |
6650 SNDDATAI_SCTRL_FASTUPD));
6651
6652 /* Setup host coalescing engine. */
6653 tw32(HOSTCC_MODE, 0);
6654 for (i = 0; i < 2000; i++) {
6655 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6656 break;
6657 udelay(10);
6658 }
6659
d244c892 6660 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6661
6662 /* set status block DMA address */
6663 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6664 ((u64) tp->status_mapping >> 32));
6665 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6666 ((u64) tp->status_mapping & 0xffffffff));
6667
6668 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6669 /* Status/statistics block address. See tg3_timer,
6670 * the tg3_periodic_fetch_stats call there, and
6671 * tg3_get_stats to see how this works for 5705/5750 chips.
6672 */
1da177e4
LT
6673 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6674 ((u64) tp->stats_mapping >> 32));
6675 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6676 ((u64) tp->stats_mapping & 0xffffffff));
6677 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6678 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6679 }
6680
6681 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6682
6683 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6684 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6685 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6686 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6687
6688 /* Clear statistics/status block in chip, and status block in ram. */
6689 for (i = NIC_SRAM_STATS_BLK;
6690 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6691 i += sizeof(u32)) {
6692 tg3_write_mem(tp, i, 0);
6693 udelay(40);
6694 }
6695 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6696
c94e3941
MC
6697 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6698 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6699 /* reset to prevent losing 1st rx packet intermittently */
6700 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6701 udelay(10);
6702 }
6703
1da177e4
LT
6704 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6705 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
e8f3f6ca
MC
6706 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6707 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6708 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6709 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
6710 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6711 udelay(40);
6712
314fba34 6713 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9d26e213 6714 * If TG3_FLG2_IS_NIC is zero, we should read the
314fba34
MC
6715 * register to preserve the GPIO settings for LOMs. The GPIOs,
6716 * whether used as inputs or outputs, are set by boot code after
6717 * reset.
6718 */
9d26e213 6719 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
314fba34
MC
6720 u32 gpio_mask;
6721
9d26e213
MC
6722 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6723 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6724 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
6725
6726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6727 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6728 GRC_LCLCTRL_GPIO_OUTPUT3;
6729
af36e6b6
MC
6730 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6731 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6732
aaf84465 6733 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
6734 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6735
6736 /* GPIO1 must be driven high for eeprom write protect */
9d26e213
MC
6737 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6738 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6739 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 6740 }
1da177e4
LT
6741 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6742 udelay(100);
6743
09ee929c 6744 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 6745 tp->last_tag = 0;
1da177e4
LT
6746
6747 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6748 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6749 udelay(40);
6750 }
6751
6752 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6753 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6754 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6755 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6756 WDMAC_MODE_LNGREAD_ENAB);
6757
85e94ced
MC
6758 /* If statement applies to 5705 and 5750 PCI devices only */
6759 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6760 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6761 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6762 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6763 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6764 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6765 /* nothing */
6766 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6767 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6768 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6769 val |= WDMAC_MODE_RX_ACCEL;
6770 }
6771 }
6772
d9ab5ad1 6773 /* Enable host coalescing bug fix */
af36e6b6 6774 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
d30cdd28 6775 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
9936bcf6
MC
6776 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6777 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
d9ab5ad1
MC
6778 val |= (1 << 29);
6779
1da177e4
LT
6780 tw32_f(WDMAC_MODE, val);
6781 udelay(40);
6782
9974a356
MC
6783 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6784 u16 pcix_cmd;
6785
6786 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6787 &pcix_cmd);
1da177e4 6788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
6789 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6790 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6791 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
6792 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6793 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6794 }
9974a356
MC
6795 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6796 pcix_cmd);
1da177e4
LT
6797 }
6798
6799 tw32_f(RDMAC_MODE, rdmac_mode);
6800 udelay(40);
6801
6802 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6803 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6804 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
6805
6806 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6807 tw32(SNDDATAC_MODE,
6808 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6809 else
6810 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6811
1da177e4
LT
6812 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6813 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6814 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6815 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1da177e4
LT
6816 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6817 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
1da177e4
LT
6818 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6819 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6820
6821 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6822 err = tg3_load_5701_a0_firmware_fix(tp);
6823 if (err)
6824 return err;
6825 }
6826
1da177e4
LT
6827 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6828 err = tg3_load_tso_firmware(tp);
6829 if (err)
6830 return err;
6831 }
1da177e4
LT
6832
6833 tp->tx_mode = TX_MODE_ENABLE;
6834 tw32_f(MAC_TX_MODE, tp->tx_mode);
6835 udelay(100);
6836
6837 tp->rx_mode = RX_MODE_ENABLE;
9936bcf6
MC
6838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
af36e6b6
MC
6840 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6841
1da177e4
LT
6842 tw32_f(MAC_RX_MODE, tp->rx_mode);
6843 udelay(10);
6844
6845 if (tp->link_config.phy_is_low_power) {
6846 tp->link_config.phy_is_low_power = 0;
6847 tp->link_config.speed = tp->link_config.orig_speed;
6848 tp->link_config.duplex = tp->link_config.orig_duplex;
6849 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6850 }
6851
6852 tp->mi_mode = MAC_MI_MODE_BASE;
6853 tw32_f(MAC_MI_MODE, tp->mi_mode);
6854 udelay(80);
6855
6856 tw32(MAC_LED_CTRL, tp->led_ctrl);
6857
6858 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6859 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6860 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6861 udelay(10);
6862 }
6863 tw32_f(MAC_RX_MODE, tp->rx_mode);
6864 udelay(10);
6865
6866 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6867 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6868 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6869 /* Set drive transmission level to 1.2V */
6870 /* only if the signal pre-emphasis bit is not set */
6871 val = tr32(MAC_SERDES_CFG);
6872 val &= 0xfffff000;
6873 val |= 0x880;
6874 tw32(MAC_SERDES_CFG, val);
6875 }
6876 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6877 tw32(MAC_SERDES_CFG, 0x616000);
6878 }
6879
6880 /* Prevent chip from dropping frames when flow control
6881 * is enabled.
6882 */
6883 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6884
6885 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6886 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6887 /* Use hardware link auto-negotiation */
6888 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6889 }
6890
d4d2c558
MC
6891 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6892 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6893 u32 tmp;
6894
6895 tmp = tr32(SERDES_RX_CTRL);
6896 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6897 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6898 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6899 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6900 }
6901
36da4d86 6902 err = tg3_setup_phy(tp, 0);
1da177e4
LT
6903 if (err)
6904 return err;
6905
715116a1
MC
6906 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6907 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
1da177e4
LT
6908 u32 tmp;
6909
6910 /* Clear CRC stats. */
569a5df8
MC
6911 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6912 tg3_writephy(tp, MII_TG3_TEST1,
6913 tmp | MII_TG3_TEST1_CRC_EN);
1da177e4
LT
6914 tg3_readphy(tp, 0x14, &tmp);
6915 }
6916 }
6917
6918 __tg3_set_rx_mode(tp->dev);
6919
6920 /* Initialize receive rules. */
6921 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6922 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6923 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6924 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6925
4cf78e4f 6926 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6927 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6928 limit = 8;
6929 else
6930 limit = 16;
6931 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6932 limit -= 4;
6933 switch (limit) {
6934 case 16:
6935 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6936 case 15:
6937 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6938 case 14:
6939 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6940 case 13:
6941 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6942 case 12:
6943 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6944 case 11:
6945 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6946 case 10:
6947 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6948 case 9:
6949 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6950 case 8:
6951 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6952 case 7:
6953 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6954 case 6:
6955 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6956 case 5:
6957 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6958 case 4:
6959 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6960 case 3:
6961 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6962 case 2:
6963 case 1:
6964
6965 default:
6966 break;
6967 };
6968
0d3031d9
MC
6969 /* Write our heartbeat update interval to APE. */
6970 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
6971 APE_HOST_HEARTBEAT_INT_DISABLE);
6972
1da177e4
LT
6973 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6974
1da177e4
LT
6975 return 0;
6976}
6977
6978/* Called at device open time to get the chip ready for
6979 * packet processing. Invoked with tp->lock held.
6980 */
8e7a22e3 6981static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6982{
6983 int err;
6984
6985 /* Force the chip into D0. */
bc1c7567 6986 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
6987 if (err)
6988 goto out;
6989
6990 tg3_switch_clocks(tp);
6991
6992 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6993
8e7a22e3 6994 err = tg3_reset_hw(tp, reset_phy);
1da177e4
LT
6995
6996out:
6997 return err;
6998}
6999
/* Accumulate the 32-bit hardware counter at register REG into the
 * 64-bit software counter PSTAT, kept as a ->low/->high pair.  If the
 * 32-bit addition into ->low wraps (the sum ends up smaller than the
 * value just added), carry one into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG)			\
do {							\
	u32 __delta = tr32(REG);			\
	(PSTAT)->low += __delta;			\
	if ((PSTAT)->low < __delta)			\
		(PSTAT)->high += 1;			\
} while (0)
7006
7007static void tg3_periodic_fetch_stats(struct tg3 *tp)
7008{
7009 struct tg3_hw_stats *sp = tp->hw_stats;
7010
7011 if (!netif_carrier_ok(tp->dev))
7012 return;
7013
7014 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7015 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7016 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7017 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7018 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7019 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7020 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7021 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7022 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7023 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7024 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7025 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7026 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7027
7028 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7029 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7030 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7031 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7032 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7033 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7034 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7035 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7036 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7037 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7038 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7039 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7040 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7041 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
463d305b
MC
7042
7043 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7044 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7045 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
1da177e4
LT
7046}
7047
7048static void tg3_timer(unsigned long __opaque)
7049{
7050 struct tg3 *tp = (struct tg3 *) __opaque;
1da177e4 7051
f475f163
MC
7052 if (tp->irq_sync)
7053 goto restart_timer;
7054
f47c11ee 7055 spin_lock(&tp->lock);
1da177e4 7056
fac9b83e
DM
7057 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7058 /* All of this garbage is because when using non-tagged
7059 * IRQ status the mailbox/status_block protocol the chip
7060 * uses with the cpu is race prone.
7061 */
7062 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7063 tw32(GRC_LOCAL_CTRL,
7064 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7065 } else {
7066 tw32(HOSTCC_MODE, tp->coalesce_mode |
7067 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7068 }
1da177e4 7069
fac9b83e
DM
7070 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7071 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
f47c11ee 7072 spin_unlock(&tp->lock);
fac9b83e
DM
7073 schedule_work(&tp->reset_task);
7074 return;
7075 }
1da177e4
LT
7076 }
7077
1da177e4
LT
7078 /* This part only runs once per second. */
7079 if (!--tp->timer_counter) {
fac9b83e
DM
7080 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7081 tg3_periodic_fetch_stats(tp);
7082
1da177e4
LT
7083 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7084 u32 mac_stat;
7085 int phy_event;
7086
7087 mac_stat = tr32(MAC_STATUS);
7088
7089 phy_event = 0;
7090 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7091 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7092 phy_event = 1;
7093 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7094 phy_event = 1;
7095
7096 if (phy_event)
7097 tg3_setup_phy(tp, 0);
7098 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7099 u32 mac_stat = tr32(MAC_STATUS);
7100 int need_setup = 0;
7101
7102 if (netif_carrier_ok(tp->dev) &&
7103 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7104 need_setup = 1;
7105 }
7106 if (! netif_carrier_ok(tp->dev) &&
7107 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7108 MAC_STATUS_SIGNAL_DET))) {
7109 need_setup = 1;
7110 }
7111 if (need_setup) {
3d3ebe74
MC
7112 if (!tp->serdes_counter) {
7113 tw32_f(MAC_MODE,
7114 (tp->mac_mode &
7115 ~MAC_MODE_PORT_MODE_MASK));
7116 udelay(40);
7117 tw32_f(MAC_MODE, tp->mac_mode);
7118 udelay(40);
7119 }
1da177e4
LT
7120 tg3_setup_phy(tp, 0);
7121 }
747e8f8b
MC
7122 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7123 tg3_serdes_parallel_detect(tp);
1da177e4
LT
7124
7125 tp->timer_counter = tp->timer_multiplier;
7126 }
7127
130b8e4d
MC
7128 /* Heartbeat is only sent once every 2 seconds.
7129 *
7130 * The heartbeat is to tell the ASF firmware that the host
7131 * driver is still alive. In the event that the OS crashes,
7132 * ASF needs to reset the hardware to free up the FIFO space
7133 * that may be filled with rx packets destined for the host.
7134 * If the FIFO is full, ASF will no longer function properly.
7135 *
7136 * Unintended resets have been reported on real time kernels
7137 * where the timer doesn't run on time. Netpoll will also have
7138 * same problem.
7139 *
7140 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7141 * to check the ring condition when the heartbeat is expiring
7142 * before doing the reset. This will prevent most unintended
7143 * resets.
7144 */
1da177e4
LT
7145 if (!--tp->asf_counter) {
7146 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7147 u32 val;
7148
bbadf503 7149 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
130b8e4d 7150 FWCMD_NICDRV_ALIVE3);
bbadf503 7151 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
28fbef78 7152 /* 5 seconds timeout */
bbadf503 7153 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
1da177e4
LT
7154 val = tr32(GRC_RX_CPU_EVENT);
7155 val |= (1 << 14);
7156 tw32(GRC_RX_CPU_EVENT, val);
7157 }
7158 tp->asf_counter = tp->asf_multiplier;
7159 }
7160
f47c11ee 7161 spin_unlock(&tp->lock);
1da177e4 7162
f475f163 7163restart_timer:
1da177e4
LT
7164 tp->timer.expires = jiffies + tp->timer_offset;
7165 add_timer(&tp->timer);
7166}
7167
81789ef5 7168static int tg3_request_irq(struct tg3 *tp)
fcfa0a32 7169{
7d12e780 7170 irq_handler_t fn;
fcfa0a32
MC
7171 unsigned long flags;
7172 struct net_device *dev = tp->dev;
7173
7174 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7175 fn = tg3_msi;
7176 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7177 fn = tg3_msi_1shot;
1fb9df5d 7178 flags = IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7179 } else {
7180 fn = tg3_interrupt;
7181 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7182 fn = tg3_interrupt_tagged;
1fb9df5d 7183 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7184 }
7185 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7186}
7187
7938109f
MC
7188static int tg3_test_interrupt(struct tg3 *tp)
7189{
7190 struct net_device *dev = tp->dev;
b16250e3 7191 int err, i, intr_ok = 0;
7938109f 7192
d4bc3927
MC
7193 if (!netif_running(dev))
7194 return -ENODEV;
7195
7938109f
MC
7196 tg3_disable_ints(tp);
7197
7198 free_irq(tp->pdev->irq, dev);
7199
7200 err = request_irq(tp->pdev->irq, tg3_test_isr,
1fb9df5d 7201 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7938109f
MC
7202 if (err)
7203 return err;
7204
38f3843e 7205 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
7206 tg3_enable_ints(tp);
7207
7208 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7209 HOSTCC_MODE_NOW);
7210
7211 for (i = 0; i < 5; i++) {
b16250e3
MC
7212 u32 int_mbox, misc_host_ctrl;
7213
09ee929c
MC
7214 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7215 TG3_64BIT_REG_LOW);
b16250e3
MC
7216 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7217
7218 if ((int_mbox != 0) ||
7219 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7220 intr_ok = 1;
7938109f 7221 break;
b16250e3
MC
7222 }
7223
7938109f
MC
7224 msleep(10);
7225 }
7226
7227 tg3_disable_ints(tp);
7228
7229 free_irq(tp->pdev->irq, dev);
6aa20a22 7230
fcfa0a32 7231 err = tg3_request_irq(tp);
7938109f
MC
7232
7233 if (err)
7234 return err;
7235
b16250e3 7236 if (intr_ok)
7938109f
MC
7237 return 0;
7238
7239 return -EIO;
7240}
7241
7242/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7243 * successfully restored
7244 */
7245static int tg3_test_msi(struct tg3 *tp)
7246{
7247 struct net_device *dev = tp->dev;
7248 int err;
7249 u16 pci_cmd;
7250
7251 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7252 return 0;
7253
7254 /* Turn off SERR reporting in case MSI terminates with Master
7255 * Abort.
7256 */
7257 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7258 pci_write_config_word(tp->pdev, PCI_COMMAND,
7259 pci_cmd & ~PCI_COMMAND_SERR);
7260
7261 err = tg3_test_interrupt(tp);
7262
7263 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7264
7265 if (!err)
7266 return 0;
7267
7268 /* other failures */
7269 if (err != -EIO)
7270 return err;
7271
7272 /* MSI test failed, go back to INTx mode */
7273 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7274 "switching to INTx mode. Please report this failure to "
7275 "the PCI maintainer and include system chipset information.\n",
7276 tp->dev->name);
7277
7278 free_irq(tp->pdev->irq, dev);
7279 pci_disable_msi(tp->pdev);
7280
7281 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7282
fcfa0a32 7283 err = tg3_request_irq(tp);
7938109f
MC
7284 if (err)
7285 return err;
7286
7287 /* Need to reset the chip because the MSI cycle may have terminated
7288 * with Master Abort.
7289 */
f47c11ee 7290 tg3_full_lock(tp, 1);
7938109f 7291
944d980e 7292 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8e7a22e3 7293 err = tg3_init_hw(tp, 1);
7938109f 7294
f47c11ee 7295 tg3_full_unlock(tp);
7938109f
MC
7296
7297 if (err)
7298 free_irq(tp->pdev->irq, dev);
7299
7300 return err;
7301}
7302
1da177e4
LT
/* ndo_open handler: bring the device from closed to fully running.
 *
 * Sequence (order matters):
 *   1. power the chip to D0 and clear INIT_COMPLETE under the full lock,
 *   2. allocate DMA-consistent rings/status block,
 *   3. optionally enable MSI (with workarounds) and request the IRQ,
 *   4. init the hardware, program the periodic timer,
 *   5. verify MSI actually delivers an interrupt (tg3_test_msi),
 *   6. arm the timer, enable interrupts, start the TX queue.
 * Every failure path unwinds exactly what was set up before it.
 * Returns 0 on success or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Hardware bug - MSI won't work if INTX disabled. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				pci_intx(tp->pdev, 1);

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo MSI enable and ring allocation done above. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged-status chips need the timer only once per second;
		 * others poll ten times a second.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Make sure an MSI interrupt actually arrives; if not,
		 * tg3_test_msi() falls back to INTx and resets the chip.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7442
#if 0
/* Debug-only dump of the chip's major control blocks, SRAM ring control
 * blocks, status/statistics blocks and the first few NIC-side TX/RX
 * descriptors to the kernel log.  Compiled out (#if 0); kept for manual
 * enabling while diagnosing hardware lockups.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* Ring control blocks live in NIC SRAM; read them via tg3_read_mem. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7670
7671static struct net_device_stats *tg3_get_stats(struct net_device *);
7672static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7673
/* ndo_stop handler: the reverse of tg3_open().
 *
 * Order matters: NAPI and the reset worker are quiesced before the queue
 * is stopped and the timer deleted; the chip is halted under the full
 * lock; the IRQ/MSI are released only after interrupts are disabled.
 * The final hardware statistics are snapshotted into *_prev before the
 * DMA-consistent memory (which holds hw_stats) is freed, so that
 * tg3_get_stats()/tg3_get_estats() keep returning sane numbers while
 * the device is down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot cumulative stats before hw_stats memory is freed. */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7717
7718static inline unsigned long get_stat64(tg3_stat64_t *val)
7719{
7720 unsigned long ret;
7721
7722#if (BITS_PER_LONG == 32)
7723 ret = val->low;
7724#else
7725 ret = ((u64)val->high << 32) | ((u64)val->low);
7726#endif
7727 return ret;
7728}
7729
/* Return the cumulative RX CRC error count.
 *
 * 5700/5701 copper chips read the error count from the PHY's CRC test
 * register instead of the MAC statistics block: MII_TG3_TEST1 has its
 * CRC counter enable bit set and register 0x14 is then read (reading
 * it presumably clears the hardware counter, since the value is
 * accumulated into tp->phy_crc_errors -- TODO confirm against the PHY
 * datasheet).  All other chips use the rx_fcs_errors hardware counter.
 * Caller must guarantee tp->hw_stats is valid for the non-PHY path.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY access must be serialized against other MDIO users. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7755
/* Accumulate one ethtool statistic: previous snapshot (taken at the last
 * tg3_close()) plus the live hardware counter, so totals survive chip
 * resets and down/up cycles.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)

/* Refresh and return tp->estats, the driver's cumulative ethtool
 * statistics block.  If the device is down (hw_stats freed), return the
 * snapshot saved at close time instead.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7847
/* ndo_get_stats handler: build the generic netdev statistics from the
 * hardware counter block.  Each field is the snapshot taken at the last
 * close plus the current hardware counter, so values persist across
 * down/up cycles.  If the device is down (hw_stats is NULL) the saved
 * snapshot is returned unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 -- see
	 * calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7907
7908static inline u32 calc_crc(unsigned char *buf, int len)
7909{
7910 u32 reg;
7911 u32 tmp;
7912 int j, k;
7913
7914 reg = 0xffffffff;
7915
7916 for (j = 0; j < len; j++) {
7917 reg ^= buf[j];
7918
7919 for (k = 0; k < 8; k++) {
7920 tmp = reg & 0x01;
7921
7922 reg >>= 1;
7923
7924 if (tmp) {
7925 reg ^= 0xedb88320;
7926 }
7927 }
7928 }
7929
7930 return ~reg;
7931}
7932
7933static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7934{
7935 /* accept or reject all multicast frames */
7936 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7937 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7938 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7939 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7940}
7941
/* Core rx-mode programming; caller must hold the full lock.
 *
 * Computes the new MAC_RX_MODE value (promiscuous / VLAN-tag-stripping
 * bits) and programs the multicast hash filter.  The hash bit for each
 * address is derived from the Ethernet CRC-32 of the MAC address:
 * 7 bits select one of 128 filter bits spread over four 32-bit
 * registers.  MAC_RX_MODE is only rewritten when it actually changed.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 7 bits of the inverted CRC pick the filter
			 * bit: bits 6:5 select the register, 4:0 the bit.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8005
/* ndo_set_rx_mode handler: take the full lock and delegate to
 * __tg3_set_rx_mode().  A no-op while the device is down -- the rx mode
 * will be reprogrammed from the saved state on the next open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
8017
/* Size in bytes of the ethtool register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN (32 * 1024)

/* ethtool get_regs_len hook: the dump size is fixed for all chips,
 * so the device argument is unused.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8024
/* ethtool get_regs hook: copy the chip's register space into the
 * caller-supplied TG3_REGDUMP_LEN byte buffer.
 *
 * The buffer is laid out at the registers' own offsets (GET_REG32_LOOP
 * repositions the output pointer to `base` before each group), with
 * unreadable gaps left zeroed by the initial memset.  Skipped entirely
 * when the PHY is in low-power mode, since touching registers then is
 * unsafe.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8097
/* ethtool get_eeprom_len hook: size of the NVRAM as probed at init. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8104
8105static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
1820180b 8106static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
8107
/* ethtool get_eeprom hook: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.
 *
 * NVRAM is read in aligned 32-bit words, so the transfer is split into
 * an unaligned head, a run of whole words, and an unaligned tail.  Each
 * word is passed through cpu_to_le32() before the byte-wise memcpy --
 * presumably so the output byte order matches what ethtool userspace
 * expects regardless of host endianness; verify on a big-endian host.
 * eeprom->len is updated to the number of bytes actually copied, even
 * on a partial failure.  Refuses with -EAGAIN while the PHY is in
 * low-power mode (NVRAM is inaccessible then).
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, val, b_offset, b_count;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8169
6aa20a22 8170static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
1da177e4
LT
8171
/* ethtool set_eeprom hook: write eeprom->len bytes from @data to NVRAM
 * at eeprom->offset.
 *
 * NVRAM writes must be whole, aligned 32-bit words, so unaligned edges
 * are handled read-modify-write: the word containing the start and/or
 * end of the range is read first, a bounce buffer is assembled with the
 * preserved edge bytes around the caller's data, and the widened,
 * aligned range is written in one tg3_nvram_write_block() call.
 * Returns 0 on success or a negative errno; -EAGAIN while the PHY is
 * in low-power mode, -EINVAL on a bad magic.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Bounce buffer: preserved edge word(s) + caller's data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8230
/* ethtool get_settings hook: report supported modes, current
 * advertisement, and -- only while the interface is up -- the active
 * speed/duplex.  SerDes devices report FIBRE, everything else TP/MII.
 * Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
6aa20a22 8265
1da177e4
LT
8266static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8267{
8268 struct tg3 *tp = netdev_priv(dev);
6aa20a22
JG
8269
8270 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
8271 /* These are the only valid advertisement bits allowed. */
8272 if (cmd->autoneg == AUTONEG_ENABLE &&
8273 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8274 ADVERTISED_1000baseT_Full |
8275 ADVERTISED_Autoneg |
8276 ADVERTISED_FIBRE)))
8277 return -EINVAL;
37ff238d
MC
8278 /* Fiber can only do SPEED_1000. */
8279 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8280 (cmd->speed != SPEED_1000))
8281 return -EINVAL;
8282 /* Copper cannot force SPEED_1000. */
8283 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8284 (cmd->speed == SPEED_1000))
8285 return -EINVAL;
8286 else if ((cmd->speed == SPEED_1000) &&
8287 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8288 return -EINVAL;
1da177e4 8289
f47c11ee 8290 tg3_full_lock(tp, 0);
1da177e4
LT
8291
8292 tp->link_config.autoneg = cmd->autoneg;
8293 if (cmd->autoneg == AUTONEG_ENABLE) {
405d8e5c
AG
8294 tp->link_config.advertising = (cmd->advertising |
8295 ADVERTISED_Autoneg);
1da177e4
LT
8296 tp->link_config.speed = SPEED_INVALID;
8297 tp->link_config.duplex = DUPLEX_INVALID;
8298 } else {
8299 tp->link_config.advertising = 0;
8300 tp->link_config.speed = cmd->speed;
8301 tp->link_config.duplex = cmd->duplex;
8302 }
6aa20a22 8303
24fcad6b
MC
8304 tp->link_config.orig_speed = tp->link_config.speed;
8305 tp->link_config.orig_duplex = tp->link_config.duplex;
8306 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8307
1da177e4
LT
8308 if (netif_running(dev))
8309 tg3_setup_phy(tp, 1);
8310
f47c11ee 8311 tg3_full_unlock(tp);
6aa20a22 8312
1da177e4
LT
8313 return 0;
8314}
6aa20a22 8315
1da177e4
LT
/* ethtool get_drvinfo hook: fill in driver name/version, firmware
 * version string cached at probe time, and the PCI bus address.
 * Destination fields are fixed-size arrays in struct ethtool_drvinfo;
 * the copied strings are driver-controlled constants.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
6aa20a22 8325
1da177e4
LT
8326static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8327{
8328 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8329
a85feb8c
GZ
8330 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8331 wol->supported = WAKE_MAGIC;
8332 else
8333 wol->supported = 0;
1da177e4
LT
8334 wol->wolopts = 0;
8335 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8336 wol->wolopts = WAKE_MAGIC;
8337 memset(&wol->sopass, 0, sizeof(wol->sopass));
8338}
6aa20a22 8339
1da177e4
LT
/* ethtool set_wol hook: enable or disable magic-packet Wake-on-LAN.
 * Rejects any wake option other than WAKE_MAGIC, and WAKE_MAGIC itself
 * on hardware that lacks WoL capability.  Returns 0 or -EINVAL.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
		return -EINVAL;

	/* tg3_flags is modified under tp->lock. */
	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}
6aa20a22 8359
1da177e4
LT
/* ethtool_ops::get_msglevel - return the netif message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
6aa20a22 8365
1da177e4
LT
/* ethtool_ops::set_msglevel - set the netif message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
6aa20a22 8371
1da177e4
LT
/* ethtool_ops::set_tso - enable or disable TCP segmentation offload.
 *
 * Keeps the extended feature bits (TSO6, TSO_ECN) in step with the base
 * TSO bit on chips that support them, then delegates to the generic
 * ethtool helper for NETIF_F_TSO itself.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Chips without TSO support can only accept "off". */
	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	/* HW_TSO_2 parts other than the 5906 also handle TSO over IPv6;
	 * the 5761 additionally gets NETIF_F_TSO_ECN.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
6aa20a22 8392
1da177e4
LT
/* ethtool_ops::nway_reset - restart autonegotiation on the copper PHY.
 *
 * Returns -EAGAIN if the interface is down, -EINVAL on SERDES parts
 * (no MII autoneg to restart) or when autoneg is not currently enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* BMCR is read twice and only the second result is used —
	 * presumably to flush a stale/latched value; verify before
	 * "simplifying" this away.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
6aa20a22 8419
1da177e4
LT
8420static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8421{
8422 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8423
1da177e4
LT
8424 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8425 ering->rx_mini_max_pending = 0;
4f81c32b
MC
8426 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8427 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8428 else
8429 ering->rx_jumbo_max_pending = 0;
8430
8431 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
8432
8433 ering->rx_pending = tp->rx_pending;
8434 ering->rx_mini_pending = 0;
4f81c32b
MC
8435 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8436 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8437 else
8438 ering->rx_jumbo_pending = 0;
8439
1da177e4
LT
8440 ering->tx_pending = tp->tx_pending;
8441}
6aa20a22 8442
1da177e4
LT
/* ethtool_ops::set_ringparam - resize the RX, RX jumbo and TX rings.
 *
 * When the interface is running the chip is halted and re-initialized
 * so the new sizes take effect.  Returns -EINVAL for unsupported sizes
 * or the error from tg3_restart_hw().
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* TX ring must hold more than MAX_SKB_FRAGS entries; chips with
	 * the TSO bug need three times that — presumably because the
	 * workaround can expand one frame into several (TODO confirm).
	 */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot post more than 64 standard RX BDs. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Reset and re-init so the new ring sizes take effect. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8482
1da177e4
LT
8483static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8484{
8485 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8486
1da177e4
LT
8487 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8488 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8489 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8490}
6aa20a22 8491
1da177e4
LT
/* ethtool_ops::set_pauseparam - update flow-control settings.
 *
 * The new flags reach the hardware through the full halt/restart that
 * is performed when the interface is running.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {
		/* Re-init so the new pause configuration takes effect. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8528
1da177e4
LT
/* ethtool_ops::get_rx_csum - report whether RX checksum offload is on. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
6aa20a22 8534
1da177e4
LT
8535static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8536{
8537 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8538
1da177e4
LT
8539 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8540 if (data != 0)
8541 return -EINVAL;
8542 return 0;
8543 }
6aa20a22 8544
f47c11ee 8545 spin_lock_bh(&tp->lock);
1da177e4
LT
8546 if (data)
8547 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8548 else
8549 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 8550 spin_unlock_bh(&tp->lock);
6aa20a22 8551
1da177e4
LT
8552 return 0;
8553}
6aa20a22 8554
1da177e4
LT
/* ethtool_ops::set_tx_csum - toggle TX checksum offload.
 *
 * 5755/5787/5784/5761 also get IPv6 TX checksumming toggled together
 * with IPv4; older parts use the IPv4-only helper.
 */
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Chips with broken checksum hardware only accept "off". */
	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		ethtool_op_set_tx_ipv6_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
8575
b9f2c044 8576static int tg3_get_sset_count (struct net_device *dev, int sset)
1da177e4 8577{
b9f2c044
JG
8578 switch (sset) {
8579 case ETH_SS_TEST:
8580 return TG3_NUM_TEST;
8581 case ETH_SS_STATS:
8582 return TG3_NUM_STATS;
8583 default:
8584 return -EOPNOTSUPP;
8585 }
4cafd3f5
MC
8586}
8587
1da177e4
LT
/* ethtool_ops::get_strings - copy out the label table matching
 * tg3_get_sset_count() for the requested string set.
 */
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
8602
4009a93d
MC
8603static int tg3_phys_id(struct net_device *dev, u32 data)
8604{
8605 struct tg3 *tp = netdev_priv(dev);
8606 int i;
8607
8608 if (!netif_running(tp->dev))
8609 return -EAGAIN;
8610
8611 if (data == 0)
8612 data = 2;
8613
8614 for (i = 0; i < (data * 2); i++) {
8615 if ((i % 2) == 0)
8616 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8617 LED_CTRL_1000MBPS_ON |
8618 LED_CTRL_100MBPS_ON |
8619 LED_CTRL_10MBPS_ON |
8620 LED_CTRL_TRAFFIC_OVERRIDE |
8621 LED_CTRL_TRAFFIC_BLINK |
8622 LED_CTRL_TRAFFIC_LED);
6aa20a22 8623
4009a93d
MC
8624 else
8625 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8626 LED_CTRL_TRAFFIC_OVERRIDE);
8627
8628 if (msleep_interruptible(500))
8629 break;
8630 }
8631 tw32(MAC_LED_CTRL, tp->led_ctrl);
8632 return 0;
8633}
8634
1da177e4
LT
/* ethtool_ops::get_ethtool_stats - snapshot the driver's statistics
 * block into the caller-supplied u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8641
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* Verify the NVRAM contents (ethtool self-test slot 0).
 *
 * Three layouts are recognized from the magic word at offset 0:
 *  - legacy images: CRC-checked bootstrap and manufacturing blocks,
 *  - selfboot format-1 images: 8-bit byte sum over the image must be 0,
 *  - selfboot "HW" images: per-byte odd-parity check.
 *
 * Returns 0 on success, -EIO on a bad image or read failure, and
 * -ENOMEM if the temporary buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick how many bytes to fetch based on the image type. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;	/* unrecognized selfboot revision: skip */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into buf[], stored little-endian. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format: whole-image byte sum must be zero. */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Selfboot HW format: bytes 0, 8 and 16 hold packed parity bits
	 * for the remaining data bytes; split them apart, then require
	 * odd parity for every data byte + parity-bit pair.
	 */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits packed in this byte. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* 6 parity bits here, 8 in the next byte. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* Fail if data weight and parity bit agree:
			 * combined weight must be odd.
			 */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Legacy image: bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8759
ca43007a
MC
8760#define TG3_SERDES_TIMEOUT_SEC 2
8761#define TG3_COPPER_TIMEOUT_SEC 6
8762
8763static int tg3_test_link(struct tg3 *tp)
8764{
8765 int i, max;
8766
8767 if (!netif_running(tp->dev))
8768 return -ENODEV;
8769
4c987487 8770 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
8771 max = TG3_SERDES_TIMEOUT_SEC;
8772 else
8773 max = TG3_COPPER_TIMEOUT_SEC;
8774
8775 for (i = 0; i < max; i++) {
8776 if (netif_carrier_ok(tp->dev))
8777 return 0;
8778
8779 if (msleep_interruptible(1000))
8780 break;
8781 }
8782
8783 return -EIO;
8784}
8785
/* Only test the commonly used registers */
/* Register self-test: for each table entry, write all-zeros and then
 * all-ones through the register, checking that read-only bits keep
 * their saved value and read/write bits take the written value.  The
 * original contents are restored afterwards.  Table entries are
 * filtered by chip family via the TG3_FL_* flags.
 *
 * Returns 0 on success, -EIO (after restoring the register) on the
 * first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip so table entries can be filtered. */
	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9006
7942e1db
MC
9007static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9008{
f71e1309 9009 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
9010 int i;
9011 u32 j;
9012
9013 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
9014 for (j = 0; j < len; j += 4) {
9015 u32 val;
9016
9017 tg3_write_mem(tp, offset + j, test_pattern[i]);
9018 tg3_read_mem(tp, offset + j, &val);
9019 if (val != test_pattern[i])
9020 return -EIO;
9021 }
9022 }
9023 return 0;
9024}
9025
/* Memory self-test: exercise each testable on-chip SRAM region with
 * tg3_do_mem_test().  The region table depends on the chip family.
 *
 * Returns 0 on success or the first -EIO from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	/* Each table is a { offset, len } list terminated by 0xffffffff. */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the region table matching this ASIC generation. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			mem_tbl = mem_tbl_5755;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			mem_tbl = mem_tbl_5906;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
9083
9f40dead
MC
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Send one 1514-byte test frame through the selected loopback path
 * (internal MAC loopback or PHY loopback) and verify it arrives intact
 * on the standard RX ring.
 *
 * Returns 0 on success, -EIO when the frame is not echoed correctly,
 * -ENOMEM if no skb can be allocated, -EINVAL for an unknown mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Route TX back to RX inside the MAC. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* Through the EPHY shadow bank, clear bit 5 of
			 * register 0x1b — presumably required for
			 * loopback on the 5906's internal PHY.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			/* 5906 is a 10/100 part; others loop back at 1000. */
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* Polarity fixups per external PHY model. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our MAC address, zero padding, then an
	 * incrementing byte pattern that is verified on receive.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Post the frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the completion: right ring, no errors, right length. */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Reported length includes the 4-byte FCS. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the payload pattern byte for byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9254
9f40dead
MC
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED | \
					 TG3_PHY_LOOPBACK_FAILED)

/* Run the MAC loopback test, and the PHY loopback test on non-SERDES
 * devices.  Returns a bitmask of TG3_*_LOOPBACK_FAILED flags; 0 means
 * every applicable test passed.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	/* On CPMU-equipped chips, take the hardware mutex and disable
	 * link-speed-based power management for the MAC loopback run.
	 */
	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		cpmuctrl = tr32(TG3_CPMU_CTRL);

		/* Turn off power management based on link speed. */
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		/* Restore CPMU state saved above. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback is skipped on fiber/SERDES devices. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9313
4cafd3f5
MC
/* ethtool_ops::self_test - run the diagnostic suite.
 *
 * data[] slots: 0 = NVRAM, 1 = link, 2 = registers, 3 = memory,
 * 4 = loopback, 5 = interrupt.  The offline tests halt and later
 * restart the chip, so traffic is disrupted while they run.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip if it was powered down; restored at the end. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* NVRAM and link tests are safe to run online. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its firmware CPUs before poking
		 * registers and memory directly.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test needs interrupts delivered, so the
		 * full lock is dropped around it.
		 */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the chip back up for normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9386
1da177e4
LT
/* net_device::do_ioctl - MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG).
 *
 * Returns -EOPNOTSUPP for unsupported commands and for SERDES devices
 * (no MDIO-accessible PHY), -EAGAIN while the PHY is powered down,
 * -EPERM for unprivileged writes.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
9438
9439#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new VLAN group and reprogram the
 * chip's receive mode accordingly.  If the interface is up, traffic is
 * quiesced (tg3_netif_stop) before taking the full lock and restarted
 * before releasing it.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
1da177e4
LT
9459#endif
9460
15f9850d
DM
9461static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9462{
9463 struct tg3 *tp = netdev_priv(dev);
9464
9465 memcpy(ec, &tp->coal, sizeof(*ec));
9466 return 0;
9467}
9468
d244c892
MC
9469static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9470{
9471 struct tg3 *tp = netdev_priv(dev);
9472 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9473 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9474
9475 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9476 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9477 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9478 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9479 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9480 }
9481
9482 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9483 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9484 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9485 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9486 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9487 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9488 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9489 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9490 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9491 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9492 return -EINVAL;
9493
9494 /* No rx interrupts will be generated if both are zero */
9495 if ((ec->rx_coalesce_usecs == 0) &&
9496 (ec->rx_max_coalesced_frames == 0))
9497 return -EINVAL;
9498
9499 /* No tx interrupts will be generated if both are zero */
9500 if ((ec->tx_coalesce_usecs == 0) &&
9501 (ec->tx_max_coalesced_frames == 0))
9502 return -EINVAL;
9503
9504 /* Only copy relevant parameters, ignore all others. */
9505 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9506 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9507 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9508 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9509 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9510 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9511 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9512 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9513 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9514
9515 if (netif_running(dev)) {
9516 tg3_full_lock(tp, 0);
9517 __tg3_set_coalesce(tp, &tp->coal);
9518 tg3_full_unlock(tp);
9519 }
9520 return 0;
9521}
9522
/* ethtool entry points exported by the tg3 driver.  Operations not
 * listed here fall back to the ethtool core defaults.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9555
9556static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9557{
1b27777a 9558 u32 cursize, val, magic;
1da177e4
LT
9559
9560 tp->nvram_size = EEPROM_CHIP_SIZE;
9561
1820180b 9562 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
1da177e4
LT
9563 return;
9564
b16250e3
MC
9565 if ((magic != TG3_EEPROM_MAGIC) &&
9566 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9567 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
1da177e4
LT
9568 return;
9569
9570 /*
9571 * Size the chip by reading offsets at increasing powers of two.
9572 * When we encounter our validation signature, we know the addressing
9573 * has wrapped around, and thus have our chip size.
9574 */
1b27777a 9575 cursize = 0x10;
1da177e4
LT
9576
9577 while (cursize < tp->nvram_size) {
1820180b 9578 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
1da177e4
LT
9579 return;
9580
1820180b 9581 if (val == magic)
1da177e4
LT
9582 break;
9583
9584 cursize <<= 1;
9585 }
9586
9587 tp->nvram_size = cursize;
9588}
6aa20a22 9589
1da177e4
LT
9590static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9591{
9592 u32 val;
9593
1820180b 9594 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
1b27777a
MC
9595 return;
9596
9597 /* Selfboot format */
1820180b 9598 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
9599 tg3_get_eeprom_size(tp);
9600 return;
9601 }
9602
1da177e4
LT
9603 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9604 if (val != 0) {
9605 tp->nvram_size = (val >> 16) * 1024;
9606 return;
9607 }
9608 }
989a9d23 9609 tp->nvram_size = 0x80000;
1da177e4
LT
9610}
9611
/* Decode NVRAM_CFG1 for pre-5752 devices: record whether a flash
 * interface is present, and for 5750/5780-class chips map the vendor
 * field to a JEDEC id and page size.  Older chips get the default
 * buffered Atmel AT45DB0X1B settings.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: disable compatibility bypass so
		 * the EEPROM is accessed through the normal state machine.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	}
	else {
		/* Default for chips that do not encode a vendor. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9664
361b4ac2
MC
/* Decode NVRAM_CFG1 for 5752 devices: note TPM write protection,
 * map the vendor field to a JEDEC id, and record the flash page size
 * (or the maximum EEPROM size for non-flash parts).
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Page size is encoded separately for flash parts. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
		case FLASH_5752PAGE_SIZE_256:
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5752PAGE_SIZE_512:
			tp->nvram_pagesize = 512;
			break;
		case FLASH_5752PAGE_SIZE_1K:
			tp->nvram_pagesize = 1024;
			break;
		case FLASH_5752PAGE_SIZE_2K:
			tp->nvram_pagesize = 2048;
			break;
		case FLASH_5752PAGE_SIZE_4K:
			tp->nvram_pagesize = 4096;
			break;
		case FLASH_5752PAGE_SIZE_264:
			tp->nvram_pagesize = 264;
			break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9725
d3c7b886
MC
/* Decode NVRAM_CFG1 for 5755 devices: note TPM write protection and
 * derive JEDEC id, page size, and usable NVRAM size.  When the part is
 * protected, only the unprotected region size is reported.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		/* Size depends on the specific Atmel part and whether the
		 * protected region reduces the usable area.
		 */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 : 0x80000);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 : 0x40000);
		else
			tp->nvram_size = (protect ? 0x1f200 : 0x20000);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ? 0x10000 : 0x20000);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ? 0x10000 : 0x40000);
		else
			tp->nvram_size = (protect ? 0x20000 : 0x80000);
		break;
	}
}
9772
1b27777a
MC
/* Decode NVRAM_CFG1 for 5787 (and 5784) devices: map the vendor field
 * to a JEDEC id and page size.  EEPROM parts additionally get the
 * compatibility-bypass bit cleared.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
9810
6b91fa02
MC
/* Decode NVRAM_CFG1 for 5761 devices: note TPM write protection and
 * derive JEDEC id and page size.  When protected, the usable size is
 * whatever the NVRAM_ADDR_LOCKOUT register reports; otherwise it is
 * inferred from the specific flash part.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		/* These Atmel parts take linear addresses directly. */
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = 0x100000;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = 0x80000;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = 0x40000;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = 0x20000;
			break;
		}
	}
}
9885
b5d3772c
MC
/* 5906 has a fixed NVRAM configuration: a buffered Atmel EEPROM whose
 * page size is the full chip size.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
9892
1da177e4
LT
9893/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9894static void __devinit tg3_nvram_init(struct tg3 *tp)
9895{
1da177e4
LT
9896 tw32_f(GRC_EEPROM_ADDR,
9897 (EEPROM_ADDR_FSM_RESET |
9898 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9899 EEPROM_ADDR_CLKPERD_SHIFT)));
9900
9d57f01c 9901 msleep(1);
1da177e4
LT
9902
9903 /* Enable seeprom accesses. */
9904 tw32_f(GRC_LOCAL_CTRL,
9905 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9906 udelay(100);
9907
9908 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9909 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9910 tp->tg3_flags |= TG3_FLAG_NVRAM;
9911
ec41c7df
MC
9912 if (tg3_nvram_lock(tp)) {
9913 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9914 "tg3_nvram_init failed.\n", tp->dev->name);
9915 return;
9916 }
e6af301b 9917 tg3_enable_nvram_access(tp);
1da177e4 9918
989a9d23
MC
9919 tp->nvram_size = 0;
9920
361b4ac2
MC
9921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9922 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
9923 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9924 tg3_get_5755_nvram_info(tp);
d30cdd28
MC
9925 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
1b27777a 9927 tg3_get_5787_nvram_info(tp);
6b91fa02
MC
9928 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9929 tg3_get_5761_nvram_info(tp);
b5d3772c
MC
9930 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9931 tg3_get_5906_nvram_info(tp);
361b4ac2
MC
9932 else
9933 tg3_get_nvram_info(tp);
9934
989a9d23
MC
9935 if (tp->nvram_size == 0)
9936 tg3_get_nvram_size(tp);
1da177e4 9937
e6af301b 9938 tg3_disable_nvram_access(tp);
381291b7 9939 tg3_nvram_unlock(tp);
1da177e4
LT
9940
9941 } else {
9942 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9943
9944 tg3_get_eeprom_size(tp);
9945 }
9946}
9947
/* Read one 32-bit word from the legacy serial EEPROM through the
 * GRC_EEPROM_ADDR/DATA registers (used when the chip has no NVRAM
 * interface).  @offset must be dword aligned and within the
 * addressable range.  Returns 0 on success, -EINVAL on a bad offset,
 * -EBUSY if the read does not complete.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear address, device id, and
	 * direction before issuing the read.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll up to ~1000 ms for the completion bit. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9981
9982#define NVRAM_CMD_TIMEOUT 10000
9983
9984static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9985{
9986 int i;
9987
9988 tw32(NVRAM_CMD, nvram_cmd);
9989 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9990 udelay(10);
9991 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9992 udelay(10);
9993 break;
9994 }
9995 }
9996 if (i == NVRAM_CMD_TIMEOUT) {
9997 return -EBUSY;
9998 }
9999 return 0;
10000}
10001
1820180b
MC
10002static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10003{
10004 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10005 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10006 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10007 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
1820180b
MC
10008 (tp->nvram_jedecnum == JEDEC_ATMEL))
10009
10010 addr = ((addr / tp->nvram_pagesize) <<
10011 ATMEL_AT45DB0X1B_PAGE_POS) +
10012 (addr % tp->nvram_pagesize);
10013
10014 return addr;
10015}
10016
c4e6575c
MC
10017static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10018{
10019 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10020 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10021 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10022 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
c4e6575c
MC
10023 (tp->nvram_jedecnum == JEDEC_ATMEL))
10024
10025 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10026 tp->nvram_pagesize) +
10027 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10028
10029 return addr;
10030}
10031
1da177e4
LT
/* Read one 32-bit word from NVRAM at @offset (linear address).
 * Falls back to the legacy EEPROM path when no NVRAM interface is
 * present.  Takes and releases the NVRAM hardware lock around the
 * access.  Returns 0 on success or a negative errno; *val is only
 * written on success.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's physical addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Data register holds the word byte-swapped. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10063
1820180b
MC
10064static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10065{
10066 int err;
10067 u32 tmp;
10068
10069 err = tg3_nvram_read(tp, offset, &tmp);
10070 *val = swab32(tmp);
10071 return err;
10072}
10073
1da177e4
LT
/* Write @len bytes from @buf to the legacy serial EEPROM, one 32-bit
 * word at a time, polling for completion after each word.
 * @offset and @len are assumed dword aligned by the caller.
 * Returns 0 on success, -EBUSY if a word write times out.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		/* memcpy avoids alignment assumptions on buf. */
		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Ack any stale completion before starting the write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1000 ms for this word to complete. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10115
/* offset and length are dword aligned */
/* Write to unbuffered flash via read-modify-write of whole pages:
 * for each page touched, read it into a bounce buffer, merge the new
 * bytes, issue write-enable, erase the page, then program it word by
 * word (FIRST on the first word, LAST on the final word).  A trailing
 * write-disable is always issued.  Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing this offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole page so untouched bytes are preserved. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						  (u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's bytes into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page one word at a time. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10211
/* offset and length are dword aligned */
/* Write to buffered flash / EEPROM one 32-bit word at a time.
 * FIRST/LAST command flags are set at page and transfer boundaries;
 * ST parts on older ASICs additionally need an explicit write-enable
 * before the first word of each page.  Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		/* memcpy avoids alignment assumptions on buf. */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at the start of a page or of the transfer;
		 * LAST at the end of a page or of the transfer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Older ASICs with ST flash need a write-enable before
		 * each FIRST word; 5752/5755/5787/5784/5761 do not.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10264
10265/* offset and length are dword aligned */
10266static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10267{
10268 int ret;
10269
1da177e4 10270 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
10271 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10272 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
10273 udelay(40);
10274 }
10275
10276 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10277 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10278 }
10279 else {
10280 u32 grc_mode;
10281
ec41c7df
MC
10282 ret = tg3_nvram_lock(tp);
10283 if (ret)
10284 return ret;
1da177e4 10285
e6af301b
MC
10286 tg3_enable_nvram_access(tp);
10287 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10288 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 10289 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
10290
10291 grc_mode = tr32(GRC_MODE);
10292 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10293
10294 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10295 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10296
10297 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10298 buf);
10299 }
10300 else {
10301 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10302 buf);
10303 }
10304
10305 grc_mode = tr32(GRC_MODE);
10306 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10307
e6af301b 10308 tg3_disable_nvram_access(tp);
1da177e4
LT
10309 tg3_nvram_unlock(tp);
10310 }
10311
10312 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 10313 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
10314 udelay(40);
10315 }
10316
10317 return ret;
10318}
10319
/* Maps a board's PCI subsystem vendor/device id to the PHY it ships
 * with.  A phy_id of 0 means no specific PHY id is recorded for that
 * board (presumably fiber/serdes variants — confirm against probe code).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10362
10363static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10364{
10365 int i;
10366
10367 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10368 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10369 tp->pdev->subsystem_vendor) &&
10370 (subsys_id_to_phy_id[i].subsys_devid ==
10371 tp->pdev->subsystem_device))
10372 return &subsys_id_to_phy_id[i];
10373 }
10374 return NULL;
10375}
10376
7d0c41ef 10377static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 10378{
1da177e4 10379 u32 val;
caf636c7
MC
10380 u16 pmcsr;
10381
10382 /* On some early chips the SRAM cannot be accessed in D3hot state,
10383 * so need make sure we're in D0.
10384 */
10385 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10386 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10387 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10388 msleep(1);
7d0c41ef
MC
10389
10390 /* Make sure register accesses (indirect or otherwise)
10391 * will function correctly.
10392 */
10393 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10394 tp->misc_host_ctrl);
1da177e4 10395
f49639e6
DM
10396 /* The memory arbiter has to be enabled in order for SRAM accesses
10397 * to succeed. Normally on powerup the tg3 chip firmware will make
10398 * sure it is enabled, but other entities such as system netboot
10399 * code might disable it.
10400 */
10401 val = tr32(MEMARB_MODE);
10402 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10403
1da177e4 10404 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
10405 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10406
a85feb8c
GZ
10407 /* Assume an onboard device and WOL capable by default. */
10408 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
72b845e0 10409
b5d3772c 10410 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9d26e213 10411 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
b5d3772c 10412 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9d26e213
MC
10413 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10414 }
8ed5d97e
MC
10415 if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC)
10416 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
b5d3772c
MC
10417 return;
10418 }
10419
1da177e4
LT
10420 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10421 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10422 u32 nic_cfg, led_cfg;
7d0c41ef
MC
10423 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10424 int eeprom_phy_serdes = 0;
1da177e4
LT
10425
10426 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10427 tp->nic_sram_data_cfg = nic_cfg;
10428
10429 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10430 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10431 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10432 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10433 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10434 (ver > 0) && (ver < 0x100))
10435 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10436
1da177e4
LT
10437 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10438 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10439 eeprom_phy_serdes = 1;
10440
10441 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10442 if (nic_phy_id != 0) {
10443 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10444 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10445
10446 eeprom_phy_id = (id1 >> 16) << 10;
10447 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10448 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10449 } else
10450 eeprom_phy_id = 0;
10451
7d0c41ef 10452 tp->phy_id = eeprom_phy_id;
747e8f8b 10453 if (eeprom_phy_serdes) {
a4e2b347 10454 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
747e8f8b
MC
10455 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10456 else
10457 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10458 }
7d0c41ef 10459
cbf46853 10460 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
10461 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10462 SHASTA_EXT_LED_MODE_MASK);
cbf46853 10463 else
1da177e4
LT
10464 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10465
10466 switch (led_cfg) {
10467 default:
10468 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10469 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10470 break;
10471
10472 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10473 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10474 break;
10475
10476 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10477 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
10478
10479 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10480 * read on some older 5700/5701 bootcode.
10481 */
10482 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10483 ASIC_REV_5700 ||
10484 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10485 ASIC_REV_5701)
10486 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10487
1da177e4
LT
10488 break;
10489
10490 case SHASTA_EXT_LED_SHARED:
10491 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10492 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10493 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10494 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10495 LED_CTRL_MODE_PHY_2);
10496 break;
10497
10498 case SHASTA_EXT_LED_MAC:
10499 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10500 break;
10501
10502 case SHASTA_EXT_LED_COMBO:
10503 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10504 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10505 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10506 LED_CTRL_MODE_PHY_2);
10507 break;
10508
10509 };
10510
10511 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10512 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10513 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10514 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10515
9d26e213 10516 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
1da177e4 10517 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9d26e213
MC
10518 if ((tp->pdev->subsystem_vendor ==
10519 PCI_VENDOR_ID_ARIMA) &&
10520 (tp->pdev->subsystem_device == 0x205a ||
10521 tp->pdev->subsystem_device == 0x2063))
10522 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10523 } else {
f49639e6 10524 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9d26e213
MC
10525 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10526 }
1da177e4
LT
10527
10528 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10529 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 10530 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
10531 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10532 }
0d3031d9
MC
10533 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10534 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
a85feb8c
GZ
10535 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10536 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10537 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
1da177e4
LT
10538
10539 if (cfg2 & (1 << 17))
10540 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10541
10542 /* serdes signal pre-emphasis in register 0x590 set by */
10543 /* bootcode if bit 18 is set */
10544 if (cfg2 & (1 << 18))
10545 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8ed5d97e
MC
10546
10547 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10548 u32 cfg3;
10549
10550 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10551 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10552 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10553 }
1da177e4 10554 }
7d0c41ef
MC
10555}
10556
/* Identify the PHY attached to this NIC and record it in tp->phy_id.
 * Resolution order: read MII_PHYSID1/2 directly (unless ASF/APE
 * firmware owns the PHY), fall back to the id tg3_get_eeprom_hw_cfg()
 * left in tp->phy_id, and finally to the hardcoded subsystem-ID table.
 * For copper PHYs not managed by firmware it may also reset the PHY
 * and (re)program autonegotiation advertisement.
 * Returns 0 on success, -ENODEV if the board cannot be identified, or
 * an error from PHY reset/DSP init.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firwmare access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/2 into the driver's internal PHY id layout
		 * (same packing tg3_get_eeprom_hw_cfg() uses for SRAM ids).
		 */
		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY fully owned by the driver: reset it and set up
	 * autonegotiation unless the link is already up.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice: the link-status bit is latched, so
		 * the first read flushes any stale latched state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 steppings must advertise themselves
			 * as master for 1000baseT.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Restart autoneg only if the PHY is not already
		 * advertising everything we want.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): the advertisement registers are written
		 * again unconditionally here, even when the block above
		 * already wrote them -- presumably deliberate (wirespeed
		 * setup may clobber them); confirm before simplifying.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): for BCM5401 this re-runs the DSP init even though
	 * the block above already succeeded (err == 0 here).  Looks
	 * redundant -- confirm whether the double init is an intentional
	 * hardware workaround before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Seed the default advertisement mask for serdes / 10-100-only
	 * devices.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10684
/* Read the board part number string from the VPD (Vital Product Data)
 * area into tp->board_part_number.  The VPD image is fetched either
 * from NVRAM (offset 0x100, when a valid EEPROM magic is present) or
 * through the PCI VPD capability, then parsed for the "PN" keyword.
 * On any read or parse failure a fallback name is stored instead.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Valid NVRAM image: copy 256 bytes of VPD data, one
		 * 32-bit word at a time, from offset 0x100.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >> 0) & 0xff);
			vpd_data[i + 1] = ((tmp >> 8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No NVRAM signature: read the VPD through the PCI VPD
		 * capability, 4 bytes per transaction.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll (up to ~100ms) for the read-complete flag
			 * in bit 15 of the VPD address register.
			 */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			/* Store in little-endian byte order so vpd_data[]
			 * matches the NVRAM path above.
			 */
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip 0x82 (identifier string) and 0x91 resource blocks;
		 * each has a 16-bit little-endian length after the tag.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything other than a 0x90 (VPD-R) block here means the
		 * data is not laid out the way we expect.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Scan the keyword list inside the VPD-R block for "PN";
		 * each keyword entry is 2 name bytes + 1 length byte.
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* Bound the copy to the destination field
				 * and to the VPD buffer.
				 */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10784
c4e6575c
MC
/* Extract the bootcode firmware version string from NVRAM into
 * tp->fw_ver (up to 16 bytes).  Silently returns, leaving tp->fw_ver
 * untouched, on any read failure or if the NVRAM contents do not look
 * like a valid tg3 EEPROM image.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0x4 holds the image start address, word 0xc a pointer to
	 * the firmware section.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* assumes top bits 0x0c000000 identify a bootcode section header
	 * -- TODO confirm against NVRAM layout docs
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		/* Copy the 16-byte version string, converting each word
		 * to little-endian byte order.
		 */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10824
7544b097
MC
10825static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10826
1da177e4
LT
10827static int __devinit tg3_get_invariants(struct tg3 *tp)
10828{
10829 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
10830 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10831 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
c165b004
JL
10832 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10833 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
399de50b
MC
10834 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10835 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
10836 { },
10837 };
10838 u32 misc_ctrl_reg;
10839 u32 cacheline_sz_reg;
10840 u32 pci_state_reg, grc_misc_cfg;
10841 u32 val;
10842 u16 pci_cmd;
c7835a77 10843 int err, pcie_cap;
1da177e4 10844
1da177e4
LT
10845 /* Force memory write invalidate off. If we leave it on,
10846 * then on 5700_BX chips we have to enable a workaround.
10847 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10848 * to match the cacheline size. The Broadcom driver have this
10849 * workaround but turns MWI off all the times so never uses
10850 * it. This seems to suggest that the workaround is insufficient.
10851 */
10852 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10853 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10854 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10855
10856 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10857 * has the register indirect write enable bit set before
10858 * we try to access any of the MMIO registers. It is also
10859 * critical that the PCI-X hw workaround situation is decided
10860 * before that as well.
10861 */
10862 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10863 &misc_ctrl_reg);
10864
10865 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10866 MISC_HOST_CTRL_CHIPREV_SHIFT);
795d01c5
MC
10867 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10868 u32 prod_id_asic_rev;
10869
10870 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10871 &prod_id_asic_rev);
10872 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10873 }
1da177e4 10874
ff645bec
MC
10875 /* Wrong chip ID in 5752 A0. This code can be removed later
10876 * as A0 is not in production.
10877 */
10878 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10879 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10880
6892914f
MC
10881 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10882 * we need to disable memory and use config. cycles
10883 * only to access all registers. The 5702/03 chips
10884 * can mistakenly decode the special cycles from the
10885 * ICH chipsets as memory write cycles, causing corruption
10886 * of register and memory space. Only certain ICH bridges
10887 * will drive special cycles with non-zero data during the
10888 * address phase which can fall within the 5703's address
10889 * range. This is not an ICH bug as the PCI spec allows
10890 * non-zero address during special cycles. However, only
10891 * these ICH bridges are known to drive non-zero addresses
10892 * during special cycles.
10893 *
10894 * Since special cycles do not cross PCI bridges, we only
10895 * enable this workaround if the 5703 is on the secondary
10896 * bus of these ICH bridges.
10897 */
10898 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10899 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10900 static struct tg3_dev_id {
10901 u32 vendor;
10902 u32 device;
10903 u32 rev;
10904 } ich_chipsets[] = {
10905 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10906 PCI_ANY_ID },
10907 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10908 PCI_ANY_ID },
10909 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10910 0xa },
10911 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10912 PCI_ANY_ID },
10913 { },
10914 };
10915 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10916 struct pci_dev *bridge = NULL;
10917
10918 while (pci_id->vendor != 0) {
10919 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10920 bridge);
10921 if (!bridge) {
10922 pci_id++;
10923 continue;
10924 }
10925 if (pci_id->rev != PCI_ANY_ID) {
44c10138 10926 if (bridge->revision > pci_id->rev)
6892914f
MC
10927 continue;
10928 }
10929 if (bridge->subordinate &&
10930 (bridge->subordinate->number ==
10931 tp->pdev->bus->number)) {
10932
10933 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10934 pci_dev_put(bridge);
10935 break;
10936 }
10937 }
10938 }
10939
4a29cc2e
MC
10940 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10941 * DMA addresses > 40-bit. This bridge may have other additional
10942 * 57xx devices behind it in some 4-port NIC designs for example.
10943 * Any tg3 device found behind the bridge will also need the 40-bit
10944 * DMA workaround.
10945 */
a4e2b347
MC
10946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10948 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 10949 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 10950 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 10951 }
4a29cc2e
MC
10952 else {
10953 struct pci_dev *bridge = NULL;
10954
10955 do {
10956 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10957 PCI_DEVICE_ID_SERVERWORKS_EPB,
10958 bridge);
10959 if (bridge && bridge->subordinate &&
10960 (bridge->subordinate->number <=
10961 tp->pdev->bus->number) &&
10962 (bridge->subordinate->subordinate >=
10963 tp->pdev->bus->number)) {
10964 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10965 pci_dev_put(bridge);
10966 break;
10967 }
10968 } while (bridge);
10969 }
4cf78e4f 10970
1da177e4
LT
10971 /* Initialize misc host control in PCI block. */
10972 tp->misc_host_ctrl |= (misc_ctrl_reg &
10973 MISC_HOST_CTRL_CHIPREV);
10974 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10975 tp->misc_host_ctrl);
10976
10977 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10978 &cacheline_sz_reg);
10979
10980 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10981 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10982 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10983 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10984
7544b097
MC
10985 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10986 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10987 tp->pdev_peer = tg3_find_peer(tp);
10988
6708e5cc 10989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 10990 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 10991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 10992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 10993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 10994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 10995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
a4e2b347 10996 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
10997 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10998
1b440c56
JL
10999 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11000 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11001 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11002
5a6f3074 11003 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7544b097
MC
11004 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11005 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11006 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11007 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11008 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11009 tp->pdev_peer == tp->pdev))
11010 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11011
af36e6b6 11012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 11013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5a6f3074 11017 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
fcfa0a32 11018 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
52c0fd83 11019 } else {
7f62ad5d 11020 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
52c0fd83
MC
11021 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11022 ASIC_REV_5750 &&
11023 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
7f62ad5d 11024 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
52c0fd83 11025 }
5a6f3074 11026 }
1da177e4 11027
0f893dc6
MC
11028 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11029 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
d9ab5ad1 11030 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
af36e6b6 11031 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
b5d3772c 11032 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
d30cdd28 11033 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9936bcf6 11034 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
b5d3772c 11035 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
0f893dc6
MC
11036 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11037
c7835a77
MC
11038 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11039 if (pcie_cap != 0) {
1da177e4 11040 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
c7835a77
MC
11041 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11042 u16 lnkctl;
11043
11044 pci_read_config_word(tp->pdev,
11045 pcie_cap + PCI_EXP_LNKCTL,
11046 &lnkctl);
11047 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11048 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11049 }
11050 }
1da177e4 11051
399de50b
MC
11052 /* If we have an AMD 762 or VIA K8T800 chipset, write
11053 * reordering to the mailbox registers done by the host
11054 * controller can cause major troubles. We read back from
11055 * every mailbox register write to force the writes to be
11056 * posted to the chip in order.
11057 */
11058 if (pci_dev_present(write_reorder_chipsets) &&
11059 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11060 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11061
1da177e4
LT
11062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11063 tp->pci_lat_timer < 64) {
11064 tp->pci_lat_timer = 64;
11065
11066 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11067 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11068 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11069 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11070
11071 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11072 cacheline_sz_reg);
11073 }
11074
9974a356
MC
11075 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11076 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11077 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11078 if (!tp->pcix_cap) {
11079 printk(KERN_ERR PFX "Cannot find PCI-X "
11080 "capability, aborting.\n");
11081 return -EIO;
11082 }
11083 }
11084
1da177e4
LT
11085 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11086 &pci_state_reg);
11087
9974a356 11088 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
1da177e4
LT
11089 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11090
11091 /* If this is a 5700 BX chipset, and we are in PCI-X
11092 * mode, enable register write workaround.
11093 *
11094 * The workaround is to use indirect register accesses
11095 * for all chip writes not to mailbox registers.
11096 */
11097 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11098 u32 pm_reg;
1da177e4
LT
11099
11100 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11101
11102 /* The chip can have it's power management PCI config
11103 * space registers clobbered due to this bug.
11104 * So explicitly force the chip into D0 here.
11105 */
9974a356
MC
11106 pci_read_config_dword(tp->pdev,
11107 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11108 &pm_reg);
11109 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11110 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
11111 pci_write_config_dword(tp->pdev,
11112 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11113 pm_reg);
11114
11115 /* Also, force SERR#/PERR# in PCI command. */
11116 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11117 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11118 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11119 }
11120 }
11121
087fe256
MC
11122 /* 5700 BX chips need to have their TX producer index mailboxes
11123 * written twice to workaround a bug.
11124 */
11125 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11126 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11127
1da177e4
LT
11128 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11129 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11130 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11131 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11132
11133 /* Chip-specific fixup from Broadcom driver */
11134 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11135 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11136 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11137 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11138 }
11139
1ee582d8 11140 /* Default fast path register access methods */
20094930 11141 tp->read32 = tg3_read32;
1ee582d8 11142 tp->write32 = tg3_write32;
09ee929c 11143 tp->read32_mbox = tg3_read32;
20094930 11144 tp->write32_mbox = tg3_write32;
1ee582d8
MC
11145 tp->write32_tx_mbox = tg3_write32;
11146 tp->write32_rx_mbox = tg3_write32;
11147
11148 /* Various workaround register access methods */
11149 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11150 tp->write32 = tg3_write_indirect_reg32;
98efd8a6
MC
11151 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11152 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11153 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11154 /*
11155 * Back to back register writes can cause problems on these
11156 * chips, the workaround is to read back all reg writes
11157 * except those to mailbox regs.
11158 *
11159 * See tg3_write_indirect_reg32().
11160 */
1ee582d8 11161 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
11162 }
11163
1ee582d8
MC
11164
11165 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11166 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11167 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11168 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11169 tp->write32_rx_mbox = tg3_write_flush_reg32;
11170 }
20094930 11171
6892914f
MC
11172 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11173 tp->read32 = tg3_read_indirect_reg32;
11174 tp->write32 = tg3_write_indirect_reg32;
11175 tp->read32_mbox = tg3_read_indirect_mbox;
11176 tp->write32_mbox = tg3_write_indirect_mbox;
11177 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11178 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11179
11180 iounmap(tp->regs);
22abe310 11181 tp->regs = NULL;
6892914f
MC
11182
11183 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11184 pci_cmd &= ~PCI_COMMAND_MEMORY;
11185 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11186 }
b5d3772c
MC
11187 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11188 tp->read32_mbox = tg3_read32_mbox_5906;
11189 tp->write32_mbox = tg3_write32_mbox_5906;
11190 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11191 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11192 }
6892914f 11193
bbadf503
MC
11194 if (tp->write32 == tg3_write_indirect_reg32 ||
11195 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11196 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 11197 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
bbadf503
MC
11198 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11199
7d0c41ef 11200 /* Get eeprom hw config before calling tg3_set_power_state().
9d26e213 11201 * In particular, the TG3_FLG2_IS_NIC flag must be
7d0c41ef
MC
11202 * determined before calling tg3_set_power_state() so that
11203 * we know whether or not to switch out of Vaux power.
11204 * When the flag is set, it means that GPIO1 is used for eeprom
11205 * write protect and also implies that it is a LOM where GPIOs
11206 * are not used to switch power.
6aa20a22 11207 */
7d0c41ef
MC
11208 tg3_get_eeprom_hw_cfg(tp);
11209
0d3031d9
MC
11210 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11211 /* Allow reads and writes to the
11212 * APE register and memory space.
11213 */
11214 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11215 PCISTATE_ALLOW_APE_SHMEM_WR;
11216 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11217 pci_state_reg);
11218 }
11219
9936bcf6
MC
11220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d30cdd28
MC
11222 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11223
314fba34
MC
11224 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11225 * GPIO1 driven high will bring 5700's external PHY out of reset.
11226 * It is also used as eeprom write protect on LOMs.
11227 */
11228 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11229 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11230 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11231 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11232 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
11233 /* Unused GPIO3 must be driven as output on 5752 because there
11234 * are no pull-up resistors on unused GPIO pins.
11235 */
11236 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11237 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 11238
af36e6b6
MC
11239 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11240 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11241
1da177e4 11242 /* Force the chip into D0. */
bc1c7567 11243 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
11244 if (err) {
11245 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11246 pci_name(tp->pdev));
11247 return err;
11248 }
11249
11250 /* 5700 B0 chips do not support checksumming correctly due
11251 * to hardware bugs.
11252 */
11253 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11254 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11255
1da177e4
LT
11256 /* Derive initial jumbo mode from MTU assigned in
11257 * ether_setup() via the alloc_etherdev() call
11258 */
0f893dc6 11259 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 11260 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 11261 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
11262
11263 /* Determine WakeOnLan speed to use. */
11264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11265 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11266 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11267 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11268 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11269 } else {
11270 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11271 }
11272
11273 /* A few boards don't want Ethernet@WireSpeed phy feature */
11274 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11275 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11276 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 11277 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
b5d3772c 11278 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
747e8f8b 11279 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
11280 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11281
11282 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11283 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11284 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11285 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11286 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11287
c424cb24
MC
11288 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 11290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
11291 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
d4011ada
MC
11293 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11294 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11295 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
c1d2a196
MC
11296 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11297 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11298 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
c424cb24
MC
11299 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11300 }
1da177e4 11301
1da177e4 11302 tp->coalesce_mode = 0;
1da177e4
LT
11303 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11304 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11305 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11306
11307 /* Initialize MAC MI mode, polling disabled. */
11308 tw32_f(MAC_MI_MODE, tp->mi_mode);
11309 udelay(80);
11310
11311 /* Initialize data/descriptor byte/word swapping. */
11312 val = tr32(GRC_MODE);
11313 val &= GRC_MODE_HOST_STACKUP;
11314 tw32(GRC_MODE, val | tp->grc_mode);
11315
11316 tg3_switch_clocks(tp);
11317
11318 /* Clear this out for sanity. */
11319 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11320
11321 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11322 &pci_state_reg);
11323 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11324 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11325 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11326
11327 if (chiprevid == CHIPREV_ID_5701_A0 ||
11328 chiprevid == CHIPREV_ID_5701_B0 ||
11329 chiprevid == CHIPREV_ID_5701_B2 ||
11330 chiprevid == CHIPREV_ID_5701_B5) {
11331 void __iomem *sram_base;
11332
11333 /* Write some dummy words into the SRAM status block
11334 * area, see if it reads back correctly. If the return
11335 * value is bad, force enable the PCIX workaround.
11336 */
11337 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11338
11339 writel(0x00000000, sram_base);
11340 writel(0x00000000, sram_base + 4);
11341 writel(0xffffffff, sram_base + 4);
11342 if (readl(sram_base) != 0x00000000)
11343 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11344 }
11345 }
11346
11347 udelay(50);
11348 tg3_nvram_init(tp);
11349
11350 grc_misc_cfg = tr32(GRC_MISC_CFG);
11351 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11352
1da177e4
LT
11353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11354 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11355 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11356 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11357
fac9b83e
DM
11358 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11359 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11360 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11361 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11362 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11363 HOSTCC_MODE_CLRTICK_TXBD);
11364
11365 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11366 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11367 tp->misc_host_ctrl);
11368 }
11369
1da177e4
LT
11370 /* these are limited to 10/100 only */
11371 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11372 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11373 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11374 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11375 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11376 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11377 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11378 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11379 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
676917d4
MC
11380 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11381 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
b5d3772c 11382 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1da177e4
LT
11383 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11384
11385 err = tg3_phy_probe(tp);
11386 if (err) {
11387 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11388 pci_name(tp->pdev), err);
11389 /* ... but do not return immediately ... */
11390 }
11391
11392 tg3_read_partno(tp);
c4e6575c 11393 tg3_read_fw_ver(tp);
1da177e4
LT
11394
11395 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11396 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11397 } else {
11398 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11399 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11400 else
11401 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11402 }
11403
11404 /* 5700 {AX,BX} chips have a broken status block link
11405 * change bit implementation, so we must use the
11406 * status register in those cases.
11407 */
11408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11409 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11410 else
11411 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11412
11413 /* The led_ctrl is set during tg3_phy_probe, here we might
11414 * have to force the link status polling mechanism based
11415 * upon subsystem IDs.
11416 */
11417 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 11418 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
1da177e4
LT
11419 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11420 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11421 TG3_FLAG_USE_LINKCHG_REG);
11422 }
11423
11424 /* For all SERDES we poll the MAC status register. */
11425 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11426 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11427 else
11428 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11429
5a6f3074 11430 /* All chips before 5787 can get confused if TX buffers
1da177e4
LT
11431 * straddle the 4GB address boundary in some cases.
11432 */
af36e6b6 11433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 11434 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11435 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11437 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
5a6f3074
MC
11438 tp->dev->hard_start_xmit = tg3_start_xmit;
11439 else
11440 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
1da177e4
LT
11441
11442 tp->rx_offset = 2;
11443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11444 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11445 tp->rx_offset = 0;
11446
f92905de
MC
11447 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11448
11449 /* Increment the rx prod index on the rx std ring by at most
11450 * 8 for these chips to workaround hw errata.
11451 */
11452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11453 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11455 tp->rx_std_max_post = 8;
11456
1da177e4
LT
11457 /* By default, disable wake-on-lan. User can change this
11458 * using ETHTOOL_SWOL.
11459 */
11460 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
11461
8ed5d97e
MC
11462 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11463 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11464 PCIE_PWR_MGMT_L1_THRESH_MSK;
11465
1da177e4
LT
11466 return err;
11467}
11468
49b6e95f 11469#ifdef CONFIG_SPARC
1da177e4
LT
11470static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11471{
11472 struct net_device *dev = tp->dev;
11473 struct pci_dev *pdev = tp->pdev;
49b6e95f 11474 struct device_node *dp = pci_device_to_OF_node(pdev);
374d4cac 11475 const unsigned char *addr;
49b6e95f
DM
11476 int len;
11477
11478 addr = of_get_property(dp, "local-mac-address", &len);
11479 if (addr && len == 6) {
11480 memcpy(dev->dev_addr, addr, 6);
11481 memcpy(dev->perm_addr, dev->dev_addr, 6);
11482 return 0;
1da177e4
LT
11483 }
11484 return -ENODEV;
11485}
11486
11487static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11488{
11489 struct net_device *dev = tp->dev;
11490
11491 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 11492 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
11493 return 0;
11494}
11495#endif
11496
11497static int __devinit tg3_get_device_address(struct tg3 *tp)
11498{
11499 struct net_device *dev = tp->dev;
11500 u32 hi, lo, mac_offset;
008652b3 11501 int addr_ok = 0;
1da177e4 11502
49b6e95f 11503#ifdef CONFIG_SPARC
1da177e4
LT
11504 if (!tg3_get_macaddr_sparc(tp))
11505 return 0;
11506#endif
11507
11508 mac_offset = 0x7c;
f49639e6 11509 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
a4e2b347 11510 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
11511 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11512 mac_offset = 0xcc;
11513 if (tg3_nvram_lock(tp))
11514 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11515 else
11516 tg3_nvram_unlock(tp);
11517 }
b5d3772c
MC
11518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11519 mac_offset = 0x10;
1da177e4
LT
11520
11521 /* First try to get it from MAC address mailbox. */
11522 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11523 if ((hi >> 16) == 0x484b) {
11524 dev->dev_addr[0] = (hi >> 8) & 0xff;
11525 dev->dev_addr[1] = (hi >> 0) & 0xff;
11526
11527 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11528 dev->dev_addr[2] = (lo >> 24) & 0xff;
11529 dev->dev_addr[3] = (lo >> 16) & 0xff;
11530 dev->dev_addr[4] = (lo >> 8) & 0xff;
11531 dev->dev_addr[5] = (lo >> 0) & 0xff;
1da177e4 11532
008652b3
MC
11533 /* Some old bootcode may report a 0 MAC address in SRAM */
11534 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11535 }
11536 if (!addr_ok) {
11537 /* Next, try NVRAM. */
f49639e6 11538 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
008652b3
MC
11539 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11540 dev->dev_addr[0] = ((hi >> 16) & 0xff);
11541 dev->dev_addr[1] = ((hi >> 24) & 0xff);
11542 dev->dev_addr[2] = ((lo >> 0) & 0xff);
11543 dev->dev_addr[3] = ((lo >> 8) & 0xff);
11544 dev->dev_addr[4] = ((lo >> 16) & 0xff);
11545 dev->dev_addr[5] = ((lo >> 24) & 0xff);
11546 }
11547 /* Finally just fetch it out of the MAC control regs. */
11548 else {
11549 hi = tr32(MAC_ADDR_0_HIGH);
11550 lo = tr32(MAC_ADDR_0_LOW);
11551
11552 dev->dev_addr[5] = lo & 0xff;
11553 dev->dev_addr[4] = (lo >> 8) & 0xff;
11554 dev->dev_addr[3] = (lo >> 16) & 0xff;
11555 dev->dev_addr[2] = (lo >> 24) & 0xff;
11556 dev->dev_addr[1] = hi & 0xff;
11557 dev->dev_addr[0] = (hi >> 8) & 0xff;
11558 }
1da177e4
LT
11559 }
11560
11561 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11562#ifdef CONFIG_SPARC64
11563 if (!tg3_get_default_macaddr_sparc(tp))
11564 return 0;
11565#endif
11566 return -EINVAL;
11567 }
2ff43697 11568 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
11569 return 0;
11570}
11571
59e6b434
DM
11572#define BOUNDARY_SINGLE_CACHELINE 1
11573#define BOUNDARY_MULTI_CACHELINE 2
11574
11575static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11576{
11577 int cacheline_size;
11578 u8 byte;
11579 int goal;
11580
11581 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11582 if (byte == 0)
11583 cacheline_size = 1024;
11584 else
11585 cacheline_size = (int) byte * 4;
11586
11587 /* On 5703 and later chips, the boundary bits have no
11588 * effect.
11589 */
11590 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11591 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11592 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11593 goto out;
11594
11595#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11596 goal = BOUNDARY_MULTI_CACHELINE;
11597#else
11598#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11599 goal = BOUNDARY_SINGLE_CACHELINE;
11600#else
11601 goal = 0;
11602#endif
11603#endif
11604
11605 if (!goal)
11606 goto out;
11607
11608 /* PCI controllers on most RISC systems tend to disconnect
11609 * when a device tries to burst across a cache-line boundary.
11610 * Therefore, letting tg3 do so just wastes PCI bandwidth.
11611 *
11612 * Unfortunately, for PCI-E there are only limited
11613 * write-side controls for this, and thus for reads
11614 * we will still get the disconnects. We'll also waste
11615 * these PCI cycles for both read and write for chips
11616 * other than 5700 and 5701 which do not implement the
11617 * boundary bits.
11618 */
11619 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11620 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11621 switch (cacheline_size) {
11622 case 16:
11623 case 32:
11624 case 64:
11625 case 128:
11626 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11627 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11628 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11629 } else {
11630 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11631 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11632 }
11633 break;
11634
11635 case 256:
11636 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11637 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11638 break;
11639
11640 default:
11641 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11642 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11643 break;
11644 };
11645 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11646 switch (cacheline_size) {
11647 case 16:
11648 case 32:
11649 case 64:
11650 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11651 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11652 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11653 break;
11654 }
11655 /* fallthrough */
11656 case 128:
11657 default:
11658 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11659 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11660 break;
11661 };
11662 } else {
11663 switch (cacheline_size) {
11664 case 16:
11665 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11666 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11667 DMA_RWCTRL_WRITE_BNDRY_16);
11668 break;
11669 }
11670 /* fallthrough */
11671 case 32:
11672 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11673 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11674 DMA_RWCTRL_WRITE_BNDRY_32);
11675 break;
11676 }
11677 /* fallthrough */
11678 case 64:
11679 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11680 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11681 DMA_RWCTRL_WRITE_BNDRY_64);
11682 break;
11683 }
11684 /* fallthrough */
11685 case 128:
11686 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11687 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11688 DMA_RWCTRL_WRITE_BNDRY_128);
11689 break;
11690 }
11691 /* fallthrough */
11692 case 256:
11693 val |= (DMA_RWCTRL_READ_BNDRY_256 |
11694 DMA_RWCTRL_WRITE_BNDRY_256);
11695 break;
11696 case 512:
11697 val |= (DMA_RWCTRL_READ_BNDRY_512 |
11698 DMA_RWCTRL_WRITE_BNDRY_512);
11699 break;
11700 case 1024:
11701 default:
11702 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11703 DMA_RWCTRL_WRITE_BNDRY_1024);
11704 break;
11705 };
11706 }
11707
11708out:
11709 return val;
11710}
11711
1da177e4
LT
/* Run one DMA transaction through the chip's internal DMA engine.
 *
 * @tp:        device state
 * @buf:       host test buffer (CPU address, unused here but kept for
 *             symmetry with the caller)
 * @buf_dma:   DMA address of the test buffer
 * @size:      transfer length in bytes
 * @to_device: nonzero = host->NIC (read DMA), zero = NIC->host (write DMA)
 *
 * Builds a single internal buffer descriptor in NIC SRAM via the
 * PCI memory window, queues it on the appropriate FTQ, and polls the
 * matching completion FIFO.  Returns 0 when the descriptor completes,
 * -ENODEV on timeout (~4 ms of polling).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA machinery: clear completion FIFOs, engine
	 * status, buffer manager and flow-through queues.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor: host buffer address, a fixed NIC mbuf at 0x2100,
	 * and the transfer length.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the indirect PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO until our descriptor shows up, up to
	 * 40 * 100us.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11792
ded7340d 11793#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
11794
/* Derive and program the DMA read/write control word, then verify it.
 *
 * First composes tp->dma_rwctrl from bus type (PCI / PCI-X / PCIe),
 * ASIC revision and the boundary bits from tg3_calc_dma_bndry().  On
 * 5700/5701 it then runs a write/read DMA loopback over a DMA-coherent
 * buffer to detect the write-DMA corruption bug, tightening the write
 * boundary to 16 bytes if corruption is seen (or if a known-problem
 * host bridge is present even when the test passes).
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA fails even with the workaround.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus- and chip-specific watermark/workaround bits.  The magic
	 * constants are raw DMA_RW_CTRL field values from Broadcom.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 suffer the write-DMA bug; skip the loopback
	 * test on everything else.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern to chip, read it back, compare.  On the
	 * first mismatch, retry once with the 16-byte write boundary
	 * workaround; a mismatch with the workaround already active is
	 * fatal.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: enable the 16-byte write
				 * boundary workaround and rerun the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11981
11982static void __devinit tg3_init_link_config(struct tg3 *tp)
11983{
11984 tp->link_config.advertising =
11985 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11986 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11987 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11988 ADVERTISED_Autoneg | ADVERTISED_MII);
11989 tp->link_config.speed = SPEED_INVALID;
11990 tp->link_config.duplex = DUPLEX_INVALID;
11991 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
11992 tp->link_config.active_speed = SPEED_INVALID;
11993 tp->link_config.active_duplex = DUPLEX_INVALID;
11994 tp->link_config.phy_is_low_power = 0;
11995 tp->link_config.orig_speed = SPEED_INVALID;
11996 tp->link_config.orig_duplex = DUPLEX_INVALID;
11997 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11998}
11999
12000static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12001{
fdfec172
MC
12002 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12003 tp->bufmgr_config.mbuf_read_dma_low_water =
12004 DEFAULT_MB_RDMA_LOW_WATER_5705;
12005 tp->bufmgr_config.mbuf_mac_rx_low_water =
12006 DEFAULT_MB_MACRX_LOW_WATER_5705;
12007 tp->bufmgr_config.mbuf_high_water =
12008 DEFAULT_MB_HIGH_WATER_5705;
b5d3772c
MC
12009 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12010 tp->bufmgr_config.mbuf_mac_rx_low_water =
12011 DEFAULT_MB_MACRX_LOW_WATER_5906;
12012 tp->bufmgr_config.mbuf_high_water =
12013 DEFAULT_MB_HIGH_WATER_5906;
12014 }
fdfec172
MC
12015
12016 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12017 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12018 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12019 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12020 tp->bufmgr_config.mbuf_high_water_jumbo =
12021 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12022 } else {
12023 tp->bufmgr_config.mbuf_read_dma_low_water =
12024 DEFAULT_MB_RDMA_LOW_WATER;
12025 tp->bufmgr_config.mbuf_mac_rx_low_water =
12026 DEFAULT_MB_MACRX_LOW_WATER;
12027 tp->bufmgr_config.mbuf_high_water =
12028 DEFAULT_MB_HIGH_WATER;
12029
12030 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12031 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12032 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12033 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12034 tp->bufmgr_config.mbuf_high_water_jumbo =
12035 DEFAULT_MB_HIGH_WATER_JUMBO;
12036 }
1da177e4
LT
12037
12038 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12039 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12040}
12041
12042static char * __devinit tg3_phy_string(struct tg3 *tp)
12043{
12044 switch (tp->phy_id & PHY_ID_MASK) {
12045 case PHY_ID_BCM5400: return "5400";
12046 case PHY_ID_BCM5401: return "5401";
12047 case PHY_ID_BCM5411: return "5411";
12048 case PHY_ID_BCM5701: return "5701";
12049 case PHY_ID_BCM5703: return "5703";
12050 case PHY_ID_BCM5704: return "5704";
12051 case PHY_ID_BCM5705: return "5705";
12052 case PHY_ID_BCM5750: return "5750";
85e94ced 12053 case PHY_ID_BCM5752: return "5752";
a4e2b347 12054 case PHY_ID_BCM5714: return "5714";
4cf78e4f 12055 case PHY_ID_BCM5780: return "5780";
af36e6b6 12056 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 12057 case PHY_ID_BCM5787: return "5787";
d30cdd28 12058 case PHY_ID_BCM5784: return "5784";
126a3368 12059 case PHY_ID_BCM5756: return "5722/5756";
b5d3772c 12060 case PHY_ID_BCM5906: return "5906";
9936bcf6 12061 case PHY_ID_BCM5761: return "5761";
1da177e4
LT
12062 case PHY_ID_BCM8002: return "8002/serdes";
12063 case 0: return "serdes";
12064 default: return "unknown";
12065 };
12066}
12067
f9804ddb
MC
12068static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12069{
12070 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12071 strcpy(str, "PCI Express");
12072 return str;
12073 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12074 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12075
12076 strcpy(str, "PCIX:");
12077
12078 if ((clock_ctrl == 7) ||
12079 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12080 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12081 strcat(str, "133MHz");
12082 else if (clock_ctrl == 0)
12083 strcat(str, "33MHz");
12084 else if (clock_ctrl == 2)
12085 strcat(str, "50MHz");
12086 else if (clock_ctrl == 4)
12087 strcat(str, "66MHz");
12088 else if (clock_ctrl == 6)
12089 strcat(str, "100MHz");
f9804ddb
MC
12090 } else {
12091 strcpy(str, "PCI:");
12092 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12093 strcat(str, "66MHz");
12094 else
12095 strcat(str, "33MHz");
12096 }
12097 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12098 strcat(str, ":32-bit");
12099 else
12100 strcat(str, ":64-bit");
12101 return str;
12102}
12103
/* Find the other PCI function of a dual-port (e.g. 5704) device.
 *
 * Probes every function in this device's slot via pci_get_slot() and
 * returns the first one that is not tp->pdev itself.  When no peer
 * exists (single-port configuration), tp->pdev is returned.  The
 * returned pointer carries NO extra reference: the half-device cannot
 * be removed without the other half, so the refcount obtained from
 * pci_get_slot() is dropped before returning.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Not the peer (ourselves, or empty): drop the reference
		 * pci_get_slot() took.  pci_dev_put(NULL) is a no-op.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 *
	 * NOTE(review): if the loop falls through with peer still set to
	 * tp->pdev (our own function probed last), the pci_dev_put()
	 * below drops a reference that was already released inside the
	 * loop — confirm that path is unreachable in practice.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
12131
15f9850d
DM
12132static void __devinit tg3_init_coal(struct tg3 *tp)
12133{
12134 struct ethtool_coalesce *ec = &tp->coal;
12135
12136 memset(ec, 0, sizeof(*ec));
12137 ec->cmd = ETHTOOL_GCOALESCE;
12138 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12139 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12140 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12141 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12142 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12143 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12144 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12145 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12146 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12147
12148 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12149 HOSTCC_MODE_CLRTICK_TXBD)) {
12150 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12151 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12152 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12153 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12154 }
d244c892
MC
12155
12156 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12157 ec->rx_coalesce_usecs_irq = 0;
12158 ec->tx_coalesce_usecs_irq = 0;
12159 ec->stats_block_coalesce_usecs = 0;
12160 }
15f9850d
DM
12161}
12162
1da177e4
LT
12163static int __devinit tg3_init_one(struct pci_dev *pdev,
12164 const struct pci_device_id *ent)
12165{
12166 static int tg3_version_printed = 0;
12167 unsigned long tg3reg_base, tg3reg_len;
12168 struct net_device *dev;
12169 struct tg3 *tp;
72f2afb8 12170 int i, err, pm_cap;
f9804ddb 12171 char str[40];
72f2afb8 12172 u64 dma_mask, persist_dma_mask;
1da177e4
LT
12173
12174 if (tg3_version_printed++ == 0)
12175 printk(KERN_INFO "%s", version);
12176
12177 err = pci_enable_device(pdev);
12178 if (err) {
12179 printk(KERN_ERR PFX "Cannot enable PCI device, "
12180 "aborting.\n");
12181 return err;
12182 }
12183
12184 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12185 printk(KERN_ERR PFX "Cannot find proper PCI device "
12186 "base address, aborting.\n");
12187 err = -ENODEV;
12188 goto err_out_disable_pdev;
12189 }
12190
12191 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12192 if (err) {
12193 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12194 "aborting.\n");
12195 goto err_out_disable_pdev;
12196 }
12197
12198 pci_set_master(pdev);
12199
12200 /* Find power-management capability. */
12201 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12202 if (pm_cap == 0) {
12203 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12204 "aborting.\n");
12205 err = -EIO;
12206 goto err_out_free_res;
12207 }
12208
1da177e4
LT
12209 tg3reg_base = pci_resource_start(pdev, 0);
12210 tg3reg_len = pci_resource_len(pdev, 0);
12211
12212 dev = alloc_etherdev(sizeof(*tp));
12213 if (!dev) {
12214 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12215 err = -ENOMEM;
12216 goto err_out_free_res;
12217 }
12218
1da177e4
LT
12219 SET_NETDEV_DEV(dev, &pdev->dev);
12220
1da177e4
LT
12221#if TG3_VLAN_TAG_USED
12222 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12223 dev->vlan_rx_register = tg3_vlan_rx_register;
1da177e4
LT
12224#endif
12225
12226 tp = netdev_priv(dev);
12227 tp->pdev = pdev;
12228 tp->dev = dev;
12229 tp->pm_cap = pm_cap;
12230 tp->mac_mode = TG3_DEF_MAC_MODE;
12231 tp->rx_mode = TG3_DEF_RX_MODE;
12232 tp->tx_mode = TG3_DEF_TX_MODE;
12233 tp->mi_mode = MAC_MI_MODE_BASE;
12234 if (tg3_debug > 0)
12235 tp->msg_enable = tg3_debug;
12236 else
12237 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12238
12239 /* The word/byte swap controls here control register access byte
12240 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12241 * setting below.
12242 */
12243 tp->misc_host_ctrl =
12244 MISC_HOST_CTRL_MASK_PCI_INT |
12245 MISC_HOST_CTRL_WORD_SWAP |
12246 MISC_HOST_CTRL_INDIR_ACCESS |
12247 MISC_HOST_CTRL_PCISTATE_RW;
12248
12249 /* The NONFRM (non-frame) byte/word swap controls take effect
12250 * on descriptor entries, anything which isn't packet data.
12251 *
12252 * The StrongARM chips on the board (one for tx, one for rx)
12253 * are running in big-endian mode.
12254 */
12255 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12256 GRC_MODE_WSWAP_NONFRM_DATA);
12257#ifdef __BIG_ENDIAN
12258 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12259#endif
12260 spin_lock_init(&tp->lock);
1da177e4 12261 spin_lock_init(&tp->indirect_lock);
c4028958 12262 INIT_WORK(&tp->reset_task, tg3_reset_task);
1da177e4
LT
12263
12264 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
ab0049b4 12265 if (!tp->regs) {
1da177e4
LT
12266 printk(KERN_ERR PFX "Cannot map device registers, "
12267 "aborting.\n");
12268 err = -ENOMEM;
12269 goto err_out_free_dev;
12270 }
12271
12272 tg3_init_link_config(tp);
12273
1da177e4
LT
12274 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12275 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12276 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12277
12278 dev->open = tg3_open;
12279 dev->stop = tg3_close;
12280 dev->get_stats = tg3_get_stats;
12281 dev->set_multicast_list = tg3_set_rx_mode;
12282 dev->set_mac_address = tg3_set_mac_addr;
12283 dev->do_ioctl = tg3_ioctl;
12284 dev->tx_timeout = tg3_tx_timeout;
bea3348e 12285 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
1da177e4 12286 dev->ethtool_ops = &tg3_ethtool_ops;
1da177e4
LT
12287 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12288 dev->change_mtu = tg3_change_mtu;
12289 dev->irq = pdev->irq;
12290#ifdef CONFIG_NET_POLL_CONTROLLER
12291 dev->poll_controller = tg3_poll_controller;
12292#endif
12293
12294 err = tg3_get_invariants(tp);
12295 if (err) {
12296 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12297 "aborting.\n");
12298 goto err_out_iounmap;
12299 }
12300
4a29cc2e
MC
12301 /* The EPB bridge inside 5714, 5715, and 5780 and any
12302 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
12303 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12304 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12305 * do DMA address check in tg3_start_xmit().
12306 */
4a29cc2e
MC
12307 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12308 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12309 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
72f2afb8
MC
12310 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12311#ifdef CONFIG_HIGHMEM
12312 dma_mask = DMA_64BIT_MASK;
12313#endif
4a29cc2e 12314 } else
72f2afb8
MC
12315 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12316
12317 /* Configure DMA attributes. */
12318 if (dma_mask > DMA_32BIT_MASK) {
12319 err = pci_set_dma_mask(pdev, dma_mask);
12320 if (!err) {
12321 dev->features |= NETIF_F_HIGHDMA;
12322 err = pci_set_consistent_dma_mask(pdev,
12323 persist_dma_mask);
12324 if (err < 0) {
12325 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12326 "DMA for consistent allocations\n");
12327 goto err_out_iounmap;
12328 }
12329 }
12330 }
12331 if (err || dma_mask == DMA_32BIT_MASK) {
12332 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12333 if (err) {
12334 printk(KERN_ERR PFX "No usable DMA configuration, "
12335 "aborting.\n");
12336 goto err_out_iounmap;
12337 }
12338 }
12339
fdfec172 12340 tg3_init_bufmgr_config(tp);
1da177e4 12341
1da177e4
LT
12342 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12343 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12344 }
12345 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12347 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
c7835a77 12348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
1da177e4
LT
12349 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12350 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12351 } else {
7f62ad5d 12352 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
1da177e4
LT
12353 }
12354
4e3a7aaa
MC
12355 /* TSO is on by default on chips that support hardware TSO.
12356 * Firmware TSO on older chips gives lower performance, so it
12357 * is off by default, but can be enabled using ethtool.
12358 */
b0026624 12359 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
1da177e4 12360 dev->features |= NETIF_F_TSO;
b5d3772c
MC
12361 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12362 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
b0026624 12363 dev->features |= NETIF_F_TSO6;
9936bcf6
MC
12364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12365 dev->features |= NETIF_F_TSO_ECN;
b0026624 12366 }
1da177e4 12367
1da177e4
LT
12368
12369 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12370 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12371 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12372 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12373 tp->rx_pending = 63;
12374 }
12375
1da177e4
LT
12376 err = tg3_get_device_address(tp);
12377 if (err) {
12378 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12379 "aborting.\n");
12380 goto err_out_iounmap;
12381 }
12382
12383 /*
12384 * Reset chip in case UNDI or EFI driver did not shutdown
12385 * DMA self test will enable WDMAC and we'll see (spurious)
12386 * pending DMA on the PCI bus at that point.
12387 */
12388 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12389 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
1da177e4 12390 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 12391 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
12392 }
12393
12394 err = tg3_test_dma(tp);
12395 if (err) {
12396 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12397 goto err_out_iounmap;
12398 }
12399
12400 /* Tigon3 can do ipv4 only... and some chips have buggy
12401 * checksumming.
12402 */
12403 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
d212f87b 12404 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
af36e6b6 12405 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 12406 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
12407 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d212f87b
SH
12409 dev->features |= NETIF_F_IPV6_CSUM;
12410
1da177e4
LT
12411 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12412 } else
12413 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12414
1da177e4
LT
12415 /* flow control autonegotiation is default behavior */
12416 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12417
15f9850d
DM
12418 tg3_init_coal(tp);
12419
0d3031d9
MC
12420 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12421 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12422 printk(KERN_ERR PFX "Cannot find proper PCI device "
12423 "base address for APE, aborting.\n");
12424 err = -ENODEV;
12425 goto err_out_iounmap;
12426 }
12427
12428 tg3reg_base = pci_resource_start(pdev, 2);
12429 tg3reg_len = pci_resource_len(pdev, 2);
12430
12431 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12432 if (tp->aperegs == 0UL) {
12433 printk(KERN_ERR PFX "Cannot map APE registers, "
12434 "aborting.\n");
12435 err = -ENOMEM;
12436 goto err_out_iounmap;
12437 }
12438
12439 tg3_ape_lock_init(tp);
12440 }
12441
c49a1561
MC
12442 pci_set_drvdata(pdev, dev);
12443
1da177e4
LT
12444 err = register_netdev(dev);
12445 if (err) {
12446 printk(KERN_ERR PFX "Cannot register net device, "
12447 "aborting.\n");
0d3031d9 12448 goto err_out_apeunmap;
1da177e4
LT
12449 }
12450
cbb45d21 12451 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
1da177e4
LT
12452 dev->name,
12453 tp->board_part_number,
12454 tp->pci_chip_rev_id,
12455 tg3_phy_string(tp),
f9804ddb 12456 tg3_bus_string(tp, str),
cbb45d21
MC
12457 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12458 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12459 "10/100/1000Base-T")));
1da177e4
LT
12460
12461 for (i = 0; i < 6; i++)
12462 printk("%2.2x%c", dev->dev_addr[i],
12463 i == 5 ? '\n' : ':');
12464
12465 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
1c46ae05 12466 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
1da177e4
LT
12467 dev->name,
12468 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12469 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12470 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12471 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
1da177e4
LT
12472 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12473 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
4a29cc2e
MC
12474 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12475 dev->name, tp->dma_rwctrl,
12476 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12477 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
1da177e4
LT
12478
12479 return 0;
12480
0d3031d9
MC
12481err_out_apeunmap:
12482 if (tp->aperegs) {
12483 iounmap(tp->aperegs);
12484 tp->aperegs = NULL;
12485 }
12486
1da177e4 12487err_out_iounmap:
6892914f
MC
12488 if (tp->regs) {
12489 iounmap(tp->regs);
22abe310 12490 tp->regs = NULL;
6892914f 12491 }
1da177e4
LT
12492
12493err_out_free_dev:
12494 free_netdev(dev);
12495
12496err_out_free_res:
12497 pci_release_regions(pdev);
12498
12499err_out_disable_pdev:
12500 pci_disable_device(pdev);
12501 pci_set_drvdata(pdev, NULL);
12502 return err;
12503}
12504
12505static void __devexit tg3_remove_one(struct pci_dev *pdev)
12506{
12507 struct net_device *dev = pci_get_drvdata(pdev);
12508
12509 if (dev) {
12510 struct tg3 *tp = netdev_priv(dev);
12511
7faa006f 12512 flush_scheduled_work();
1da177e4 12513 unregister_netdev(dev);
0d3031d9
MC
12514 if (tp->aperegs) {
12515 iounmap(tp->aperegs);
12516 tp->aperegs = NULL;
12517 }
6892914f
MC
12518 if (tp->regs) {
12519 iounmap(tp->regs);
22abe310 12520 tp->regs = NULL;
6892914f 12521 }
1da177e4
LT
12522 free_netdev(dev);
12523 pci_release_regions(pdev);
12524 pci_disable_device(pdev);
12525 pci_set_drvdata(pdev, NULL);
12526 }
12527}
12528
12529static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12530{
12531 struct net_device *dev = pci_get_drvdata(pdev);
12532 struct tg3 *tp = netdev_priv(dev);
12533 int err;
12534
3e0c95fd
MC
12535 /* PCI register 4 needs to be saved whether netif_running() or not.
12536 * MSI address and data need to be saved if using MSI and
12537 * netif_running().
12538 */
12539 pci_save_state(pdev);
12540
1da177e4
LT
12541 if (!netif_running(dev))
12542 return 0;
12543
7faa006f 12544 flush_scheduled_work();
1da177e4
LT
12545 tg3_netif_stop(tp);
12546
12547 del_timer_sync(&tp->timer);
12548
f47c11ee 12549 tg3_full_lock(tp, 1);
1da177e4 12550 tg3_disable_ints(tp);
f47c11ee 12551 tg3_full_unlock(tp);
1da177e4
LT
12552
12553 netif_device_detach(dev);
12554
f47c11ee 12555 tg3_full_lock(tp, 0);
944d980e 12556 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6a9eba15 12557 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
f47c11ee 12558 tg3_full_unlock(tp);
1da177e4
LT
12559
12560 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
12561 if (err) {
f47c11ee 12562 tg3_full_lock(tp, 0);
1da177e4 12563
6a9eba15 12564 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
b9ec6c1b
MC
12565 if (tg3_restart_hw(tp, 1))
12566 goto out;
1da177e4
LT
12567
12568 tp->timer.expires = jiffies + tp->timer_offset;
12569 add_timer(&tp->timer);
12570
12571 netif_device_attach(dev);
12572 tg3_netif_start(tp);
12573
b9ec6c1b 12574out:
f47c11ee 12575 tg3_full_unlock(tp);
1da177e4
LT
12576 }
12577
12578 return err;
12579}
12580
12581static int tg3_resume(struct pci_dev *pdev)
12582{
12583 struct net_device *dev = pci_get_drvdata(pdev);
12584 struct tg3 *tp = netdev_priv(dev);
12585 int err;
12586
3e0c95fd
MC
12587 pci_restore_state(tp->pdev);
12588
1da177e4
LT
12589 if (!netif_running(dev))
12590 return 0;
12591
bc1c7567 12592 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
12593 if (err)
12594 return err;
12595
2fbe43f6
MC
12596 /* Hardware bug - MSI won't work if INTX disabled. */
12597 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
12598 (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
12599 pci_intx(tp->pdev, 1);
12600
1da177e4
LT
12601 netif_device_attach(dev);
12602
f47c11ee 12603 tg3_full_lock(tp, 0);
1da177e4 12604
6a9eba15 12605 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
b9ec6c1b
MC
12606 err = tg3_restart_hw(tp, 1);
12607 if (err)
12608 goto out;
1da177e4
LT
12609
12610 tp->timer.expires = jiffies + tp->timer_offset;
12611 add_timer(&tp->timer);
12612
1da177e4
LT
12613 tg3_netif_start(tp);
12614
b9ec6c1b 12615out:
f47c11ee 12616 tg3_full_unlock(tp);
1da177e4 12617
b9ec6c1b 12618 return err;
1da177e4
LT
12619}
12620
12621static struct pci_driver tg3_driver = {
12622 .name = DRV_MODULE_NAME,
12623 .id_table = tg3_pci_tbl,
12624 .probe = tg3_init_one,
12625 .remove = __devexit_p(tg3_remove_one),
12626 .suspend = tg3_suspend,
12627 .resume = tg3_resume
12628};
12629
12630static int __init tg3_init(void)
12631{
29917620 12632 return pci_register_driver(&tg3_driver);
1da177e4
LT
12633}
12634
12635static void __exit tg3_cleanup(void)
12636{
12637 pci_unregister_driver(&tg3_driver);
12638}
12639
12640module_init(tg3_init);
12641module_exit(tg3_cleanup);