]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[AF_PACKET]: Fix minor code duplication
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
65610fba 7 * Copyright (C) 2005-2007 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
14c85021 26#include <linux/in.h>
1da177e4
LT
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
f9a5f7d3 40#include <linux/dma-mapping.h>
1da177e4
LT
41
42#include <net/checksum.h>
c9bdd4b5 43#include <net/ip.h>
1da177e4
LT
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
49b6e95f 50#ifdef CONFIG_SPARC
1da177e4 51#include <asm/idprom.h>
49b6e95f 52#include <asm/prom.h>
1da177e4
LT
53#endif
54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56#define TG3_VLAN_TAG_USED 1
57#else
58#define TG3_VLAN_TAG_USED 0
59#endif
60
1da177e4 61#define TG3_TSO_SUPPORT 1
1da177e4
LT
62
63#include "tg3.h"
64
65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": "
33b0c4fe
MC
67#define DRV_MODULE_VERSION "3.85"
68#define DRV_MODULE_RELDATE "October 18, 2007"
1da177e4
LT
69
70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0
72#define TG3_DEF_TX_MODE 0
73#define TG3_DEF_MSG_ENABLE \
74 (NETIF_MSG_DRV | \
75 NETIF_MSG_PROBE | \
76 NETIF_MSG_LINK | \
77 NETIF_MSG_TIMER | \
78 NETIF_MSG_IFDOWN | \
79 NETIF_MSG_IFUP | \
80 NETIF_MSG_RX_ERR | \
81 NETIF_MSG_TX_ERR)
82
83/* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
85 */
86#define TG3_TX_TIMEOUT (5 * HZ)
87
88/* hardware minimum and maximum for a single frame's data payload */
89#define TG3_MIN_MTU 60
90#define TG3_MAX_MTU(tp) \
0f893dc6 91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
92
93/* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
96 */
97#define TG3_RX_RING_SIZE 512
98#define TG3_DEF_RX_RING_PENDING 200
99#define TG3_RX_JUMBO_RING_SIZE 256
100#define TG3_DEF_RX_JUMBO_RING_PENDING 100
101
102/* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
107 */
108#define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
110
111#define TG3_TX_RING_SIZE 512
112#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113
114#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 TG3_RX_RING_SIZE)
116#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
121 TG3_TX_RING_SIZE)
1da177e4
LT
122#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
126
127/* minimum number of free TX descriptors required to wake up TX process */
42952231 128#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
1da177e4
LT
129
130/* number of ETHTOOL_GSTATS u64's */
131#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
4cafd3f5
MC
133#define TG3_NUM_TEST 6
134
1da177e4
LT
135static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140MODULE_LICENSE("GPL");
141MODULE_VERSION(DRV_MODULE_VERSION);
142
143static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144module_param(tg3_debug, int, 0);
145MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147static struct pci_device_id tg3_pci_tbl[] = {
13185217
HK
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217
HK
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
676917d4 191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
13185217
HK
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
6c7af27c 203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
9936bcf6
MC
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
13185217
HK
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213 {}
1da177e4
LT
214};
215
216MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
50da859d 218static const struct {
1da177e4
LT
219 const char string[ETH_GSTRING_LEN];
220} ethtool_stats_keys[TG3_NUM_STATS] = {
221 { "rx_octets" },
222 { "rx_fragments" },
223 { "rx_ucast_packets" },
224 { "rx_mcast_packets" },
225 { "rx_bcast_packets" },
226 { "rx_fcs_errors" },
227 { "rx_align_errors" },
228 { "rx_xon_pause_rcvd" },
229 { "rx_xoff_pause_rcvd" },
230 { "rx_mac_ctrl_rcvd" },
231 { "rx_xoff_entered" },
232 { "rx_frame_too_long_errors" },
233 { "rx_jabbers" },
234 { "rx_undersize_packets" },
235 { "rx_in_length_errors" },
236 { "rx_out_length_errors" },
237 { "rx_64_or_less_octet_packets" },
238 { "rx_65_to_127_octet_packets" },
239 { "rx_128_to_255_octet_packets" },
240 { "rx_256_to_511_octet_packets" },
241 { "rx_512_to_1023_octet_packets" },
242 { "rx_1024_to_1522_octet_packets" },
243 { "rx_1523_to_2047_octet_packets" },
244 { "rx_2048_to_4095_octet_packets" },
245 { "rx_4096_to_8191_octet_packets" },
246 { "rx_8192_to_9022_octet_packets" },
247
248 { "tx_octets" },
249 { "tx_collisions" },
250
251 { "tx_xon_sent" },
252 { "tx_xoff_sent" },
253 { "tx_flow_control" },
254 { "tx_mac_errors" },
255 { "tx_single_collisions" },
256 { "tx_mult_collisions" },
257 { "tx_deferred" },
258 { "tx_excessive_collisions" },
259 { "tx_late_collisions" },
260 { "tx_collide_2times" },
261 { "tx_collide_3times" },
262 { "tx_collide_4times" },
263 { "tx_collide_5times" },
264 { "tx_collide_6times" },
265 { "tx_collide_7times" },
266 { "tx_collide_8times" },
267 { "tx_collide_9times" },
268 { "tx_collide_10times" },
269 { "tx_collide_11times" },
270 { "tx_collide_12times" },
271 { "tx_collide_13times" },
272 { "tx_collide_14times" },
273 { "tx_collide_15times" },
274 { "tx_ucast_packets" },
275 { "tx_mcast_packets" },
276 { "tx_bcast_packets" },
277 { "tx_carrier_sense_errors" },
278 { "tx_discards" },
279 { "tx_errors" },
280
281 { "dma_writeq_full" },
282 { "dma_write_prioq_full" },
283 { "rxbds_empty" },
284 { "rx_discards" },
285 { "rx_errors" },
286 { "rx_threshold_hit" },
287
288 { "dma_readq_full" },
289 { "dma_read_prioq_full" },
290 { "tx_comp_queue_full" },
291
292 { "ring_set_send_prod_index" },
293 { "ring_status_update" },
294 { "nic_irqs" },
295 { "nic_avoided_irqs" },
296 { "nic_tx_threshold_hit" }
297};
298
50da859d 299static const struct {
4cafd3f5
MC
300 const char string[ETH_GSTRING_LEN];
301} ethtool_test_keys[TG3_NUM_TEST] = {
302 { "nvram test (online) " },
303 { "link test (online) " },
304 { "register test (offline)" },
305 { "memory test (offline)" },
306 { "loopback test (offline)" },
307 { "interrupt test (offline)" },
308};
309
b401e9e2
MC
310static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311{
312 writel(val, tp->regs + off);
313}
314
315static u32 tg3_read32(struct tg3 *tp, u32 off)
316{
6aa20a22 317 return (readl(tp->regs + off));
b401e9e2
MC
318}
319
0d3031d9
MC
320static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321{
322 writel(val, tp->aperegs + off);
323}
324
325static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326{
327 return (readl(tp->aperegs + off));
328}
329
1da177e4
LT
330static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
331{
6892914f
MC
332 unsigned long flags;
333
334 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
335 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 337 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
338}
339
340static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
341{
342 writel(val, tp->regs + off);
343 readl(tp->regs + off);
1da177e4
LT
344}
345
6892914f 346static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 347{
6892914f
MC
348 unsigned long flags;
349 u32 val;
350
351 spin_lock_irqsave(&tp->indirect_lock, flags);
352 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354 spin_unlock_irqrestore(&tp->indirect_lock, flags);
355 return val;
356}
357
358static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
359{
360 unsigned long flags;
361
362 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
363 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
364 TG3_64BIT_REG_LOW, val);
365 return;
366 }
367 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
368 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
369 TG3_64BIT_REG_LOW, val);
370 return;
1da177e4 371 }
6892914f
MC
372
373 spin_lock_irqsave(&tp->indirect_lock, flags);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
375 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
376 spin_unlock_irqrestore(&tp->indirect_lock, flags);
377
378 /* In indirect mode when disabling interrupts, we also need
379 * to clear the interrupt bit in the GRC local ctrl register.
380 */
381 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
382 (val == 0x1)) {
383 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
384 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
385 }
386}
387
388static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
389{
390 unsigned long flags;
391 u32 val;
392
393 spin_lock_irqsave(&tp->indirect_lock, flags);
394 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396 spin_unlock_irqrestore(&tp->indirect_lock, flags);
397 return val;
398}
399
b401e9e2
MC
400/* usec_wait specifies the wait time in usec when writing to certain registers
401 * where it is unsafe to read back the register without some delay.
402 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404 */
405static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
6892914f 406{
b401e9e2
MC
407 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
408 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
409 /* Non-posted methods */
410 tp->write32(tp, off, val);
411 else {
412 /* Posted method */
413 tg3_write32(tp, off, val);
414 if (usec_wait)
415 udelay(usec_wait);
416 tp->read32(tp, off);
417 }
418 /* Wait again after the read for the posted method to guarantee that
419 * the wait time is met.
420 */
421 if (usec_wait)
422 udelay(usec_wait);
1da177e4
LT
423}
424
09ee929c
MC
425static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
426{
427 tp->write32_mbox(tp, off, val);
6892914f
MC
428 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430 tp->read32_mbox(tp, off);
09ee929c
MC
431}
432
20094930 433static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
434{
435 void __iomem *mbox = tp->regs + off;
436 writel(val, mbox);
437 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
438 writel(val, mbox);
439 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
440 readl(mbox);
441}
442
b5d3772c
MC
443static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444{
445 return (readl(tp->regs + off + GRCMBOX_BASE));
446}
447
448static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449{
450 writel(val, tp->regs + off + GRCMBOX_BASE);
451}
452
/* Register/mailbox accessor shorthands; all expect a "tp" in scope and
 * dispatch through the per-chip accessor function pointers.
 */
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
1da177e4
LT
463
464static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
465{
6892914f
MC
466 unsigned long flags;
467
b5d3772c
MC
468 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
469 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
470 return;
471
6892914f 472 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
473 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 476
bbadf503
MC
477 /* Always leave this as zero. */
478 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
479 } else {
480 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
481 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 482
bbadf503
MC
483 /* Always leave this as zero. */
484 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
485 }
486 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
487}
488
1da177e4
LT
489static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
490{
6892914f
MC
491 unsigned long flags;
492
b5d3772c
MC
493 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
494 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
495 *val = 0;
496 return;
497 }
498
6892914f 499 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
500 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 503
bbadf503
MC
504 /* Always leave this as zero. */
505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
506 } else {
507 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508 *val = tr32(TG3PCI_MEM_WIN_DATA);
509
510 /* Always leave this as zero. */
511 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
512 }
6892914f 513 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
514}
515
0d3031d9
MC
516static void tg3_ape_lock_init(struct tg3 *tp)
517{
518 int i;
519
520 /* Make sure the driver hasn't any stale locks. */
521 for (i = 0; i < 8; i++)
522 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523 APE_LOCK_GRANT_DRIVER);
524}
525
526static int tg3_ape_lock(struct tg3 *tp, int locknum)
527{
528 int i, off;
529 int ret = 0;
530 u32 status;
531
532 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533 return 0;
534
535 switch (locknum) {
536 case TG3_APE_LOCK_MEM:
537 break;
538 default:
539 return -EINVAL;
540 }
541
542 off = 4 * locknum;
543
544 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546 /* Wait for up to 1 millisecond to acquire lock. */
547 for (i = 0; i < 100; i++) {
548 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549 if (status == APE_LOCK_GRANT_DRIVER)
550 break;
551 udelay(10);
552 }
553
554 if (status != APE_LOCK_GRANT_DRIVER) {
555 /* Revoke the lock request. */
556 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557 APE_LOCK_GRANT_DRIVER);
558
559 ret = -EBUSY;
560 }
561
562 return ret;
563}
564
565static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566{
567 int off;
568
569 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570 return;
571
572 switch (locknum) {
573 case TG3_APE_LOCK_MEM:
574 break;
575 default:
576 return;
577 }
578
579 off = 4 * locknum;
580 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581}
582
1da177e4
LT
583static void tg3_disable_ints(struct tg3 *tp)
584{
585 tw32(TG3PCI_MISC_HOST_CTRL,
586 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c 587 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1da177e4
LT
588}
589
590static inline void tg3_cond_int(struct tg3 *tp)
591{
38f3843e
MC
592 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
593 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4 594 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
b5d3772c
MC
595 else
596 tw32(HOSTCC_MODE, tp->coalesce_mode |
597 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
598}
599
600static void tg3_enable_ints(struct tg3 *tp)
601{
bbe832c0
MC
602 tp->irq_sync = 0;
603 wmb();
604
1da177e4
LT
605 tw32(TG3PCI_MISC_HOST_CTRL,
606 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c
MC
607 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
608 (tp->last_tag << 24));
fcfa0a32
MC
609 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
610 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
611 (tp->last_tag << 24));
1da177e4
LT
612 tg3_cond_int(tp);
613}
614
04237ddd
MC
615static inline unsigned int tg3_has_work(struct tg3 *tp)
616{
617 struct tg3_hw_status *sblk = tp->hw_status;
618 unsigned int work_exists = 0;
619
620 /* check for phy events */
621 if (!(tp->tg3_flags &
622 (TG3_FLAG_USE_LINKCHG_REG |
623 TG3_FLAG_POLL_SERDES))) {
624 if (sblk->status & SD_STATUS_LINK_CHG)
625 work_exists = 1;
626 }
627 /* check for RX/TX work to do */
628 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630 work_exists = 1;
631
632 return work_exists;
633}
634
1da177e4 635/* tg3_restart_ints
04237ddd
MC
636 * similar to tg3_enable_ints, but it accurately determines whether there
637 * is new work pending and can return without flushing the PIO write
6aa20a22 638 * which reenables interrupts
1da177e4
LT
639 */
640static void tg3_restart_ints(struct tg3 *tp)
641{
fac9b83e
DM
642 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
643 tp->last_tag << 24);
1da177e4
LT
644 mmiowb();
645
fac9b83e
DM
646 /* When doing tagged status, this work check is unnecessary.
647 * The last_tag we write above tells the chip which piece of
648 * work we've completed.
649 */
650 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
651 tg3_has_work(tp))
04237ddd
MC
652 tw32(HOSTCC_MODE, tp->coalesce_mode |
653 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
654}
655
656static inline void tg3_netif_stop(struct tg3 *tp)
657{
bbe832c0 658 tp->dev->trans_start = jiffies; /* prevent tx timeout */
bea3348e 659 napi_disable(&tp->napi);
1da177e4
LT
660 netif_tx_disable(tp->dev);
661}
662
663static inline void tg3_netif_start(struct tg3 *tp)
664{
665 netif_wake_queue(tp->dev);
666 /* NOTE: unconditional netif_wake_queue is only appropriate
667 * so long as all callers are assured to have free tx slots
668 * (such as after tg3_init_hw)
669 */
bea3348e 670 napi_enable(&tp->napi);
f47c11ee
DM
671 tp->hw_status->status |= SD_STATUS_UPDATED;
672 tg3_enable_ints(tp);
1da177e4
LT
673}
674
675static void tg3_switch_clocks(struct tg3 *tp)
676{
677 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
678 u32 orig_clock_ctrl;
679
795d01c5
MC
680 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
681 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
4cf78e4f
MC
682 return;
683
1da177e4
LT
684 orig_clock_ctrl = clock_ctrl;
685 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
686 CLOCK_CTRL_CLKRUN_OENABLE |
687 0x1f);
688 tp->pci_clock_ctrl = clock_ctrl;
689
690 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
691 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
b401e9e2
MC
692 tw32_wait_f(TG3PCI_CLOCK_CTRL,
693 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1da177e4
LT
694 }
695 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
b401e9e2
MC
696 tw32_wait_f(TG3PCI_CLOCK_CTRL,
697 clock_ctrl |
698 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
699 40);
700 tw32_wait_f(TG3PCI_CLOCK_CTRL,
701 clock_ctrl | (CLOCK_CTRL_ALTCLK),
702 40);
1da177e4 703 }
b401e9e2 704 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1da177e4
LT
705}
706
707#define PHY_BUSY_LOOPS 5000
708
709static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
710{
711 u32 frame_val;
712 unsigned int loops;
713 int ret;
714
715 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
716 tw32_f(MAC_MI_MODE,
717 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
718 udelay(80);
719 }
720
721 *val = 0x0;
722
723 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
724 MI_COM_PHY_ADDR_MASK);
725 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
726 MI_COM_REG_ADDR_MASK);
727 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
6aa20a22 728
1da177e4
LT
729 tw32_f(MAC_MI_COM, frame_val);
730
731 loops = PHY_BUSY_LOOPS;
732 while (loops != 0) {
733 udelay(10);
734 frame_val = tr32(MAC_MI_COM);
735
736 if ((frame_val & MI_COM_BUSY) == 0) {
737 udelay(5);
738 frame_val = tr32(MAC_MI_COM);
739 break;
740 }
741 loops -= 1;
742 }
743
744 ret = -EBUSY;
745 if (loops != 0) {
746 *val = frame_val & MI_COM_DATA_MASK;
747 ret = 0;
748 }
749
750 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
751 tw32_f(MAC_MI_MODE, tp->mi_mode);
752 udelay(80);
753 }
754
755 return ret;
756}
757
758static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
759{
760 u32 frame_val;
761 unsigned int loops;
762 int ret;
763
b5d3772c
MC
764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
765 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
766 return 0;
767
1da177e4
LT
768 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
769 tw32_f(MAC_MI_MODE,
770 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
771 udelay(80);
772 }
773
774 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
775 MI_COM_PHY_ADDR_MASK);
776 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
777 MI_COM_REG_ADDR_MASK);
778 frame_val |= (val & MI_COM_DATA_MASK);
779 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
6aa20a22 780
1da177e4
LT
781 tw32_f(MAC_MI_COM, frame_val);
782
783 loops = PHY_BUSY_LOOPS;
784 while (loops != 0) {
785 udelay(10);
786 frame_val = tr32(MAC_MI_COM);
787 if ((frame_val & MI_COM_BUSY) == 0) {
788 udelay(5);
789 frame_val = tr32(MAC_MI_COM);
790 break;
791 }
792 loops -= 1;
793 }
794
795 ret = -EBUSY;
796 if (loops != 0)
797 ret = 0;
798
799 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
800 tw32_f(MAC_MI_MODE, tp->mi_mode);
801 udelay(80);
802 }
803
804 return ret;
805}
806
9ef8ca99
MC
807static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
808{
809 u32 phy;
810
811 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
812 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
813 return;
814
815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
816 u32 ephy;
817
818 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
819 tg3_writephy(tp, MII_TG3_EPHY_TEST,
820 ephy | MII_TG3_EPHY_SHADOW_EN);
821 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
822 if (enable)
823 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
824 else
825 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
826 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
827 }
828 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
829 }
830 } else {
831 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
832 MII_TG3_AUXCTL_SHDWSEL_MISC;
833 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
834 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
835 if (enable)
836 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
837 else
838 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
839 phy |= MII_TG3_AUXCTL_MISC_WREN;
840 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
841 }
842 }
843}
844
1da177e4
LT
845static void tg3_phy_set_wirespeed(struct tg3 *tp)
846{
847 u32 val;
848
849 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
850 return;
851
852 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
853 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
854 tg3_writephy(tp, MII_TG3_AUX_CTRL,
855 (val | (1 << 15) | (1 << 4)));
856}
857
858static int tg3_bmcr_reset(struct tg3 *tp)
859{
860 u32 phy_control;
861 int limit, err;
862
863 /* OK, reset it, and poll the BMCR_RESET bit until it
864 * clears or we time out.
865 */
866 phy_control = BMCR_RESET;
867 err = tg3_writephy(tp, MII_BMCR, phy_control);
868 if (err != 0)
869 return -EBUSY;
870
871 limit = 5000;
872 while (limit--) {
873 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874 if (err != 0)
875 return -EBUSY;
876
877 if ((phy_control & BMCR_RESET) == 0) {
878 udelay(40);
879 break;
880 }
881 udelay(10);
882 }
883 if (limit <= 0)
884 return -EBUSY;
885
886 return 0;
887}
888
889static int tg3_wait_macro_done(struct tg3 *tp)
890{
891 int limit = 100;
892
893 while (limit--) {
894 u32 tmp32;
895
896 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897 if ((tmp32 & 0x1000) == 0)
898 break;
899 }
900 }
901 if (limit <= 0)
902 return -EBUSY;
903
904 return 0;
905}
906
907static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
908{
909 static const u32 test_pat[4][6] = {
910 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
911 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
912 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
913 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
914 };
915 int chan;
916
917 for (chan = 0; chan < 4; chan++) {
918 int i;
919
920 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
921 (chan * 0x2000) | 0x0200);
922 tg3_writephy(tp, 0x16, 0x0002);
923
924 for (i = 0; i < 6; i++)
925 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
926 test_pat[chan][i]);
927
928 tg3_writephy(tp, 0x16, 0x0202);
929 if (tg3_wait_macro_done(tp)) {
930 *resetp = 1;
931 return -EBUSY;
932 }
933
934 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
935 (chan * 0x2000) | 0x0200);
936 tg3_writephy(tp, 0x16, 0x0082);
937 if (tg3_wait_macro_done(tp)) {
938 *resetp = 1;
939 return -EBUSY;
940 }
941
942 tg3_writephy(tp, 0x16, 0x0802);
943 if (tg3_wait_macro_done(tp)) {
944 *resetp = 1;
945 return -EBUSY;
946 }
947
948 for (i = 0; i < 6; i += 2) {
949 u32 low, high;
950
951 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
952 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
953 tg3_wait_macro_done(tp)) {
954 *resetp = 1;
955 return -EBUSY;
956 }
957 low &= 0x7fff;
958 high &= 0x000f;
959 if (low != test_pat[chan][i] ||
960 high != test_pat[chan][i+1]) {
961 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
962 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
963 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
964
965 return -EBUSY;
966 }
967 }
968 }
969
970 return 0;
971}
972
973static int tg3_phy_reset_chanpat(struct tg3 *tp)
974{
975 int chan;
976
977 for (chan = 0; chan < 4; chan++) {
978 int i;
979
980 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981 (chan * 0x2000) | 0x0200);
982 tg3_writephy(tp, 0x16, 0x0002);
983 for (i = 0; i < 6; i++)
984 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985 tg3_writephy(tp, 0x16, 0x0202);
986 if (tg3_wait_macro_done(tp))
987 return -EBUSY;
988 }
989
990 return 0;
991}
992
993static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
994{
995 u32 reg32, phy9_orig;
996 int retries, do_phy_reset, err;
997
998 retries = 10;
999 do_phy_reset = 1;
1000 do {
1001 if (do_phy_reset) {
1002 err = tg3_bmcr_reset(tp);
1003 if (err)
1004 return err;
1005 do_phy_reset = 0;
1006 }
1007
1008 /* Disable transmitter and interrupt. */
1009 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1010 continue;
1011
1012 reg32 |= 0x3000;
1013 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1014
1015 /* Set full-duplex, 1000 mbps. */
1016 tg3_writephy(tp, MII_BMCR,
1017 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1018
1019 /* Set to master mode. */
1020 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1021 continue;
1022
1023 tg3_writephy(tp, MII_TG3_CTRL,
1024 (MII_TG3_CTRL_AS_MASTER |
1025 MII_TG3_CTRL_ENABLE_AS_MASTER));
1026
1027 /* Enable SM_DSP_CLOCK and 6dB. */
1028 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1029
1030 /* Block the PHY control access. */
1031 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1032 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1033
1034 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1035 if (!err)
1036 break;
1037 } while (--retries);
1038
1039 err = tg3_phy_reset_chanpat(tp);
1040 if (err)
1041 return err;
1042
1043 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1044 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1045
1046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1047 tg3_writephy(tp, 0x16, 0x0000);
1048
1049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1051 /* Set Extended packet length bit for jumbo frames */
1052 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1053 }
1054 else {
1055 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1056 }
1057
1058 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1059
1060 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1061 reg32 &= ~0x3000;
1062 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1063 } else if (!err)
1064 err = -EBUSY;
1065
1066 return err;
1067}
1068
c8e1e82b
MC
1069static void tg3_link_report(struct tg3 *);
1070
1da177e4
LT
1071/* This will reset the tigon3 PHY if there is no valid
1072 * link unless the FORCE argument is non-zero.
1073 */
1074static int tg3_phy_reset(struct tg3 *tp)
1075{
1076 u32 phy_status;
1077 int err;
1078
60189ddf
MC
1079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1080 u32 val;
1081
1082 val = tr32(GRC_MISC_CFG);
1083 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1084 udelay(40);
1085 }
1da177e4
LT
1086 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1087 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1088 if (err != 0)
1089 return -EBUSY;
1090
c8e1e82b
MC
1091 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1092 netif_carrier_off(tp->dev);
1093 tg3_link_report(tp);
1094 }
1095
1da177e4
LT
1096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1099 err = tg3_phy_reset_5703_4_5(tp);
1100 if (err)
1101 return err;
1102 goto out;
1103 }
1104
1105 err = tg3_bmcr_reset(tp);
1106 if (err)
1107 return err;
1108
1109out:
1110 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1111 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1112 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1113 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1114 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1115 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1116 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1117 }
1118 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1119 tg3_writephy(tp, 0x1c, 0x8d68);
1120 tg3_writephy(tp, 0x1c, 0x8d68);
1121 }
1122 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1123 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1124 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1125 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1126 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1127 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1128 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1129 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1130 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1131 }
c424cb24
MC
1132 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1133 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1134 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
c1d2a196
MC
1135 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1136 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1137 tg3_writephy(tp, MII_TG3_TEST1,
1138 MII_TG3_TEST1_TRIM_EN | 0x4);
1139 } else
1140 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
c424cb24
MC
1141 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1142 }
1da177e4
LT
1143 /* Set Extended packet length bit (bit 14) on all chips that */
1144 /* support jumbo frames */
1145 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1146 /* Cannot do read-modify-write on 5401 */
1147 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
0f893dc6 1148 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1149 u32 phy_reg;
1150
1151 /* Set bit 14 with read-modify-write to preserve other bits */
1152 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1153 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1154 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1155 }
1156
1157 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1158 * jumbo frames transmission.
1159 */
0f893dc6 1160 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1161 u32 phy_reg;
1162
1163 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1164 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1165 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1166 }
1167
715116a1 1168 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1
MC
1169 /* adjust output voltage */
1170 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
715116a1
MC
1171 }
1172
9ef8ca99 1173 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
1174 tg3_phy_set_wirespeed(tp);
1175 return 0;
1176}
1177
1178static void tg3_frob_aux_power(struct tg3 *tp)
1179{
1180 struct tg3 *tp_peer = tp;
1181
9d26e213 1182 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1da177e4
LT
1183 return;
1184
8c2dc7e1
MC
1185 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1186 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1187 struct net_device *dev_peer;
1188
1189 dev_peer = pci_get_drvdata(tp->pdev_peer);
bc1c7567 1190 /* remove_one() may have been run on the peer. */
8c2dc7e1 1191 if (!dev_peer)
bc1c7567
MC
1192 tp_peer = tp;
1193 else
1194 tp_peer = netdev_priv(dev_peer);
1da177e4
LT
1195 }
1196
1da177e4 1197 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
6921d201
MC
1198 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1199 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1200 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1da177e4
LT
1201 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1202 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
b401e9e2
MC
1203 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1204 (GRC_LCLCTRL_GPIO_OE0 |
1205 GRC_LCLCTRL_GPIO_OE1 |
1206 GRC_LCLCTRL_GPIO_OE2 |
1207 GRC_LCLCTRL_GPIO_OUTPUT0 |
1208 GRC_LCLCTRL_GPIO_OUTPUT1),
1209 100);
1da177e4
LT
1210 } else {
1211 u32 no_gpio2;
dc56b7d4 1212 u32 grc_local_ctrl = 0;
1da177e4
LT
1213
1214 if (tp_peer != tp &&
1215 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1216 return;
1217
dc56b7d4
MC
1218 /* Workaround to prevent overdrawing Amps. */
1219 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1220 ASIC_REV_5714) {
1221 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
b401e9e2
MC
1222 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1223 grc_local_ctrl, 100);
dc56b7d4
MC
1224 }
1225
1da177e4
LT
1226 /* On 5753 and variants, GPIO2 cannot be used. */
1227 no_gpio2 = tp->nic_sram_data_cfg &
1228 NIC_SRAM_DATA_CFG_NO_GPIO2;
1229
dc56b7d4 1230 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1da177e4
LT
1231 GRC_LCLCTRL_GPIO_OE1 |
1232 GRC_LCLCTRL_GPIO_OE2 |
1233 GRC_LCLCTRL_GPIO_OUTPUT1 |
1234 GRC_LCLCTRL_GPIO_OUTPUT2;
1235 if (no_gpio2) {
1236 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1237 GRC_LCLCTRL_GPIO_OUTPUT2);
1238 }
b401e9e2
MC
1239 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1240 grc_local_ctrl, 100);
1da177e4
LT
1241
1242 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1243
b401e9e2
MC
1244 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1245 grc_local_ctrl, 100);
1da177e4
LT
1246
1247 if (!no_gpio2) {
1248 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
b401e9e2
MC
1249 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1250 grc_local_ctrl, 100);
1da177e4
LT
1251 }
1252 }
1253 } else {
1254 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1255 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1256 if (tp_peer != tp &&
1257 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1258 return;
1259
b401e9e2
MC
1260 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1261 (GRC_LCLCTRL_GPIO_OE1 |
1262 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4 1263
b401e9e2
MC
1264 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1265 GRC_LCLCTRL_GPIO_OE1, 100);
1da177e4 1266
b401e9e2
MC
1267 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1268 (GRC_LCLCTRL_GPIO_OE1 |
1269 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4
LT
1270 }
1271 }
1272}
1273
e8f3f6ca
MC
1274static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1275{
1276 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1277 return 1;
1278 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1279 if (speed != SPEED_10)
1280 return 1;
1281 } else if (speed == SPEED_10)
1282 return 1;
1283
1284 return 0;
1285}
1286
1da177e4
LT
1287static int tg3_setup_phy(struct tg3 *, int);
1288
1289#define RESET_KIND_SHUTDOWN 0
1290#define RESET_KIND_INIT 1
1291#define RESET_KIND_SUSPEND 2
1292
1293static void tg3_write_sig_post_reset(struct tg3 *, int);
1294static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1295static int tg3_nvram_lock(struct tg3 *);
1296static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1297
15c3b696
MC
1298static void tg3_power_down_phy(struct tg3 *tp)
1299{
5129724a
MC
1300 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1302 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1303 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1304
1305 sg_dig_ctrl |=
1306 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1307 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1308 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1309 }
3f7045c1 1310 return;
5129724a 1311 }
3f7045c1 1312
60189ddf
MC
1313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1314 u32 val;
1315
1316 tg3_bmcr_reset(tp);
1317 val = tr32(GRC_MISC_CFG);
1318 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1319 udelay(40);
1320 return;
1321 } else {
715116a1
MC
1322 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1323 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1324 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1325 }
3f7045c1 1326
15c3b696
MC
1327 /* The PHY should not be powered down on some chips because
1328 * of bugs.
1329 */
1330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1332 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1333 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1334 return;
1335 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1336}
1337
bc1c7567 1338static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1339{
1340 u32 misc_host_ctrl;
1341 u16 power_control, power_caps;
1342 int pm = tp->pm_cap;
1343
1344 /* Make sure register accesses (indirect or otherwise)
1345 * will function correctly.
1346 */
1347 pci_write_config_dword(tp->pdev,
1348 TG3PCI_MISC_HOST_CTRL,
1349 tp->misc_host_ctrl);
1350
1351 pci_read_config_word(tp->pdev,
1352 pm + PCI_PM_CTRL,
1353 &power_control);
1354 power_control |= PCI_PM_CTRL_PME_STATUS;
1355 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1356 switch (state) {
bc1c7567 1357 case PCI_D0:
1da177e4
LT
1358 power_control |= 0;
1359 pci_write_config_word(tp->pdev,
1360 pm + PCI_PM_CTRL,
1361 power_control);
8c6bda1a
MC
1362 udelay(100); /* Delay after power state change */
1363
9d26e213
MC
1364 /* Switch out of Vaux if it is a NIC */
1365 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
b401e9e2 1366 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1367
1368 return 0;
1369
bc1c7567 1370 case PCI_D1:
1da177e4
LT
1371 power_control |= 1;
1372 break;
1373
bc1c7567 1374 case PCI_D2:
1da177e4
LT
1375 power_control |= 2;
1376 break;
1377
bc1c7567 1378 case PCI_D3hot:
1da177e4
LT
1379 power_control |= 3;
1380 break;
1381
1382 default:
1383 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1384 "requested.\n",
1385 tp->dev->name, state);
1386 return -EINVAL;
1387 };
1388
1389 power_control |= PCI_PM_CTRL_PME_ENABLE;
1390
1391 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1392 tw32(TG3PCI_MISC_HOST_CTRL,
1393 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1394
1395 if (tp->link_config.phy_is_low_power == 0) {
1396 tp->link_config.phy_is_low_power = 1;
1397 tp->link_config.orig_speed = tp->link_config.speed;
1398 tp->link_config.orig_duplex = tp->link_config.duplex;
1399 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1400 }
1401
747e8f8b 1402 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1403 tp->link_config.speed = SPEED_10;
1404 tp->link_config.duplex = DUPLEX_HALF;
1405 tp->link_config.autoneg = AUTONEG_ENABLE;
1406 tg3_setup_phy(tp, 0);
1407 }
1408
b5d3772c
MC
1409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1410 u32 val;
1411
1412 val = tr32(GRC_VCPU_EXT_CTRL);
1413 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1414 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
6921d201
MC
1415 int i;
1416 u32 val;
1417
1418 for (i = 0; i < 200; i++) {
1419 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1420 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1421 break;
1422 msleep(1);
1423 }
1424 }
a85feb8c
GZ
1425 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1426 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1427 WOL_DRV_STATE_SHUTDOWN |
1428 WOL_DRV_WOL |
1429 WOL_SET_MAGIC_PKT);
6921d201 1430
1da177e4
LT
1431 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1432
1433 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1434 u32 mac_mode;
1435
1436 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1437 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1438 udelay(40);
1439
3f7045c1
MC
1440 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1441 mac_mode = MAC_MODE_PORT_MODE_GMII;
1442 else
1443 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 1444
e8f3f6ca
MC
1445 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1446 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1447 ASIC_REV_5700) {
1448 u32 speed = (tp->tg3_flags &
1449 TG3_FLAG_WOL_SPEED_100MB) ?
1450 SPEED_100 : SPEED_10;
1451 if (tg3_5700_link_polarity(tp, speed))
1452 mac_mode |= MAC_MODE_LINK_POLARITY;
1453 else
1454 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1455 }
1da177e4
LT
1456 } else {
1457 mac_mode = MAC_MODE_PORT_MODE_TBI;
1458 }
1459
cbf46853 1460 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1461 tw32(MAC_LED_CTRL, tp->led_ctrl);
1462
1463 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1464 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1465 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1466
1467 tw32_f(MAC_MODE, mac_mode);
1468 udelay(100);
1469
1470 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1471 udelay(10);
1472 }
1473
1474 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1475 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1477 u32 base_val;
1478
1479 base_val = tp->pci_clock_ctrl;
1480 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1481 CLOCK_CTRL_TXCLK_DISABLE);
1482
b401e9e2
MC
1483 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1484 CLOCK_CTRL_PWRDOWN_PLL133, 40);
d7b0a857 1485 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
795d01c5 1486 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
d7b0a857 1487 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
4cf78e4f 1488 /* do nothing */
85e94ced 1489 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1490 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1491 u32 newbits1, newbits2;
1492
1493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1494 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1495 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1496 CLOCK_CTRL_TXCLK_DISABLE |
1497 CLOCK_CTRL_ALTCLK);
1498 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1499 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1500 newbits1 = CLOCK_CTRL_625_CORE;
1501 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1502 } else {
1503 newbits1 = CLOCK_CTRL_ALTCLK;
1504 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1505 }
1506
b401e9e2
MC
1507 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1508 40);
1da177e4 1509
b401e9e2
MC
1510 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1511 40);
1da177e4
LT
1512
1513 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1514 u32 newbits3;
1515
1516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1518 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1519 CLOCK_CTRL_TXCLK_DISABLE |
1520 CLOCK_CTRL_44MHZ_CORE);
1521 } else {
1522 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1523 }
1524
b401e9e2
MC
1525 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1526 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1527 }
1528 }
1529
6921d201 1530 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
0d3031d9
MC
1531 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1532 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
3f7045c1 1533 tg3_power_down_phy(tp);
6921d201 1534
1da177e4
LT
1535 tg3_frob_aux_power(tp);
1536
1537 /* Workaround for unstable PLL clock */
1538 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1539 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1540 u32 val = tr32(0x7d00);
1541
1542 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1543 tw32(0x7d00, val);
6921d201 1544 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1545 int err;
1546
1547 err = tg3_nvram_lock(tp);
1da177e4 1548 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1549 if (!err)
1550 tg3_nvram_unlock(tp);
6921d201 1551 }
1da177e4
LT
1552 }
1553
bbadf503
MC
1554 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1555
1da177e4
LT
1556 /* Finally, set the new power state. */
1557 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1558 udelay(100); /* Delay after power state change */
1da177e4 1559
1da177e4
LT
1560 return 0;
1561}
1562
1563static void tg3_link_report(struct tg3 *tp)
1564{
1565 if (!netif_carrier_ok(tp->dev)) {
9f88f29f
MC
1566 if (netif_msg_link(tp))
1567 printk(KERN_INFO PFX "%s: Link is down.\n",
1568 tp->dev->name);
1569 } else if (netif_msg_link(tp)) {
1da177e4
LT
1570 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1571 tp->dev->name,
1572 (tp->link_config.active_speed == SPEED_1000 ?
1573 1000 :
1574 (tp->link_config.active_speed == SPEED_100 ?
1575 100 : 10)),
1576 (tp->link_config.active_duplex == DUPLEX_FULL ?
1577 "full" : "half"));
1578
1579 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1580 "%s for RX.\n",
1581 tp->dev->name,
1582 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1583 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1584 }
1585}
1586
1587static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1588{
1589 u32 new_tg3_flags = 0;
1590 u32 old_rx_mode = tp->rx_mode;
1591 u32 old_tx_mode = tp->tx_mode;
1592
1593 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1594
1595 /* Convert 1000BaseX flow control bits to 1000BaseT
1596 * bits before resolving flow control.
1597 */
1598 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1599 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1600 ADVERTISE_PAUSE_ASYM);
1601 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1602
1603 if (local_adv & ADVERTISE_1000XPAUSE)
1604 local_adv |= ADVERTISE_PAUSE_CAP;
1605 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1606 local_adv |= ADVERTISE_PAUSE_ASYM;
1607 if (remote_adv & LPA_1000XPAUSE)
1608 remote_adv |= LPA_PAUSE_CAP;
1609 if (remote_adv & LPA_1000XPAUSE_ASYM)
1610 remote_adv |= LPA_PAUSE_ASYM;
1611 }
1612
1da177e4
LT
1613 if (local_adv & ADVERTISE_PAUSE_CAP) {
1614 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1615 if (remote_adv & LPA_PAUSE_CAP)
1616 new_tg3_flags |=
1617 (TG3_FLAG_RX_PAUSE |
1618 TG3_FLAG_TX_PAUSE);
1619 else if (remote_adv & LPA_PAUSE_ASYM)
1620 new_tg3_flags |=
1621 (TG3_FLAG_RX_PAUSE);
1622 } else {
1623 if (remote_adv & LPA_PAUSE_CAP)
1624 new_tg3_flags |=
1625 (TG3_FLAG_RX_PAUSE |
1626 TG3_FLAG_TX_PAUSE);
1627 }
1628 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1629 if ((remote_adv & LPA_PAUSE_CAP) &&
1630 (remote_adv & LPA_PAUSE_ASYM))
1631 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1632 }
1633
1634 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1635 tp->tg3_flags |= new_tg3_flags;
1636 } else {
1637 new_tg3_flags = tp->tg3_flags;
1638 }
1639
1640 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1641 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1642 else
1643 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1644
1645 if (old_rx_mode != tp->rx_mode) {
1646 tw32_f(MAC_RX_MODE, tp->rx_mode);
1647 }
6aa20a22 1648
1da177e4
LT
1649 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1650 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1651 else
1652 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1653
1654 if (old_tx_mode != tp->tx_mode) {
1655 tw32_f(MAC_TX_MODE, tp->tx_mode);
1656 }
1657}
1658
1659static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1660{
1661 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1662 case MII_TG3_AUX_STAT_10HALF:
1663 *speed = SPEED_10;
1664 *duplex = DUPLEX_HALF;
1665 break;
1666
1667 case MII_TG3_AUX_STAT_10FULL:
1668 *speed = SPEED_10;
1669 *duplex = DUPLEX_FULL;
1670 break;
1671
1672 case MII_TG3_AUX_STAT_100HALF:
1673 *speed = SPEED_100;
1674 *duplex = DUPLEX_HALF;
1675 break;
1676
1677 case MII_TG3_AUX_STAT_100FULL:
1678 *speed = SPEED_100;
1679 *duplex = DUPLEX_FULL;
1680 break;
1681
1682 case MII_TG3_AUX_STAT_1000HALF:
1683 *speed = SPEED_1000;
1684 *duplex = DUPLEX_HALF;
1685 break;
1686
1687 case MII_TG3_AUX_STAT_1000FULL:
1688 *speed = SPEED_1000;
1689 *duplex = DUPLEX_FULL;
1690 break;
1691
1692 default:
715116a1
MC
1693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1694 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1695 SPEED_10;
1696 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1697 DUPLEX_HALF;
1698 break;
1699 }
1da177e4
LT
1700 *speed = SPEED_INVALID;
1701 *duplex = DUPLEX_INVALID;
1702 break;
1703 };
1704}
1705
1706static void tg3_phy_copper_begin(struct tg3 *tp)
1707{
1708 u32 new_adv;
1709 int i;
1710
1711 if (tp->link_config.phy_is_low_power) {
1712 /* Entering low power mode. Disable gigabit and
1713 * 100baseT advertisements.
1714 */
1715 tg3_writephy(tp, MII_TG3_CTRL, 0);
1716
1717 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1718 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1719 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1720 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1721
1722 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1723 } else if (tp->link_config.speed == SPEED_INVALID) {
1da177e4
LT
1724 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1725 tp->link_config.advertising &=
1726 ~(ADVERTISED_1000baseT_Half |
1727 ADVERTISED_1000baseT_Full);
1728
1729 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1730 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1731 new_adv |= ADVERTISE_10HALF;
1732 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1733 new_adv |= ADVERTISE_10FULL;
1734 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1735 new_adv |= ADVERTISE_100HALF;
1736 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1737 new_adv |= ADVERTISE_100FULL;
1738 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1739
1740 if (tp->link_config.advertising &
1741 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1742 new_adv = 0;
1743 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1744 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1745 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1746 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1747 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1748 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1749 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1750 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1751 MII_TG3_CTRL_ENABLE_AS_MASTER);
1752 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1753 } else {
1754 tg3_writephy(tp, MII_TG3_CTRL, 0);
1755 }
1756 } else {
1757 /* Asking for a specific link mode. */
1758 if (tp->link_config.speed == SPEED_1000) {
1759 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1760 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1761
1762 if (tp->link_config.duplex == DUPLEX_FULL)
1763 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1764 else
1765 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1766 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1767 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1768 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1769 MII_TG3_CTRL_ENABLE_AS_MASTER);
1770 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1771 } else {
1772 tg3_writephy(tp, MII_TG3_CTRL, 0);
1773
1774 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1775 if (tp->link_config.speed == SPEED_100) {
1776 if (tp->link_config.duplex == DUPLEX_FULL)
1777 new_adv |= ADVERTISE_100FULL;
1778 else
1779 new_adv |= ADVERTISE_100HALF;
1780 } else {
1781 if (tp->link_config.duplex == DUPLEX_FULL)
1782 new_adv |= ADVERTISE_10FULL;
1783 else
1784 new_adv |= ADVERTISE_10HALF;
1785 }
1786 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1787 }
1788 }
1789
1790 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1791 tp->link_config.speed != SPEED_INVALID) {
1792 u32 bmcr, orig_bmcr;
1793
1794 tp->link_config.active_speed = tp->link_config.speed;
1795 tp->link_config.active_duplex = tp->link_config.duplex;
1796
1797 bmcr = 0;
1798 switch (tp->link_config.speed) {
1799 default:
1800 case SPEED_10:
1801 break;
1802
1803 case SPEED_100:
1804 bmcr |= BMCR_SPEED100;
1805 break;
1806
1807 case SPEED_1000:
1808 bmcr |= TG3_BMCR_SPEED1000;
1809 break;
1810 };
1811
1812 if (tp->link_config.duplex == DUPLEX_FULL)
1813 bmcr |= BMCR_FULLDPLX;
1814
1815 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1816 (bmcr != orig_bmcr)) {
1817 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1818 for (i = 0; i < 1500; i++) {
1819 u32 tmp;
1820
1821 udelay(10);
1822 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1823 tg3_readphy(tp, MII_BMSR, &tmp))
1824 continue;
1825 if (!(tmp & BMSR_LSTATUS)) {
1826 udelay(40);
1827 break;
1828 }
1829 }
1830 tg3_writephy(tp, MII_BMCR, bmcr);
1831 udelay(40);
1832 }
1833 } else {
1834 tg3_writephy(tp, MII_BMCR,
1835 BMCR_ANENABLE | BMCR_ANRESTART);
1836 }
1837}
1838
1839static int tg3_init_5401phy_dsp(struct tg3 *tp)
1840{
1841 int err;
1842
1843 /* Turn off tap power management. */
1844 /* Set Extended packet length bit */
1845 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1846
1847 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1848 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1849
1850 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1851 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1852
1853 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1854 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1855
1856 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1857 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1858
1859 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1860 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1861
1862 udelay(40);
1863
1864 return err;
1865}
1866
3600d918 1867static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1da177e4 1868{
3600d918
MC
1869 u32 adv_reg, all_mask = 0;
1870
1871 if (mask & ADVERTISED_10baseT_Half)
1872 all_mask |= ADVERTISE_10HALF;
1873 if (mask & ADVERTISED_10baseT_Full)
1874 all_mask |= ADVERTISE_10FULL;
1875 if (mask & ADVERTISED_100baseT_Half)
1876 all_mask |= ADVERTISE_100HALF;
1877 if (mask & ADVERTISED_100baseT_Full)
1878 all_mask |= ADVERTISE_100FULL;
1da177e4
LT
1879
1880 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1881 return 0;
1882
1da177e4
LT
1883 if ((adv_reg & all_mask) != all_mask)
1884 return 0;
1885 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1886 u32 tg3_ctrl;
1887
3600d918
MC
1888 all_mask = 0;
1889 if (mask & ADVERTISED_1000baseT_Half)
1890 all_mask |= ADVERTISE_1000HALF;
1891 if (mask & ADVERTISED_1000baseT_Full)
1892 all_mask |= ADVERTISE_1000FULL;
1893
1da177e4
LT
1894 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1895 return 0;
1896
1da177e4
LT
1897 if ((tg3_ctrl & all_mask) != all_mask)
1898 return 0;
1899 }
1900 return 1;
1901}
1902
1903static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1904{
1905 int current_link_up;
1906 u32 bmsr, dummy;
1907 u16 current_speed;
1908 u8 current_duplex;
1909 int i, err;
1910
1911 tw32(MAC_EVENT, 0);
1912
1913 tw32_f(MAC_STATUS,
1914 (MAC_STATUS_SYNC_CHANGED |
1915 MAC_STATUS_CFG_CHANGED |
1916 MAC_STATUS_MI_COMPLETION |
1917 MAC_STATUS_LNKSTATE_CHANGED));
1918 udelay(40);
1919
1920 tp->mi_mode = MAC_MI_MODE_BASE;
1921 tw32_f(MAC_MI_MODE, tp->mi_mode);
1922 udelay(80);
1923
1924 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1925
1926 /* Some third-party PHYs need to be reset on link going
1927 * down.
1928 */
1929 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1932 netif_carrier_ok(tp->dev)) {
1933 tg3_readphy(tp, MII_BMSR, &bmsr);
1934 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1935 !(bmsr & BMSR_LSTATUS))
1936 force_reset = 1;
1937 }
1938 if (force_reset)
1939 tg3_phy_reset(tp);
1940
1941 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1942 tg3_readphy(tp, MII_BMSR, &bmsr);
1943 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1944 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1945 bmsr = 0;
1946
1947 if (!(bmsr & BMSR_LSTATUS)) {
1948 err = tg3_init_5401phy_dsp(tp);
1949 if (err)
1950 return err;
1951
1952 tg3_readphy(tp, MII_BMSR, &bmsr);
1953 for (i = 0; i < 1000; i++) {
1954 udelay(10);
1955 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1956 (bmsr & BMSR_LSTATUS)) {
1957 udelay(40);
1958 break;
1959 }
1960 }
1961
1962 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1963 !(bmsr & BMSR_LSTATUS) &&
1964 tp->link_config.active_speed == SPEED_1000) {
1965 err = tg3_phy_reset(tp);
1966 if (!err)
1967 err = tg3_init_5401phy_dsp(tp);
1968 if (err)
1969 return err;
1970 }
1971 }
1972 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1973 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1974 /* 5701 {A0,B0} CRC bug workaround */
1975 tg3_writephy(tp, 0x15, 0x0a75);
1976 tg3_writephy(tp, 0x1c, 0x8c68);
1977 tg3_writephy(tp, 0x1c, 0x8d68);
1978 tg3_writephy(tp, 0x1c, 0x8c68);
1979 }
1980
1981 /* Clear pending interrupts... */
1982 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1983 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1984
1985 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1986 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
715116a1 1987 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1da177e4
LT
1988 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1989
1990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1992 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1993 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1994 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1995 else
1996 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1997 }
1998
1999 current_link_up = 0;
2000 current_speed = SPEED_INVALID;
2001 current_duplex = DUPLEX_INVALID;
2002
2003 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2004 u32 val;
2005
2006 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2007 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2008 if (!(val & (1 << 10))) {
2009 val |= (1 << 10);
2010 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2011 goto relink;
2012 }
2013 }
2014
2015 bmsr = 0;
2016 for (i = 0; i < 100; i++) {
2017 tg3_readphy(tp, MII_BMSR, &bmsr);
2018 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2019 (bmsr & BMSR_LSTATUS))
2020 break;
2021 udelay(40);
2022 }
2023
2024 if (bmsr & BMSR_LSTATUS) {
2025 u32 aux_stat, bmcr;
2026
2027 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2028 for (i = 0; i < 2000; i++) {
2029 udelay(10);
2030 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2031 aux_stat)
2032 break;
2033 }
2034
2035 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2036 &current_speed,
2037 &current_duplex);
2038
2039 bmcr = 0;
2040 for (i = 0; i < 200; i++) {
2041 tg3_readphy(tp, MII_BMCR, &bmcr);
2042 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2043 continue;
2044 if (bmcr && bmcr != 0x7fff)
2045 break;
2046 udelay(10);
2047 }
2048
2049 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2050 if (bmcr & BMCR_ANENABLE) {
2051 current_link_up = 1;
2052
2053 /* Force autoneg restart if we are exiting
2054 * low power mode.
2055 */
3600d918
MC
2056 if (!tg3_copper_is_advertising_all(tp,
2057 tp->link_config.advertising))
1da177e4
LT
2058 current_link_up = 0;
2059 } else {
2060 current_link_up = 0;
2061 }
2062 } else {
2063 if (!(bmcr & BMCR_ANENABLE) &&
2064 tp->link_config.speed == current_speed &&
2065 tp->link_config.duplex == current_duplex) {
2066 current_link_up = 1;
2067 } else {
2068 current_link_up = 0;
2069 }
2070 }
2071
2072 tp->link_config.active_speed = current_speed;
2073 tp->link_config.active_duplex = current_duplex;
2074 }
2075
2076 if (current_link_up == 1 &&
2077 (tp->link_config.active_duplex == DUPLEX_FULL) &&
2078 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2079 u32 local_adv, remote_adv;
2080
2081 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2082 local_adv = 0;
2083 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2084
2085 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2086 remote_adv = 0;
2087
2088 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2089
2090 /* If we are not advertising full pause capability,
2091 * something is wrong. Bring the link down and reconfigure.
2092 */
2093 if (local_adv != ADVERTISE_PAUSE_CAP) {
2094 current_link_up = 0;
2095 } else {
2096 tg3_setup_flow_control(tp, local_adv, remote_adv);
2097 }
2098 }
2099relink:
6921d201 2100 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1da177e4
LT
2101 u32 tmp;
2102
2103 tg3_phy_copper_begin(tp);
2104
2105 tg3_readphy(tp, MII_BMSR, &tmp);
2106 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2107 (tmp & BMSR_LSTATUS))
2108 current_link_up = 1;
2109 }
2110
2111 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2112 if (current_link_up == 1) {
2113 if (tp->link_config.active_speed == SPEED_100 ||
2114 tp->link_config.active_speed == SPEED_10)
2115 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2116 else
2117 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2118 } else
2119 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2120
2121 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2122 if (tp->link_config.active_duplex == DUPLEX_HALF)
2123 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2124
1da177e4 2125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
2126 if (current_link_up == 1 &&
2127 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 2128 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
2129 else
2130 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
2131 }
2132
2133 /* ??? Without this setting Netgear GA302T PHY does not
2134 * ??? send/receive packets...
2135 */
2136 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2137 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2138 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2139 tw32_f(MAC_MI_MODE, tp->mi_mode);
2140 udelay(80);
2141 }
2142
2143 tw32_f(MAC_MODE, tp->mac_mode);
2144 udelay(40);
2145
2146 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2147 /* Polled via timer. */
2148 tw32_f(MAC_EVENT, 0);
2149 } else {
2150 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2151 }
2152 udelay(40);
2153
2154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2155 current_link_up == 1 &&
2156 tp->link_config.active_speed == SPEED_1000 &&
2157 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2158 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2159 udelay(120);
2160 tw32_f(MAC_STATUS,
2161 (MAC_STATUS_SYNC_CHANGED |
2162 MAC_STATUS_CFG_CHANGED));
2163 udelay(40);
2164 tg3_write_mem(tp,
2165 NIC_SRAM_FIRMWARE_MBOX,
2166 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2167 }
2168
2169 if (current_link_up != netif_carrier_ok(tp->dev)) {
2170 if (current_link_up)
2171 netif_carrier_on(tp->dev);
2172 else
2173 netif_carrier_off(tp->dev);
2174 tg3_link_report(tp);
2175 }
2176
2177 return 0;
2178}
2179
/* Software state for the 1000BASE-X (fiber) autonegotiation state
 * machine implemented in tg3_fiber_aneg_smachine().  The MR_* flag
 * bits mirror the management-register view of autoneg defined by
 * IEEE 802.3 Clause 37; the ANEG_CFG_* bits decode the 16-bit
 * config code word exchanged over the wire.
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;		/* MR_* control/status bits below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
	/* MR_LP_ADV_*: link partner abilities decoded from rxconfig */
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters in units of state-machine invocations; used to
	 * time the ANEG_STATE_SETTLE_TIME intervals between states.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last config word seen */
	int ability_match_count;	/* consecutive identical words */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* config code words sent/received */
#define ANEG_CFG_NP		0x00000080	/* next page */
#define ANEG_CFG_ACK		0x00000040	/* acknowledge */
#define ANEG_CFG_RF2		0x00000020	/* remote fault 2 */
#define ANEG_CFG_RF1		0x00000010	/* remote fault 1 */
#define ANEG_CFG_PS2		0x00000001	/* asym pause */
#define ANEG_CFG_PS1		0x00008000	/* sym pause */
#define ANEG_CFG_HD		0x00004000	/* half duplex */
#define ANEG_CFG_FD		0x00002000	/* full duplex */
#define ANEG_CFG_INVAL		0x00001f06	/* reserved bits: must be 0 */

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0	/* keep stepping the machine */
#define ANEG_DONE	1	/* negotiation finished */
#define ANEG_TIMER_ENAB	2	/* caller should keep its settle timer running */
#define ANEG_FAILED	-1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
2243
/* Run one step of the software 1000BASE-X autonegotiation state
 * machine (IEEE 802.3 Clause 37).  Called repeatedly from
 * fiber_autoneg(); all persistent state lives in @ap.
 *
 * Each call samples the received config word from the MAC, updates
 * the ability/ack/idle match trackers, then executes one transition
 * of the switch() below.  Returns ANEG_OK to continue, ANEG_TIMER_ENAB
 * when a settle interval is being timed, ANEG_DONE on completion, or
 * ANEG_FAILED on an invalid/failed exchange.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First invocation: zero all tracking state. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* ability_match becomes true only after the same config
		 * word has been received more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word being received: idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		/* Transmit an all-zero config word (restart). */
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Wait out the settle interval before ability detect. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		/* Advertise full duplex + symmetric pause. */
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Accept only if the acked word matches what we
			 * saw during ability detect (ignoring ACK bit).
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner restarted: start over. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reserved bits set in the received word => protocol error. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertised abilities. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange unsupported. */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		/* Stop transmitting config words. */
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2491
/* Drive a complete software fiber autonegotiation cycle by stepping
 * tg3_fiber_aneg_smachine() roughly every microsecond for up to
 * ~195ms.  On return, *@flags holds the final MR_* flag word from
 * the state machine.
 *
 * Returns 1 if negotiation completed and the partner advertised a
 * usable (full-duplex) link, 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force GMII port mode and start sending config words. */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* Busy-poll the state machine; ~195000 x 1us upper bound. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2535
/* One-time initialization sequence for the BCM8002 fiber PHY.
 * The register numbers/values are undocumented vendor magic taken
 * from Broadcom reference code; do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2585
/* Bring up a fiber link using the chip's hardware autonegotiation
 * engine (SG_DIG block).  @mac_status is a fresh MAC_STATUS sample
 * supplied by the caller.
 *
 * Handles forced mode (autoneg off), kicking off a new hardware
 * autoneg cycle, completion, and parallel-detection fallback when
 * the partner is not sending config code words.  The serdes_cfg
 * writes are a workaround needed on everything except 5704 A0/A1.
 *
 * Returns 1 if link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if the autoneg engine is still enabled
		 * (bit 31), shut it down.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Link came up by parallel detection: keep it as long
		 * as we stay PCS-synced without receiving config words.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse the autoneg-restart bit (30). */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* Bit 1: autoneg complete; bits 19/20: partner pause. */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Not complete yet: let the timeout counter run
			 * down before falling back to parallel detect.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2722
/* Bring up a fiber link without the hardware autoneg engine: either
 * run the software state machine via fiber_autoneg(), or force a
 * 1000FD link when autoneg is disabled.  @mac_status is a fresh
 * MAC_STATUS sample from the caller.
 *
 * Returns 1 if link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Nothing to do without PCS sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change events until they stay clear. */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we are synced and the partner is
		 * not sending config words: accept the link anyway
		 * (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
2779
/* Top-level link setup for TBI (fiber) ports.  Chooses between the
 * hardware autoneg engine and the by-hand software path, updates
 * carrier state, LEDs, and reports link changes.
 *
 * Always returns 0; @force_reset is accepted for signature parity
 * with the other tg3_setup_*_phy() variants but is unused here.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so we can report a change
	 * even when the carrier state itself does not flip.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: link already up and healthy, nothing to do but
	 * ack any latched change bits.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched change bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Nudge the partner by briefly sending configs. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged: still report if pause/speed/duplex
		 * settings moved underneath it.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2891
747e8f8b
MC
2892static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2893{
2894 int current_link_up, err = 0;
2895 u32 bmsr, bmcr;
2896 u16 current_speed;
2897 u8 current_duplex;
2898
2899 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2900 tw32_f(MAC_MODE, tp->mac_mode);
2901 udelay(40);
2902
2903 tw32(MAC_EVENT, 0);
2904
2905 tw32_f(MAC_STATUS,
2906 (MAC_STATUS_SYNC_CHANGED |
2907 MAC_STATUS_CFG_CHANGED |
2908 MAC_STATUS_MI_COMPLETION |
2909 MAC_STATUS_LNKSTATE_CHANGED));
2910 udelay(40);
2911
2912 if (force_reset)
2913 tg3_phy_reset(tp);
2914
2915 current_link_up = 0;
2916 current_speed = SPEED_INVALID;
2917 current_duplex = DUPLEX_INVALID;
2918
2919 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2920 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2922 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2923 bmsr |= BMSR_LSTATUS;
2924 else
2925 bmsr &= ~BMSR_LSTATUS;
2926 }
747e8f8b
MC
2927
2928 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2929
2930 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2931 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2932 /* do nothing, just check for link up at the end */
2933 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2934 u32 adv, new_adv;
2935
2936 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2937 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2938 ADVERTISE_1000XPAUSE |
2939 ADVERTISE_1000XPSE_ASYM |
2940 ADVERTISE_SLCT);
2941
2942 /* Always advertise symmetric PAUSE just like copper */
2943 new_adv |= ADVERTISE_1000XPAUSE;
2944
2945 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2946 new_adv |= ADVERTISE_1000XHALF;
2947 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2948 new_adv |= ADVERTISE_1000XFULL;
2949
2950 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2951 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2952 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2953 tg3_writephy(tp, MII_BMCR, bmcr);
2954
2955 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 2956 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
747e8f8b
MC
2957 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2958
2959 return err;
2960 }
2961 } else {
2962 u32 new_bmcr;
2963
2964 bmcr &= ~BMCR_SPEED1000;
2965 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2966
2967 if (tp->link_config.duplex == DUPLEX_FULL)
2968 new_bmcr |= BMCR_FULLDPLX;
2969
2970 if (new_bmcr != bmcr) {
2971 /* BMCR_SPEED1000 is a reserved bit that needs
2972 * to be set on write.
2973 */
2974 new_bmcr |= BMCR_SPEED1000;
2975
2976 /* Force a linkdown */
2977 if (netif_carrier_ok(tp->dev)) {
2978 u32 adv;
2979
2980 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2981 adv &= ~(ADVERTISE_1000XFULL |
2982 ADVERTISE_1000XHALF |
2983 ADVERTISE_SLCT);
2984 tg3_writephy(tp, MII_ADVERTISE, adv);
2985 tg3_writephy(tp, MII_BMCR, bmcr |
2986 BMCR_ANRESTART |
2987 BMCR_ANENABLE);
2988 udelay(10);
2989 netif_carrier_off(tp->dev);
2990 }
2991 tg3_writephy(tp, MII_BMCR, new_bmcr);
2992 bmcr = new_bmcr;
2993 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2994 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2995 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2996 ASIC_REV_5714) {
2997 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2998 bmsr |= BMSR_LSTATUS;
2999 else
3000 bmsr &= ~BMSR_LSTATUS;
3001 }
747e8f8b
MC
3002 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3003 }
3004 }
3005
3006 if (bmsr & BMSR_LSTATUS) {
3007 current_speed = SPEED_1000;
3008 current_link_up = 1;
3009 if (bmcr & BMCR_FULLDPLX)
3010 current_duplex = DUPLEX_FULL;
3011 else
3012 current_duplex = DUPLEX_HALF;
3013
3014 if (bmcr & BMCR_ANENABLE) {
3015 u32 local_adv, remote_adv, common;
3016
3017 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3018 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3019 common = local_adv & remote_adv;
3020 if (common & (ADVERTISE_1000XHALF |
3021 ADVERTISE_1000XFULL)) {
3022 if (common & ADVERTISE_1000XFULL)
3023 current_duplex = DUPLEX_FULL;
3024 else
3025 current_duplex = DUPLEX_HALF;
3026
3027 tg3_setup_flow_control(tp, local_adv,
3028 remote_adv);
3029 }
3030 else
3031 current_link_up = 0;
3032 }
3033 }
3034
3035 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3036 if (tp->link_config.active_duplex == DUPLEX_HALF)
3037 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3038
3039 tw32_f(MAC_MODE, tp->mac_mode);
3040 udelay(40);
3041
3042 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3043
3044 tp->link_config.active_speed = current_speed;
3045 tp->link_config.active_duplex = current_duplex;
3046
3047 if (current_link_up != netif_carrier_ok(tp->dev)) {
3048 if (current_link_up)
3049 netif_carrier_on(tp->dev);
3050 else {
3051 netif_carrier_off(tp->dev);
3052 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3053 }
3054 tg3_link_report(tp);
3055 }
3056 return err;
3057}
3058
/* Periodic poller for MII-serdes parallel link detection.  Runs after
 * the serdes autoneg timeout expires.  If the link is down, autoneg
 * is on, and the PHY sees signal-detect without incoming config code
 * words, force the link up at 1000FD (parallel detect).  Conversely,
 * if a parallel-detected link later starts receiving config words,
 * re-enable autoneg.
 *
 * The 0x1c/0x17/0x15 accesses are vendor shadow/expansion registers;
 * values are Broadcom reference magic.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3116
/* Dispatch link setup to the PHY-type-specific handler, then apply
 * settings common to all PHY types: TX inter-packet gap tuning for
 * 1000/half, statistics coalescing tick gating on link state, and
 * the ASPM L1 threshold workaround.
 *
 * Returns the error code of the underlying tg3_setup_*_phy() call.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000/half needs a longer slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: only coalesce statistics while link is up. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: lower the PCIe L1 entry threshold while the
	 * link is down, raise it while up.
	 */
	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3162
df3e6548
MC
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Recovery only makes sense if the write-reorder workaround is
	 * not already engaged; otherwise something else is wrong.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag is picked up by tg3_reset_task() / tg3_poll(), which
	 * perform the actual chip reset.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3183
1b2a7205
MC
3184static inline u32 tg3_tx_avail(struct tg3 *tp)
3185{
3186 smp_mb();
3187 return (tp->tx_pending -
3188 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3189}
3190
1da177e4
LT
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaims completed TX descriptors between tp->tx_cons and the
 * hardware consumer index, unmapping DMA and freeing skbs.  Called
 * from NAPI poll context (see tg3_poll_work).
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb here means the completion index is bogus,
		 * i.e. the chipset re-ordered mailbox writes.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor maps the linear head of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Subsequent descriptors map the page fragments. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Fragment slots must be empty and must not run
			 * past the hardware index; otherwise flag recovery.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to close the race with
	 * tg3_start_xmit() stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3258
3259/* Returns size of skb allocated or < 0 on error.
3260 *
3261 * We only need to fill in the address because the other members
3262 * of the RX descriptor are invariant, see tg3_init_rings.
3263 *
3264 * Note the purposeful assymetry of cpu vs. chip accesses. For
3265 * posting buffers we only dirty the first cache line of the RX
3266 * descriptor (containing the address). Whereas for the RX status
3267 * buffers the cpu only reads the last cacheline of the RX descriptor
3268 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3269 */
3270static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3271 int src_idx, u32 dest_idx_unmasked)
3272{
3273 struct tg3_rx_buffer_desc *desc;
3274 struct ring_info *map, *src_map;
3275 struct sk_buff *skb;
3276 dma_addr_t mapping;
3277 int skb_size, dest_idx;
3278
3279 src_map = NULL;
3280 switch (opaque_key) {
3281 case RXD_OPAQUE_RING_STD:
3282 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3283 desc = &tp->rx_std[dest_idx];
3284 map = &tp->rx_std_buffers[dest_idx];
3285 if (src_idx >= 0)
3286 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 3287 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
3288 break;
3289
3290 case RXD_OPAQUE_RING_JUMBO:
3291 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3292 desc = &tp->rx_jumbo[dest_idx];
3293 map = &tp->rx_jumbo_buffers[dest_idx];
3294 if (src_idx >= 0)
3295 src_map = &tp->rx_jumbo_buffers[src_idx];
3296 skb_size = RX_JUMBO_PKT_BUF_SZ;
3297 break;
3298
3299 default:
3300 return -EINVAL;
3301 };
3302
3303 /* Do not overwrite any of the map or rp information
3304 * until we are sure we can commit to a new buffer.
3305 *
3306 * Callers depend upon this behavior and assume that
3307 * we leave everything unchanged if we fail.
3308 */
a20e9c62 3309 skb = netdev_alloc_skb(tp->dev, skb_size);
1da177e4
LT
3310 if (skb == NULL)
3311 return -ENOMEM;
3312
1da177e4
LT
3313 skb_reserve(skb, tp->rx_offset);
3314
3315 mapping = pci_map_single(tp->pdev, skb->data,
3316 skb_size - tp->rx_offset,
3317 PCI_DMA_FROMDEVICE);
3318
3319 map->skb = skb;
3320 pci_unmap_addr_set(map, mapping, mapping);
3321
3322 if (src_map != NULL)
3323 src_map->skb = NULL;
3324
3325 desc->addr_hi = ((u64)mapping >> 32);
3326 desc->addr_lo = ((u64)mapping & 0xffffffff);
3327
3328 return skb_size;
3329}
3330
3331/* We only need to move over in the address because the other
3332 * members of the RX descriptor are invariant. See notes above
3333 * tg3_alloc_rx_skb for full details.
3334 */
3335static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3336 int src_idx, u32 dest_idx_unmasked)
3337{
3338 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3339 struct ring_info *src_map, *dest_map;
3340 int dest_idx;
3341
3342 switch (opaque_key) {
3343 case RXD_OPAQUE_RING_STD:
3344 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3345 dest_desc = &tp->rx_std[dest_idx];
3346 dest_map = &tp->rx_std_buffers[dest_idx];
3347 src_desc = &tp->rx_std[src_idx];
3348 src_map = &tp->rx_std_buffers[src_idx];
3349 break;
3350
3351 case RXD_OPAQUE_RING_JUMBO:
3352 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3353 dest_desc = &tp->rx_jumbo[dest_idx];
3354 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3355 src_desc = &tp->rx_jumbo[src_idx];
3356 src_map = &tp->rx_jumbo_buffers[src_idx];
3357 break;
3358
3359 default:
3360 return;
3361 };
3362
3363 dest_map->skb = src_map->skb;
3364 pci_unmap_addr_set(dest_map, mapping,
3365 pci_unmap_addr(src_map, mapping));
3366 dest_desc->addr_hi = src_desc->addr_hi;
3367 dest_desc->addr_lo = src_desc->addr_lo;
3368
3369 src_map->skb = NULL;
3370}
3371
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged receive up the stack via the hw-accel VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3378
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring
		 * (std or jumbo) the buffer came from and its index.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: hand the buffer up and replace it
			 * with a freshly allocated one in the ring.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a new skb and recycle
			 * the original ring buffer.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* 0xffff checksum means hardware validated TCP/UDP csum. */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Avoid posting too many std buffers in one burst; tell
		 * the chip about them early and restart the count.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3558
/* One pass of NAPI work: link-change handling, TX reclaim, then RX
 * within the remaining budget.  Returns the updated work_done count;
 * returns early if TX recovery is pending so the caller can reset.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear LINK_CHG while keeping UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3592
/* NAPI poll callback.  Loops doing work until either the budget is
 * exhausted, no work remains (interrupts are then re-enabled), or a
 * TX recovery is required (schedules the reset task).
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3633
f47c11ee
DM
/* Mark the IRQ handlers quiescent and wait for any in-flight handler
 * to finish.  The barrier orders the irq_sync store before
 * synchronize_irq() so a running handler observes it.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	/* Must not already be quiesced. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3643
/* Nonzero while IRQ handling is quiesced (see tg3_irq_quiesce). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3648
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3660
/* Counterpart of tg3_full_lock().  Note: does not clear irq_sync;
 * that is handled by the reset/open paths that set it.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3665
fcfa0a32
MC
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll routine will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3682
88b06bc2
MC
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll routine will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3707
/* Legacy INTx interrupt handler for untagged-status chips. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (shared line) or chip is
			 * resetting; leave it alone.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3756
/* INTx interrupt handler for chips using tagged status blocks; new
 * work is detected by comparing status_tag against tp->last_tag.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3804
7938109f 3805/* ISR for interrupt test */
7d12e780 3806static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7938109f
MC
3807{
3808 struct net_device *dev = dev_id;
3809 struct tg3 *tp = netdev_priv(dev);
3810 struct tg3_hw_status *sblk = tp->hw_status;
3811
f9804ddb
MC
3812 if ((sblk->status & SD_STATUS_UPDATED) ||
3813 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
b16250e3 3814 tg3_disable_ints(tp);
7938109f
MC
3815 return IRQ_RETVAL(1);
3816 }
3817 return IRQ_RETVAL(0);
3818}
3819
8e7a22e3 3820static int tg3_init_hw(struct tg3 *, int);
944d980e 3821static int tg3_halt(struct tg3 *, int, int);
1da177e4 3822
b9ec6c1b
MC
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  On failure the device is halted and
 * closed; the lock is dropped around dev_close() and re-taken so the
 * caller's locking assumptions still hold on return.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3844
1da177e4
LT
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler synchronously. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3853
/* Workqueue handler that fully resets and re-initializes the chip,
 * e.g. after a TX timeout or detected mailbox write re-ordering.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Device may have been closed since the work was queued. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* Engage the mailbox write re-ordering workaround permanently
	 * if TX recovery asked for it (see tg3_tx_recover).
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3894
b0408751
MC
/* Dump a few MAC/DMA status registers to aid TX timeout debugging. */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3902
1da177e4
LT
/* net_device watchdog callback: log state and schedule a chip reset. */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
3915
c58ec932
MC
3916/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3917static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3918{
3919 u32 base = (u32) mapping & 0xffffffff;
3920
3921 return ((base > 0xffffdcc0) &&
3922 (base + len + 8 < base));
3923}
3924
72f2afb8
MC
/* Test for DMA addresses > 40-bit.  Only relevant on 64-bit highmem
 * configs for chips with the 40-bit DMA bug; otherwise always false.
 */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3937
1da177e4
LT
3938static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3939
72f2afb8
MC
3940/* Workaround 4GB and 40-bit hardware DMA bugs. */
3941static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
c58ec932
MC
3942 u32 last_plus_one, u32 *start,
3943 u32 base_flags, u32 mss)
1da177e4
LT
3944{
3945 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
c58ec932 3946 dma_addr_t new_addr = 0;
1da177e4 3947 u32 entry = *start;
c58ec932 3948 int i, ret = 0;
1da177e4
LT
3949
3950 if (!new_skb) {
c58ec932
MC
3951 ret = -1;
3952 } else {
3953 /* New SKB is guaranteed to be linear. */
3954 entry = *start;
3955 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3956 PCI_DMA_TODEVICE);
3957 /* Make sure new skb does not cross any 4G boundaries.
3958 * Drop the packet if it does.
3959 */
3960 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3961 ret = -1;
3962 dev_kfree_skb(new_skb);
3963 new_skb = NULL;
3964 } else {
3965 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3966 base_flags, 1 | (mss << 1));
3967 *start = NEXT_TX(entry);
3968 }
1da177e4
LT
3969 }
3970
1da177e4
LT
3971 /* Now clean up the sw ring entries. */
3972 i = 0;
3973 while (entry != last_plus_one) {
3974 int len;
3975
3976 if (i == 0)
3977 len = skb_headlen(skb);
3978 else
3979 len = skb_shinfo(skb)->frags[i-1].size;
3980 pci_unmap_single(tp->pdev,
3981 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3982 len, PCI_DMA_TODEVICE);
3983 if (i == 0) {
3984 tp->tx_buffers[entry].skb = new_skb;
3985 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3986 } else {
3987 tp->tx_buffers[entry].skb = NULL;
3988 }
3989 entry = NEXT_TX(entry);
3990 i++;
3991 }
3992
3993 dev_kfree_skb(skb);
3994
c58ec932 3995 return ret;
1da177e4
LT
3996}
3997
3998static void tg3_set_txd(struct tg3 *tp, int entry,
3999 dma_addr_t mapping, int len, u32 flags,
4000 u32 mss_and_is_end)
4001{
4002 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4003 int is_end = (mss_and_is_end & 0x1);
4004 u32 mss = (mss_and_is_end >> 1);
4005 u32 vlan_tag = 0;
4006
4007 if (is_end)
4008 flags |= TXD_FLAG_END;
4009 if (flags & TXD_FLAG_VLAN) {
4010 vlan_tag = flags >> 16;
4011 flags &= 0xffff;
4012 }
4013 vlan_tag |= (mss << TXD_MSS_SHIFT);
4014
4015 txd->addr_hi = ((u64) mapping >> 32);
4016 txd->addr_lo = ((u64) mapping & 0xffffffff);
4017 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4018 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4019}
4020
5a6f3074
MC
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* Must own the header to mangle it for TSO below. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Hardware TSO: encode the header length into the upper
		 * bits of the mss field and pre-clear the checksums the
		 * chip will compute.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	/* Stop the queue if a max-fragment packet could no longer fit;
	 * re-check after tg3_tx() may have freed space (see smp_mb there).
	 */
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4139
52c0fd83
MC
4140static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4141
4142/* Use GSO to workaround a rare TSO bug that may be triggered when the
4143 * TSO header is greater than 80 bytes.
4144 */
4145static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4146{
4147 struct sk_buff *segs, *nskb;
4148
4149 /* Estimate the number of fragments in the worst case */
1b2a7205 4150 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
52c0fd83 4151 netif_stop_queue(tp->dev);
7f62ad5d
MC
4152 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4153 return NETDEV_TX_BUSY;
4154
4155 netif_wake_queue(tp->dev);
52c0fd83
MC
4156 }
4157
4158 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4159 if (unlikely(IS_ERR(segs)))
4160 goto tg3_tso_bug_end;
4161
4162 do {
4163 nskb = segs;
4164 segs = segs->next;
4165 nskb->next = NULL;
4166 tg3_start_xmit_dma_bug(nskb, tp->dev);
4167 } while (segs);
4168
4169tg3_tso_bug_end:
4170 dev_kfree_skb(skb);
4171
4172 return NETDEV_TX_OK;
4173}
52c0fd83 4174
5a6f3074
MC
4175/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4176 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4177 */
4178static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
4179{
4180 struct tg3 *tp = netdev_priv(dev);
4181 dma_addr_t mapping;
1da177e4
LT
4182 u32 len, entry, base_flags, mss;
4183 int would_hit_hwbug;
1da177e4
LT
4184
4185 len = skb_headlen(skb);
4186
00b70504 4187 /* We are running in BH disabled context with netif_tx_lock
bea3348e 4188 * and TX reclaim runs via tp->napi.poll inside of a software
f47c11ee
DM
4189 * interrupt. Furthermore, IRQ processing runs lockless so we have
4190 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 4191 */
1b2a7205 4192 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1f064a87
SH
4193 if (!netif_queue_stopped(dev)) {
4194 netif_stop_queue(dev);
4195
4196 /* This is a hard error, log it. */
4197 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4198 "queue awake!\n", dev->name);
4199 }
1da177e4
LT
4200 return NETDEV_TX_BUSY;
4201 }
4202
4203 entry = tp->tx_prod;
4204 base_flags = 0;
84fa7933 4205 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4 4206 base_flags |= TXD_FLAG_TCPUDP_CSUM;
1da177e4 4207 mss = 0;
c13e3713 4208 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
eddc9ec5 4209 struct iphdr *iph;
52c0fd83 4210 int tcp_opt_len, ip_tcp_len, hdr_len;
1da177e4
LT
4211
4212 if (skb_header_cloned(skb) &&
4213 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4214 dev_kfree_skb(skb);
4215 goto out_unlock;
4216 }
4217
ab6a5bb6 4218 tcp_opt_len = tcp_optlen(skb);
c9bdd4b5 4219 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
1da177e4 4220
52c0fd83
MC
4221 hdr_len = ip_tcp_len + tcp_opt_len;
4222 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7f62ad5d 4223 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
52c0fd83
MC
4224 return (tg3_tso_bug(tp, skb));
4225
1da177e4
LT
4226 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4227 TXD_FLAG_CPU_POST_DMA);
4228
eddc9ec5
ACM
4229 iph = ip_hdr(skb);
4230 iph->check = 0;
4231 iph->tot_len = htons(mss + hdr_len);
1da177e4 4232 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
aa8223c7 4233 tcp_hdr(skb)->check = 0;
1da177e4 4234 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
aa8223c7
ACM
4235 } else
4236 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4237 iph->daddr, 0,
4238 IPPROTO_TCP,
4239 0);
1da177e4
LT
4240
4241 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4242 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
eddc9ec5 4243 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
4244 int tsflags;
4245
eddc9ec5 4246 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
4247 mss |= (tsflags << 11);
4248 }
4249 } else {
eddc9ec5 4250 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
4251 int tsflags;
4252
eddc9ec5 4253 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
4254 base_flags |= tsflags << 12;
4255 }
4256 }
4257 }
1da177e4
LT
4258#if TG3_VLAN_TAG_USED
4259 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4260 base_flags |= (TXD_FLAG_VLAN |
4261 (vlan_tx_tag_get(skb) << 16));
4262#endif
4263
4264 /* Queue skb data, a.k.a. the main skb fragment. */
4265 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4266
4267 tp->tx_buffers[entry].skb = skb;
4268 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4269
4270 would_hit_hwbug = 0;
4271
4272 if (tg3_4g_overflow_test(mapping, len))
c58ec932 4273 would_hit_hwbug = 1;
1da177e4
LT
4274
4275 tg3_set_txd(tp, entry, mapping, len, base_flags,
4276 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4277
4278 entry = NEXT_TX(entry);
4279
4280 /* Now loop through additional data fragments, and queue them. */
4281 if (skb_shinfo(skb)->nr_frags > 0) {
4282 unsigned int i, last;
4283
4284 last = skb_shinfo(skb)->nr_frags - 1;
4285 for (i = 0; i <= last; i++) {
4286 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4287
4288 len = frag->size;
4289 mapping = pci_map_page(tp->pdev,
4290 frag->page,
4291 frag->page_offset,
4292 len, PCI_DMA_TODEVICE);
4293
4294 tp->tx_buffers[entry].skb = NULL;
4295 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4296
c58ec932
MC
4297 if (tg3_4g_overflow_test(mapping, len))
4298 would_hit_hwbug = 1;
1da177e4 4299
72f2afb8
MC
4300 if (tg3_40bit_overflow_test(tp, mapping, len))
4301 would_hit_hwbug = 1;
4302
1da177e4
LT
4303 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4304 tg3_set_txd(tp, entry, mapping, len,
4305 base_flags, (i == last)|(mss << 1));
4306 else
4307 tg3_set_txd(tp, entry, mapping, len,
4308 base_flags, (i == last));
4309
4310 entry = NEXT_TX(entry);
4311 }
4312 }
4313
4314 if (would_hit_hwbug) {
4315 u32 last_plus_one = entry;
4316 u32 start;
1da177e4 4317
c58ec932
MC
4318 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4319 start &= (TG3_TX_RING_SIZE - 1);
1da177e4
LT
4320
4321 /* If the workaround fails due to memory/mapping
4322 * failure, silently drop this packet.
4323 */
72f2afb8 4324 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
c58ec932 4325 &start, base_flags, mss))
1da177e4
LT
4326 goto out_unlock;
4327
4328 entry = start;
4329 }
4330
4331 /* Packets are ready, update Tx producer idx local and on card. */
4332 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4333
4334 tp->tx_prod = entry;
1b2a7205 4335 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
1da177e4 4336 netif_stop_queue(dev);
42952231 4337 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
51b91468
MC
4338 netif_wake_queue(tp->dev);
4339 }
1da177e4
LT
4340
4341out_unlock:
4342 mmiowb();
1da177e4
LT
4343
4344 dev->trans_start = jiffies;
4345
4346 return NETDEV_TX_OK;
4347}
4348
4349static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4350 int new_mtu)
4351{
4352 dev->mtu = new_mtu;
4353
ef7f5ec0 4354 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4355 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4356 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4357 ethtool_op_set_tso(dev, 0);
4358 }
4359 else
4360 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4361 } else {
a4e2b347 4362 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4363 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4364 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4365 }
1da177e4
LT
4366}
4367
4368static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4369{
4370 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 4371 int err;
1da177e4
LT
4372
4373 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4374 return -EINVAL;
4375
4376 if (!netif_running(dev)) {
4377 /* We'll just catch it later when the
4378 * device is up'd.
4379 */
4380 tg3_set_mtu(dev, tp, new_mtu);
4381 return 0;
4382 }
4383
4384 tg3_netif_stop(tp);
f47c11ee
DM
4385
4386 tg3_full_lock(tp, 1);
1da177e4 4387
944d980e 4388 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
4389
4390 tg3_set_mtu(dev, tp, new_mtu);
4391
b9ec6c1b 4392 err = tg3_restart_hw(tp, 0);
1da177e4 4393
b9ec6c1b
MC
4394 if (!err)
4395 tg3_netif_start(tp);
1da177e4 4396
f47c11ee 4397 tg3_full_unlock(tp);
1da177e4 4398
b9ec6c1b 4399 return err;
1da177e4
LT
4400}
4401
4402/* Free up pending packets in all rx/tx rings.
4403 *
4404 * The chip has been shut down and the driver detached from
4405 * the networking, so no interrupts or new tx packets will
4406 * end up in the driver. tp->{tx,}lock is not held and we are not
4407 * in an interrupt context and thus may sleep.
4408 */
4409static void tg3_free_rings(struct tg3 *tp)
4410{
4411 struct ring_info *rxp;
4412 int i;
4413
4414 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4415 rxp = &tp->rx_std_buffers[i];
4416
4417 if (rxp->skb == NULL)
4418 continue;
4419 pci_unmap_single(tp->pdev,
4420 pci_unmap_addr(rxp, mapping),
7e72aad4 4421 tp->rx_pkt_buf_sz - tp->rx_offset,
1da177e4
LT
4422 PCI_DMA_FROMDEVICE);
4423 dev_kfree_skb_any(rxp->skb);
4424 rxp->skb = NULL;
4425 }
4426
4427 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4428 rxp = &tp->rx_jumbo_buffers[i];
4429
4430 if (rxp->skb == NULL)
4431 continue;
4432 pci_unmap_single(tp->pdev,
4433 pci_unmap_addr(rxp, mapping),
4434 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4435 PCI_DMA_FROMDEVICE);
4436 dev_kfree_skb_any(rxp->skb);
4437 rxp->skb = NULL;
4438 }
4439
4440 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4441 struct tx_ring_info *txp;
4442 struct sk_buff *skb;
4443 int j;
4444
4445 txp = &tp->tx_buffers[i];
4446 skb = txp->skb;
4447
4448 if (skb == NULL) {
4449 i++;
4450 continue;
4451 }
4452
4453 pci_unmap_single(tp->pdev,
4454 pci_unmap_addr(txp, mapping),
4455 skb_headlen(skb),
4456 PCI_DMA_TODEVICE);
4457 txp->skb = NULL;
4458
4459 i++;
4460
4461 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4462 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4463 pci_unmap_page(tp->pdev,
4464 pci_unmap_addr(txp, mapping),
4465 skb_shinfo(skb)->frags[j].size,
4466 PCI_DMA_TODEVICE);
4467 i++;
4468 }
4469
4470 dev_kfree_skb_any(skb);
4471 }
4472}
4473
4474/* Initialize tx/rx rings for packet processing.
4475 *
4476 * The chip has been shut down and the driver detached from
4477 * the networking, so no interrupts or new tx packets will
4478 * end up in the driver. tp->{tx,}lock are held and thus
4479 * we may not sleep.
4480 */
32d8c572 4481static int tg3_init_rings(struct tg3 *tp)
1da177e4
LT
4482{
4483 u32 i;
4484
4485 /* Free up all the SKBs. */
4486 tg3_free_rings(tp);
4487
4488 /* Zero out all descriptors. */
4489 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4490 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4491 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4492 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4493
7e72aad4 4494 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
a4e2b347 4495 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
7e72aad4
MC
4496 (tp->dev->mtu > ETH_DATA_LEN))
4497 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4498
1da177e4
LT
4499 /* Initialize invariants of the rings, we only set this
4500 * stuff once. This works because the card does not
4501 * write into the rx buffer posting rings.
4502 */
4503 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4504 struct tg3_rx_buffer_desc *rxd;
4505
4506 rxd = &tp->rx_std[i];
7e72aad4 4507 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
1da177e4
LT
4508 << RXD_LEN_SHIFT;
4509 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4510 rxd->opaque = (RXD_OPAQUE_RING_STD |
4511 (i << RXD_OPAQUE_INDEX_SHIFT));
4512 }
4513
0f893dc6 4514 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
4515 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4516 struct tg3_rx_buffer_desc *rxd;
4517
4518 rxd = &tp->rx_jumbo[i];
4519 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4520 << RXD_LEN_SHIFT;
4521 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4522 RXD_FLAG_JUMBO;
4523 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4524 (i << RXD_OPAQUE_INDEX_SHIFT));
4525 }
4526 }
4527
4528 /* Now allocate fresh SKBs for each rx ring. */
4529 for (i = 0; i < tp->rx_pending; i++) {
32d8c572
MC
4530 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4531 printk(KERN_WARNING PFX
4532 "%s: Using a smaller RX standard ring, "
4533 "only %d out of %d buffers were allocated "
4534 "successfully.\n",
4535 tp->dev->name, i, tp->rx_pending);
4536 if (i == 0)
4537 return -ENOMEM;
4538 tp->rx_pending = i;
1da177e4 4539 break;
32d8c572 4540 }
1da177e4
LT
4541 }
4542
0f893dc6 4543 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
4544 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4545 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
32d8c572
MC
4546 -1, i) < 0) {
4547 printk(KERN_WARNING PFX
4548 "%s: Using a smaller RX jumbo ring, "
4549 "only %d out of %d buffers were "
4550 "allocated successfully.\n",
4551 tp->dev->name, i, tp->rx_jumbo_pending);
4552 if (i == 0) {
4553 tg3_free_rings(tp);
4554 return -ENOMEM;
4555 }
4556 tp->rx_jumbo_pending = i;
1da177e4 4557 break;
32d8c572 4558 }
1da177e4
LT
4559 }
4560 }
32d8c572 4561 return 0;
1da177e4
LT
4562}
4563
4564/*
4565 * Must not be invoked with interrupt sources disabled and
4566 * the hardware shutdown down.
4567 */
4568static void tg3_free_consistent(struct tg3 *tp)
4569{
b4558ea9
JJ
4570 kfree(tp->rx_std_buffers);
4571 tp->rx_std_buffers = NULL;
1da177e4
LT
4572 if (tp->rx_std) {
4573 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4574 tp->rx_std, tp->rx_std_mapping);
4575 tp->rx_std = NULL;
4576 }
4577 if (tp->rx_jumbo) {
4578 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4579 tp->rx_jumbo, tp->rx_jumbo_mapping);
4580 tp->rx_jumbo = NULL;
4581 }
4582 if (tp->rx_rcb) {
4583 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4584 tp->rx_rcb, tp->rx_rcb_mapping);
4585 tp->rx_rcb = NULL;
4586 }
4587 if (tp->tx_ring) {
4588 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4589 tp->tx_ring, tp->tx_desc_mapping);
4590 tp->tx_ring = NULL;
4591 }
4592 if (tp->hw_status) {
4593 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4594 tp->hw_status, tp->status_mapping);
4595 tp->hw_status = NULL;
4596 }
4597 if (tp->hw_stats) {
4598 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4599 tp->hw_stats, tp->stats_mapping);
4600 tp->hw_stats = NULL;
4601 }
4602}
4603
4604/*
4605 * Must not be invoked with interrupt sources disabled and
4606 * the hardware shutdown down. Can sleep.
4607 */
4608static int tg3_alloc_consistent(struct tg3 *tp)
4609{
bd2b3343 4610 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
1da177e4
LT
4611 (TG3_RX_RING_SIZE +
4612 TG3_RX_JUMBO_RING_SIZE)) +
4613 (sizeof(struct tx_ring_info) *
4614 TG3_TX_RING_SIZE),
4615 GFP_KERNEL);
4616 if (!tp->rx_std_buffers)
4617 return -ENOMEM;
4618
1da177e4
LT
4619 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4620 tp->tx_buffers = (struct tx_ring_info *)
4621 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4622
4623 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4624 &tp->rx_std_mapping);
4625 if (!tp->rx_std)
4626 goto err_out;
4627
4628 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4629 &tp->rx_jumbo_mapping);
4630
4631 if (!tp->rx_jumbo)
4632 goto err_out;
4633
4634 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4635 &tp->rx_rcb_mapping);
4636 if (!tp->rx_rcb)
4637 goto err_out;
4638
4639 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4640 &tp->tx_desc_mapping);
4641 if (!tp->tx_ring)
4642 goto err_out;
4643
4644 tp->hw_status = pci_alloc_consistent(tp->pdev,
4645 TG3_HW_STATUS_SIZE,
4646 &tp->status_mapping);
4647 if (!tp->hw_status)
4648 goto err_out;
4649
4650 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4651 sizeof(struct tg3_hw_stats),
4652 &tp->stats_mapping);
4653 if (!tp->hw_stats)
4654 goto err_out;
4655
4656 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4657 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4658
4659 return 0;
4660
4661err_out:
4662 tg3_free_consistent(tp);
4663 return -ENOMEM;
4664}
4665
4666#define MAX_WAIT_CNT 1000
4667
4668/* To stop a block, clear the enable bit and poll till it
4669 * clears. tp->lock is held.
4670 */
b3b7d6be 4671static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4672{
4673 unsigned int i;
4674 u32 val;
4675
4676 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4677 switch (ofs) {
4678 case RCVLSC_MODE:
4679 case DMAC_MODE:
4680 case MBFREE_MODE:
4681 case BUFMGR_MODE:
4682 case MEMARB_MODE:
4683 /* We can't enable/disable these bits of the
4684 * 5705/5750, just say success.
4685 */
4686 return 0;
4687
4688 default:
4689 break;
4690 };
4691 }
4692
4693 val = tr32(ofs);
4694 val &= ~enable_bit;
4695 tw32_f(ofs, val);
4696
4697 for (i = 0; i < MAX_WAIT_CNT; i++) {
4698 udelay(100);
4699 val = tr32(ofs);
4700 if ((val & enable_bit) == 0)
4701 break;
4702 }
4703
b3b7d6be 4704 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4705 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4706 "ofs=%lx enable_bit=%x\n",
4707 ofs, enable_bit);
4708 return -ENODEV;
4709 }
4710
4711 return 0;
4712}
4713
4714/* tp->lock is held. */
b3b7d6be 4715static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
4716{
4717 int i, err;
4718
4719 tg3_disable_ints(tp);
4720
4721 tp->rx_mode &= ~RX_MODE_ENABLE;
4722 tw32_f(MAC_RX_MODE, tp->rx_mode);
4723 udelay(10);
4724
b3b7d6be
DM
4725 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4726 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4727 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4728 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4729 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4730 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4731
4732 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4733 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4734 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4735 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4736 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4737 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4738 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
4739
4740 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4741 tw32_f(MAC_MODE, tp->mac_mode);
4742 udelay(40);
4743
4744 tp->tx_mode &= ~TX_MODE_ENABLE;
4745 tw32_f(MAC_TX_MODE, tp->tx_mode);
4746
4747 for (i = 0; i < MAX_WAIT_CNT; i++) {
4748 udelay(100);
4749 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4750 break;
4751 }
4752 if (i >= MAX_WAIT_CNT) {
4753 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4754 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4755 tp->dev->name, tr32(MAC_TX_MODE));
e6de8ad1 4756 err |= -ENODEV;
1da177e4
LT
4757 }
4758
e6de8ad1 4759 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
4760 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4761 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
4762
4763 tw32(FTQ_RESET, 0xffffffff);
4764 tw32(FTQ_RESET, 0x00000000);
4765
b3b7d6be
DM
4766 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4767 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4
LT
4768
4769 if (tp->hw_status)
4770 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4771 if (tp->hw_stats)
4772 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4773
1da177e4
LT
4774 return err;
4775}
4776
4777/* tp->lock is held. */
4778static int tg3_nvram_lock(struct tg3 *tp)
4779{
4780 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4781 int i;
4782
ec41c7df
MC
4783 if (tp->nvram_lock_cnt == 0) {
4784 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4785 for (i = 0; i < 8000; i++) {
4786 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4787 break;
4788 udelay(20);
4789 }
4790 if (i == 8000) {
4791 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4792 return -ENODEV;
4793 }
1da177e4 4794 }
ec41c7df 4795 tp->nvram_lock_cnt++;
1da177e4
LT
4796 }
4797 return 0;
4798}
4799
4800/* tp->lock is held. */
4801static void tg3_nvram_unlock(struct tg3 *tp)
4802{
ec41c7df
MC
4803 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4804 if (tp->nvram_lock_cnt > 0)
4805 tp->nvram_lock_cnt--;
4806 if (tp->nvram_lock_cnt == 0)
4807 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4808 }
1da177e4
LT
4809}
4810
e6af301b
MC
4811/* tp->lock is held. */
4812static void tg3_enable_nvram_access(struct tg3 *tp)
4813{
4814 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4815 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4816 u32 nvaccess = tr32(NVRAM_ACCESS);
4817
4818 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4819 }
4820}
4821
4822/* tp->lock is held. */
4823static void tg3_disable_nvram_access(struct tg3 *tp)
4824{
4825 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4826 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4827 u32 nvaccess = tr32(NVRAM_ACCESS);
4828
4829 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4830 }
4831}
4832
0d3031d9
MC
4833static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4834{
4835 int i;
4836 u32 apedata;
4837
4838 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4839 if (apedata != APE_SEG_SIG_MAGIC)
4840 return;
4841
4842 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4843 if (apedata != APE_FW_STATUS_READY)
4844 return;
4845
4846 /* Wait for up to 1 millisecond for APE to service previous event. */
4847 for (i = 0; i < 10; i++) {
4848 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4849 return;
4850
4851 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4852
4853 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4854 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4855 event | APE_EVENT_STATUS_EVENT_PENDING);
4856
4857 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4858
4859 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4860 break;
4861
4862 udelay(100);
4863 }
4864
4865 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4866 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4867}
4868
4869static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4870{
4871 u32 event;
4872 u32 apedata;
4873
4874 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4875 return;
4876
4877 switch (kind) {
4878 case RESET_KIND_INIT:
4879 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4880 APE_HOST_SEG_SIG_MAGIC);
4881 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4882 APE_HOST_SEG_LEN_MAGIC);
4883 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4884 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4885 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4886 APE_HOST_DRIVER_ID_MAGIC);
4887 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4888 APE_HOST_BEHAV_NO_PHYLOCK);
4889
4890 event = APE_EVENT_STATUS_STATE_START;
4891 break;
4892 case RESET_KIND_SHUTDOWN:
4893 event = APE_EVENT_STATUS_STATE_UNLOAD;
4894 break;
4895 case RESET_KIND_SUSPEND:
4896 event = APE_EVENT_STATUS_STATE_SUSPEND;
4897 break;
4898 default:
4899 return;
4900 }
4901
4902 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4903
4904 tg3_ape_send_event(tp, event);
4905}
4906
1da177e4
LT
4907/* tp->lock is held. */
4908static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4909{
f49639e6
DM
4910 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4911 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4
LT
4912
4913 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4914 switch (kind) {
4915 case RESET_KIND_INIT:
4916 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4917 DRV_STATE_START);
4918 break;
4919
4920 case RESET_KIND_SHUTDOWN:
4921 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4922 DRV_STATE_UNLOAD);
4923 break;
4924
4925 case RESET_KIND_SUSPEND:
4926 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4927 DRV_STATE_SUSPEND);
4928 break;
4929
4930 default:
4931 break;
4932 };
4933 }
0d3031d9
MC
4934
4935 if (kind == RESET_KIND_INIT ||
4936 kind == RESET_KIND_SUSPEND)
4937 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
4938}
4939
4940/* tp->lock is held. */
4941static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4942{
4943 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4944 switch (kind) {
4945 case RESET_KIND_INIT:
4946 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4947 DRV_STATE_START_DONE);
4948 break;
4949
4950 case RESET_KIND_SHUTDOWN:
4951 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4952 DRV_STATE_UNLOAD_DONE);
4953 break;
4954
4955 default:
4956 break;
4957 };
4958 }
0d3031d9
MC
4959
4960 if (kind == RESET_KIND_SHUTDOWN)
4961 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
4962}
4963
4964/* tp->lock is held. */
4965static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4966{
4967 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4968 switch (kind) {
4969 case RESET_KIND_INIT:
4970 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4971 DRV_STATE_START);
4972 break;
4973
4974 case RESET_KIND_SHUTDOWN:
4975 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4976 DRV_STATE_UNLOAD);
4977 break;
4978
4979 case RESET_KIND_SUSPEND:
4980 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4981 DRV_STATE_SUSPEND);
4982 break;
4983
4984 default:
4985 break;
4986 };
4987 }
4988}
4989
7a6f4369
MC
4990static int tg3_poll_fw(struct tg3 *tp)
4991{
4992 int i;
4993 u32 val;
4994
b5d3772c 4995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
0ccead18
GZ
4996 /* Wait up to 20ms for init done. */
4997 for (i = 0; i < 200; i++) {
b5d3772c
MC
4998 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4999 return 0;
0ccead18 5000 udelay(100);
b5d3772c
MC
5001 }
5002 return -ENODEV;
5003 }
5004
7a6f4369
MC
5005 /* Wait for firmware initialization to complete. */
5006 for (i = 0; i < 100000; i++) {
5007 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5008 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5009 break;
5010 udelay(10);
5011 }
5012
5013 /* Chip might not be fitted with firmware. Some Sun onboard
5014 * parts are configured like that. So don't signal the timeout
5015 * of the above loop as an error, but do report the lack of
5016 * running firmware once.
5017 */
5018 if (i >= 100000 &&
5019 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5020 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5021
5022 printk(KERN_INFO PFX "%s: No firmware running.\n",
5023 tp->dev->name);
5024 }
5025
5026 return 0;
5027}
5028
ee6a99b5
MC
5029/* Save PCI command register before chip reset */
5030static void tg3_save_pci_state(struct tg3 *tp)
5031{
8a6eac90 5032 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
ee6a99b5
MC
5033}
5034
5035/* Restore PCI state after chip reset */
5036static void tg3_restore_pci_state(struct tg3 *tp)
5037{
5038 u32 val;
5039
5040 /* Re-enable indirect register accesses. */
5041 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5042 tp->misc_host_ctrl);
5043
5044 /* Set MAX PCI retry to zero. */
5045 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5046 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5047 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5048 val |= PCISTATE_RETRY_SAME_DMA;
0d3031d9
MC
5049 /* Allow reads and writes to the APE register and memory space. */
5050 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5051 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5052 PCISTATE_ALLOW_APE_SHMEM_WR;
ee6a99b5
MC
5053 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5054
8a6eac90 5055 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
ee6a99b5 5056
114342f2
MC
5057 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5058 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5059 tp->pci_cacheline_sz);
5060 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5061 tp->pci_lat_timer);
5062 }
ee6a99b5 5063 /* Make sure PCI-X relaxed ordering bit is clear. */
9974a356
MC
5064 if (tp->pcix_cap) {
5065 u16 pcix_cmd;
5066
5067 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5068 &pcix_cmd);
5069 pcix_cmd &= ~PCI_X_CMD_ERO;
5070 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5071 pcix_cmd);
5072 }
ee6a99b5
MC
5073
5074 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ee6a99b5
MC
5075
5076 /* Chip reset on 5780 will reset MSI enable bit,
5077 * so need to restore it.
5078 */
5079 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5080 u16 ctrl;
5081
5082 pci_read_config_word(tp->pdev,
5083 tp->msi_cap + PCI_MSI_FLAGS,
5084 &ctrl);
5085 pci_write_config_word(tp->pdev,
5086 tp->msi_cap + PCI_MSI_FLAGS,
5087 ctrl | PCI_MSI_FLAGS_ENABLE);
5088 val = tr32(MSGINT_MODE);
5089 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5090 }
5091 }
5092}
5093
1da177e4
LT
5094static void tg3_stop_fw(struct tg3 *);
5095
5096/* tp->lock is held. */
5097static int tg3_chip_reset(struct tg3 *tp)
5098{
5099 u32 val;
1ee582d8 5100 void (*write_op)(struct tg3 *, u32, u32);
7a6f4369 5101 int err;
1da177e4 5102
f49639e6
DM
5103 tg3_nvram_lock(tp);
5104
5105 /* No matching tg3_nvram_unlock() after this because
5106 * chip reset below will undo the nvram lock.
5107 */
5108 tp->nvram_lock_cnt = 0;
1da177e4 5109
ee6a99b5
MC
5110 /* GRC_MISC_CFG core clock reset will clear the memory
5111 * enable bit in PCI register 4 and the MSI enable bit
5112 * on some chips, so we save relevant registers here.
5113 */
5114 tg3_save_pci_state(tp);
5115
d9ab5ad1 5116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 5117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 5118 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
5119 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5120 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d9ab5ad1
MC
5121 tw32(GRC_FASTBOOT_PC, 0);
5122
1da177e4
LT
5123 /*
5124 * We must avoid the readl() that normally takes place.
5125 * It locks machines, causes machine checks, and other
5126 * fun things. So, temporarily disable the 5701
5127 * hardware workaround, while we do the reset.
5128 */
1ee582d8
MC
5129 write_op = tp->write32;
5130 if (write_op == tg3_write_flush_reg32)
5131 tp->write32 = tg3_write32;
1da177e4 5132
d18edcb2
MC
5133 /* Prevent the irq handler from reading or writing PCI registers
5134 * during chip reset when the memory enable bit in the PCI command
5135 * register may be cleared. The chip does not generate interrupt
5136 * at this time, but the irq handler may still be called due to irq
5137 * sharing or irqpoll.
5138 */
5139 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
b8fa2f3a
MC
5140 if (tp->hw_status) {
5141 tp->hw_status->status = 0;
5142 tp->hw_status->status_tag = 0;
5143 }
d18edcb2
MC
5144 tp->last_tag = 0;
5145 smp_mb();
5146 synchronize_irq(tp->pdev->irq);
5147
1da177e4
LT
5148 /* do the reset */
5149 val = GRC_MISC_CFG_CORECLK_RESET;
5150
5151 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5152 if (tr32(0x7e2c) == 0x60) {
5153 tw32(0x7e2c, 0x20);
5154 }
5155 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5156 tw32(GRC_MISC_CFG, (1 << 29));
5157 val |= (1 << 29);
5158 }
5159 }
5160
b5d3772c
MC
5161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5162 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5163 tw32(GRC_VCPU_EXT_CTRL,
5164 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5165 }
5166
1da177e4
LT
5167 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5168 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5169 tw32(GRC_MISC_CFG, val);
5170
1ee582d8
MC
5171 /* restore 5701 hardware bug workaround write method */
5172 tp->write32 = write_op;
1da177e4
LT
5173
5174 /* Unfortunately, we have to delay before the PCI read back.
5175 * Some 575X chips even will not respond to a PCI cfg access
5176 * when the reset command is given to the chip.
5177 *
5178 * How do these hardware designers expect things to work
5179 * properly if the PCI write is posted for a long period
5180 * of time? It is always necessary to have some method by
5181 * which a register read back can occur to push the write
5182 * out which does the reset.
5183 *
5184 * For most tg3 variants the trick below was working.
5185 * Ho hum...
5186 */
5187 udelay(120);
5188
5189 /* Flush PCI posted writes. The normal MMIO registers
5190 * are inaccessible at this time so this is the only
5191 * way to make this reliably (actually, this is no longer
5192 * the case, see above). I tried to use indirect
5193 * register read/write but this upset some 5701 variants.
5194 */
5195 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5196
5197 udelay(120);
5198
5199 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5200 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5201 int i;
5202 u32 cfg_val;
5203
5204 /* Wait for link training to complete. */
5205 for (i = 0; i < 5000; i++)
5206 udelay(100);
5207
5208 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5209 pci_write_config_dword(tp->pdev, 0xc4,
5210 cfg_val | (1 << 15));
5211 }
5212 /* Set PCIE max payload size and clear error status. */
5213 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5214 }
5215
ee6a99b5 5216 tg3_restore_pci_state(tp);
1da177e4 5217
d18edcb2
MC
5218 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5219
ee6a99b5
MC
5220 val = 0;
5221 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4cf78e4f 5222 val = tr32(MEMARB_MODE);
ee6a99b5 5223 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1da177e4
LT
5224
5225 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5226 tg3_stop_fw(tp);
5227 tw32(0x5000, 0x400);
5228 }
5229
5230 tw32(GRC_MODE, tp->grc_mode);
5231
5232 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
ab0049b4 5233 val = tr32(0xc4);
1da177e4
LT
5234
5235 tw32(0xc4, val | (1 << 15));
5236 }
5237
5238 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5239 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5240 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5241 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5242 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5243 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5244 }
5245
5246 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5247 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5248 tw32_f(MAC_MODE, tp->mac_mode);
747e8f8b
MC
5249 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5250 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5251 tw32_f(MAC_MODE, tp->mac_mode);
1da177e4
LT
5252 } else
5253 tw32_f(MAC_MODE, 0);
5254 udelay(40);
5255
7a6f4369
MC
5256 err = tg3_poll_fw(tp);
5257 if (err)
5258 return err;
1da177e4
LT
5259
5260 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5261 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
ab0049b4 5262 val = tr32(0x7c00);
1da177e4
LT
5263
5264 tw32(0x7c00, val | (1 << 25));
5265 }
5266
5267 /* Reprobe ASF enable state. */
5268 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5269 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5270 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5271 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5272 u32 nic_cfg;
5273
5274 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5275 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5276 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 5277 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
5278 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5279 }
5280 }
5281
5282 return 0;
5283}
5284
/* tp->lock is held.
 * Ask the on-chip ASF management firmware to pause.  Only done when
 * ASF is enabled and no APE is present (TG3_FLG3_ENABLE_APE clear);
 * the write sequence below talks directly to the RX cpu firmware.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;
		int i;

		/* Post the PAUSE command in the firmware mailbox, then
		 * raise event bit 14 to signal the RX cpu firmware.
		 */
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event (it clears bit 14).
		 * Best-effort: give up silently after ~100us.
		 */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
5306
/* tp->lock is held.
 * Bring the chip to a halted, freshly-reset state: pause the
 * management firmware, quiesce the hardware, reset the core, and
 * write the driver signatures around the reset.  Returns 0 on
 * success or the error from tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int reset_err;

	/* Pause firmware before touching the hardware. */
	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	/* Quiesce the device, then perform the core reset. */
	tg3_abort_hw(tp, silent);
	reset_err = tg3_chip_reset(tp);

	/* Signatures are written even if the reset itself failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return reset_err;
}
5327
/* Section layout of the 5701 A0 fixup firmware image below
 * (tg3FwText / tg3FwRodata).  *_ADDR values are link-time addresses in
 * the on-chip CPU's view; *_LEN values are byte lengths.
 * NOTE(review): TG3_FW_RELASE_MINOR misspells "RELEASE"; kept as-is
 * since renaming a macro could break references outside this chunk.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
5342
/* .text section of the 5701 A0 workaround firmware, loaded into the
 * RX cpu scratch memory by tg3_load_5701_a0_firmware_fix() per the
 * TG3_FW_* layout macros above.  Opaque binary image (derived from
 * proprietary Broadcom code, see the file header) -- do not edit by
 * hand.  NOTE(review): the words appear to be MIPS instruction
 * encodings for the on-chip CPU; not verifiable from this file alone.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5436
/* .rodata section of the 5701 A0 workaround firmware.  The words are
 * ASCII message text used by the firmware (e.g. 0x35373031 = "5701").
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5444
/* The firmware .data section is all zeros; tg3_load_firmware_cpu()
 * zero-fills a section when handed a NULL data pointer, so the table
 * is compiled out to save space.
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5451
/* On-chip scratch memory windows (base offset and size in bytes) used
 * when loading firmware into the RX and TX cpus.
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5456
5457/* tp->lock is held. */
5458static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5459{
5460 int i;
5461
5d9428de
ES
5462 BUG_ON(offset == TX_CPU_BASE &&
5463 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
1da177e4 5464
b5d3772c
MC
5465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5466 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5467
5468 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5469 return 0;
5470 }
1da177e4
LT
5471 if (offset == RX_CPU_BASE) {
5472 for (i = 0; i < 10000; i++) {
5473 tw32(offset + CPU_STATE, 0xffffffff);
5474 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5475 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5476 break;
5477 }
5478
5479 tw32(offset + CPU_STATE, 0xffffffff);
5480 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5481 udelay(10);
5482 } else {
5483 for (i = 0; i < 10000; i++) {
5484 tw32(offset + CPU_STATE, 0xffffffff);
5485 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5486 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5487 break;
5488 }
5489 }
5490
5491 if (i >= 10000) {
5492 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5493 "and %s CPU\n",
5494 tp->dev->name,
5495 (offset == RX_CPU_BASE ? "RX" : "TX"));
5496 return -ENODEV;
5497 }
ec41c7df
MC
5498
5499 /* Clear firmware's nvram arbitration. */
5500 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5501 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
1da177e4
LT
5502 return 0;
5503}
5504
/* Describes a firmware image to be loaded into a chip CPU's scratch
 * memory by tg3_load_firmware_cpu().  Each section carries its
 * link-time load address, its length in bytes, and a pointer to the
 * 32-bit words to write; a NULL data pointer means the section is
 * zero-filled by the loader.
 */
struct fw_info {
	unsigned int text_base;		/* .text load address */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* .rodata load address */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* .data load address */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5516
5517/* tp->lock is held. */
5518static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5519 int cpu_scratch_size, struct fw_info *info)
5520{
ec41c7df 5521 int err, lock_err, i;
1da177e4
LT
5522 void (*write_op)(struct tg3 *, u32, u32);
5523
5524 if (cpu_base == TX_CPU_BASE &&
5525 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5526 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5527 "TX cpu firmware on %s which is 5705.\n",
5528 tp->dev->name);
5529 return -EINVAL;
5530 }
5531
5532 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5533 write_op = tg3_write_mem;
5534 else
5535 write_op = tg3_write_indirect_reg32;
5536
1b628151
MC
5537 /* It is possible that bootcode is still loading at this point.
5538 * Get the nvram lock first before halting the cpu.
5539 */
ec41c7df 5540 lock_err = tg3_nvram_lock(tp);
1da177e4 5541 err = tg3_halt_cpu(tp, cpu_base);
ec41c7df
MC
5542 if (!lock_err)
5543 tg3_nvram_unlock(tp);
1da177e4
LT
5544 if (err)
5545 goto out;
5546
5547 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5548 write_op(tp, cpu_scratch_base + i, 0);
5549 tw32(cpu_base + CPU_STATE, 0xffffffff);
5550 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5551 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5552 write_op(tp, (cpu_scratch_base +
5553 (info->text_base & 0xffff) +
5554 (i * sizeof(u32))),
5555 (info->text_data ?
5556 info->text_data[i] : 0));
5557 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5558 write_op(tp, (cpu_scratch_base +
5559 (info->rodata_base & 0xffff) +
5560 (i * sizeof(u32))),
5561 (info->rodata_data ?
5562 info->rodata_data[i] : 0));
5563 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5564 write_op(tp, (cpu_scratch_base +
5565 (info->data_base & 0xffff) +
5566 (i * sizeof(u32))),
5567 (info->data_data ?
5568 info->data_data[i] : 0));
5569
5570 err = 0;
5571
5572out:
1da177e4
LT
5573 return err;
5574}
5575
/* tp->lock is held.
 * Load the 5701 A0 workaround firmware into both the RX and TX cpu
 * scratch areas, then start only the RX cpu at TG3_FW_TEXT_ADDR.
 * Returns 0 on success, or a negative errno from the loader or from
 * the CPU-start poll below.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	/* Describe the firmware image.  The .data section is all zeros
	 * (see the #if 0 table above), so data_data is NULL and the
	 * loader zero-fills it.
	 */
	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	info.data_data = NULL;

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);

	/* Verify the program counter latched; if not, re-halt the cpu,
	 * rewrite the PC, and retry (up to 5 times, 1ms apart).
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Release the RX cpu from halt; it begins executing the fix. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);

	return 0;
}
5628
1da177e4
LT
5629
/* Section layout of the TSO firmware image (tg3TsoFwText et al.).
 * *_ADDR values are link-time addresses; *_LEN values are byte
 * lengths.  NOTE(review): TG3_TSO_FW_RELASE_MINOR misspells
 * "RELEASE"; kept as-is to avoid breaking external references.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5644
50da859d 5645static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5646 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5647 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5648 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5649 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5650 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5651 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5652 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5653 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5654 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5655 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5656 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5657 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5658 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5659 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5660 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5661 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5662 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5663 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5664 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5665 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5666 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5667 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5668 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5669 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5670 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5671 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5672 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5673 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5674 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5675 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5676 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5677 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5678 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5679 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5680 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5681 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5682 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5683 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5684 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5685 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5686 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5687 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5688 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5689 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5690 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5691 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5692 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5693 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5694 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5695 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5696 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5697 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5698 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5699 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5700 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5701 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5702 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5703 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5704 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5705 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5706 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5707 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5708 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5709 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5710 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5711 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5712 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5713 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5714 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5715 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5716 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5717 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5718 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5719 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5720 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5721 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5722 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5723 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5724 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5725 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5726 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5727 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5728 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5729 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5730 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5731 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5732 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5733 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5734 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5735 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5736 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5737 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5738 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5739 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5740 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5741 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5742 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5743 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5744 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5745 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5746 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5747 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5748 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5749 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5750 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5751 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5752 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5753 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5754 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5755 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5756 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5757 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5758 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5759 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5760 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5761 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5762 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5763 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5764 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5765 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5766 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5767 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5768 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5769 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5770 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5771 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5772 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5773 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5774 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5775 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5776 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5777 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5778 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5779 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5780 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5781 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5782 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5783 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5784 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5785 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5786 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5787 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5788 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5789 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5790 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5791 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5792 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5793 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5794 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5795 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5796 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5797 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5798 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5799 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5800 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5801 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5802 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5803 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5804 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5805 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5806 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5807 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5808 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5809 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5810 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5811 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5812 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5813 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5814 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5815 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5816 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5817 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5818 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5819 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5820 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5821 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5822 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5823 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5824 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5825 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5826 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5827 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5828 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5829 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5830 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5831 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5832 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5833 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5834 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5835 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5836 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5837 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5838 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5839 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5840 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5841 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5842 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5843 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5844 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5845 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5846 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5847 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5848 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5849 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5850 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5851 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5852 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5853 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5854 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5855 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5856 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5857 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5858 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5859 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5860 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5861 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5862 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5863 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5864 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5865 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5866 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5867 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5868 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5869 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5870 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5871 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5872 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5873 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5874 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5875 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5876 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5877 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5878 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5879 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5880 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5881 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5882 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5883 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5884 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5885 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5886 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5887 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5888 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5889 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5890 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5891 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5892 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5893 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5894 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5895 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5896 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5897 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5898 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5899 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5900 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5901 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5902 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5903 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5904 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5905 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5906 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5907 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5908 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5909 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5910 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5911 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5912 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5913 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5914 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5915 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5916 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5917 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5918 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5919 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5920 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5921 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5922 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5923 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5924 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5925 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5926 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5927 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5928 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5929 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5930};
5931
50da859d 5932static const u32 tg3TsoFwRodata[] = {
1da177e4
LT
5933 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5934 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5935 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5936 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5937 0x00000000,
5938};
5939
50da859d 5940static const u32 tg3TsoFwData[] = {
1da177e4
LT
5941 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5942 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5943 0x00000000,
5944};
5945
5946/* 5705 needs a special version of the TSO firmware. */
5947#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5948#define TG3_TSO5_FW_RELASE_MINOR 0x2
5949#define TG3_TSO5_FW_RELEASE_FIX 0x0
5950#define TG3_TSO5_FW_START_ADDR 0x00010000
5951#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5952#define TG3_TSO5_FW_TEXT_LEN 0xe90
5953#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5954#define TG3_TSO5_FW_RODATA_LEN 0x50
5955#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5956#define TG3_TSO5_FW_DATA_LEN 0x20
5957#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5958#define TG3_TSO5_FW_SBSS_LEN 0x28
5959#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5960#define TG3_TSO5_FW_BSS_LEN 0x88
5961
50da859d 5962static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5963 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5964 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5965 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5966 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5967 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5968 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5969 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5970 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5971 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5972 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5973 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5974 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5975 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5976 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5977 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5978 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5979 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5980 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5981 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5982 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5983 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5984 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5985 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5986 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5987 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5988 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5989 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5990 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5991 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5992 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5993 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5994 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5995 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5996 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5997 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5998 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5999 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6000 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6001 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6002 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6003 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6004 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6005 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6006 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6007 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6008 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6009 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6010 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6011 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6012 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6013 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6014 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6015 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6016 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6017 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6018 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6019 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6020 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6021 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6022 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6023 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6024 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6025 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6026 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6027 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6028 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6029 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6030 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6031 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6032 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6033 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6034 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6035 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6036 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6037 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6038 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6039 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6040 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6041 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6042 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6043 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6044 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6045 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6046 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6047 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6048 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6049 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6050 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6051 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6052 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6053 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6054 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6055 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6056 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6057 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6058 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6059 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6060 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6061 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6062 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6063 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6064 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6065 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6066 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6067 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6068 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6069 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6070 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6071 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6072 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6073 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6074 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6075 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6076 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6077 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6078 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6079 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6080 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6081 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6082 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6083 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6084 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6085 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6086 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6087 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6088 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6089 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6090 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6091 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6092 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6093 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6094 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6095 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6096 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6097 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6098 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6099 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6100 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6101 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6102 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6103 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6104 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6105 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6106 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6107 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6108 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6109 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6110 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6111 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6112 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6113 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6114 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6115 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6116 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6117 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6118 0x00000000, 0x00000000, 0x00000000,
6119};
6120
50da859d 6121static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
1da177e4
LT
6122 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6123 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6124 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6125 0x00000000, 0x00000000, 0x00000000,
6126};
6127
50da859d 6128static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
1da177e4
LT
6129 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6130 0x00000000, 0x00000000, 0x00000000,
6131};
6132
6133/* tp->lock is held. */
6134static int tg3_load_tso_firmware(struct tg3 *tp)
6135{
6136 struct fw_info info;
6137 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6138 int err, i;
6139
6140 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6141 return 0;
6142
6143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6144 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6145 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6146 info.text_data = &tg3Tso5FwText[0];
6147 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6148 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6149 info.rodata_data = &tg3Tso5FwRodata[0];
6150 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6151 info.data_len = TG3_TSO5_FW_DATA_LEN;
6152 info.data_data = &tg3Tso5FwData[0];
6153 cpu_base = RX_CPU_BASE;
6154 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6155 cpu_scratch_size = (info.text_len +
6156 info.rodata_len +
6157 info.data_len +
6158 TG3_TSO5_FW_SBSS_LEN +
6159 TG3_TSO5_FW_BSS_LEN);
6160 } else {
6161 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6162 info.text_len = TG3_TSO_FW_TEXT_LEN;
6163 info.text_data = &tg3TsoFwText[0];
6164 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6165 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6166 info.rodata_data = &tg3TsoFwRodata[0];
6167 info.data_base = TG3_TSO_FW_DATA_ADDR;
6168 info.data_len = TG3_TSO_FW_DATA_LEN;
6169 info.data_data = &tg3TsoFwData[0];
6170 cpu_base = TX_CPU_BASE;
6171 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6172 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6173 }
6174
6175 err = tg3_load_firmware_cpu(tp, cpu_base,
6176 cpu_scratch_base, cpu_scratch_size,
6177 &info);
6178 if (err)
6179 return err;
6180
6181 /* Now startup the cpu. */
6182 tw32(cpu_base + CPU_STATE, 0xffffffff);
6183 tw32_f(cpu_base + CPU_PC, info.text_base);
6184
6185 for (i = 0; i < 5; i++) {
6186 if (tr32(cpu_base + CPU_PC) == info.text_base)
6187 break;
6188 tw32(cpu_base + CPU_STATE, 0xffffffff);
6189 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6190 tw32_f(cpu_base + CPU_PC, info.text_base);
6191 udelay(1000);
6192 }
6193 if (i >= 5) {
6194 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6195 "to set CPU PC, is %08x should be %08x\n",
6196 tp->dev->name, tr32(cpu_base + CPU_PC),
6197 info.text_base);
6198 return -ENODEV;
6199 }
6200 tw32(cpu_base + CPU_STATE, 0xffffffff);
6201 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6202 return 0;
6203}
6204
1da177e4
LT
6205
6206/* tp->lock is held. */
986e0aeb 6207static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1da177e4
LT
6208{
6209 u32 addr_high, addr_low;
6210 int i;
6211
6212 addr_high = ((tp->dev->dev_addr[0] << 8) |
6213 tp->dev->dev_addr[1]);
6214 addr_low = ((tp->dev->dev_addr[2] << 24) |
6215 (tp->dev->dev_addr[3] << 16) |
6216 (tp->dev->dev_addr[4] << 8) |
6217 (tp->dev->dev_addr[5] << 0));
6218 for (i = 0; i < 4; i++) {
986e0aeb
MC
6219 if (i == 1 && skip_mac_1)
6220 continue;
1da177e4
LT
6221 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6222 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6223 }
6224
6225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6227 for (i = 0; i < 12; i++) {
6228 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6229 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6230 }
6231 }
6232
6233 addr_high = (tp->dev->dev_addr[0] +
6234 tp->dev->dev_addr[1] +
6235 tp->dev->dev_addr[2] +
6236 tp->dev->dev_addr[3] +
6237 tp->dev->dev_addr[4] +
6238 tp->dev->dev_addr[5]) &
6239 TX_BACKOFF_SEED_MASK;
6240 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6241}
6242
6243static int tg3_set_mac_addr(struct net_device *dev, void *p)
6244{
6245 struct tg3 *tp = netdev_priv(dev);
6246 struct sockaddr *addr = p;
986e0aeb 6247 int err = 0, skip_mac_1 = 0;
1da177e4 6248
f9804ddb
MC
6249 if (!is_valid_ether_addr(addr->sa_data))
6250 return -EINVAL;
6251
1da177e4
LT
6252 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6253
e75f7c90
MC
6254 if (!netif_running(dev))
6255 return 0;
6256
58712ef9 6257 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
986e0aeb 6258 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 6259
986e0aeb
MC
6260 addr0_high = tr32(MAC_ADDR_0_HIGH);
6261 addr0_low = tr32(MAC_ADDR_0_LOW);
6262 addr1_high = tr32(MAC_ADDR_1_HIGH);
6263 addr1_low = tr32(MAC_ADDR_1_LOW);
6264
6265 /* Skip MAC addr 1 if ASF is using it. */
6266 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6267 !(addr1_high == 0 && addr1_low == 0))
6268 skip_mac_1 = 1;
58712ef9 6269 }
986e0aeb
MC
6270 spin_lock_bh(&tp->lock);
6271 __tg3_set_mac_addr(tp, skip_mac_1);
6272 spin_unlock_bh(&tp->lock);
1da177e4 6273
b9ec6c1b 6274 return err;
1da177e4
LT
6275}
6276
6277/* tp->lock is held. */
6278static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6279 dma_addr_t mapping, u32 maxlen_flags,
6280 u32 nic_addr)
6281{
6282 tg3_write_mem(tp,
6283 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6284 ((u64) mapping >> 32));
6285 tg3_write_mem(tp,
6286 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6287 ((u64) mapping & 0xffffffff));
6288 tg3_write_mem(tp,
6289 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6290 maxlen_flags);
6291
6292 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6293 tg3_write_mem(tp,
6294 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6295 nic_addr);
6296}
6297
6298static void __tg3_set_rx_mode(struct net_device *);
d244c892 6299static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
6300{
6301 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6302 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6303 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6304 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6305 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6306 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6307 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6308 }
6309 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6310 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6311 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6312 u32 val = ec->stats_block_coalesce_usecs;
6313
6314 if (!netif_carrier_ok(tp->dev))
6315 val = 0;
6316
6317 tw32(HOSTCC_STAT_COAL_TICKS, val);
6318 }
6319}
1da177e4
LT
6320
6321/* tp->lock is held. */
8e7a22e3 6322static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6323{
6324 u32 val, rdmac_mode;
6325 int i, err, limit;
6326
6327 tg3_disable_ints(tp);
6328
6329 tg3_stop_fw(tp);
6330
6331 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6332
6333 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 6334 tg3_abort_hw(tp, 1);
1da177e4
LT
6335 }
6336
36da4d86 6337 if (reset_phy)
d4d2c558
MC
6338 tg3_phy_reset(tp);
6339
1da177e4
LT
6340 err = tg3_chip_reset(tp);
6341 if (err)
6342 return err;
6343
6344 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6345
d30cdd28
MC
6346 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6347 val = tr32(TG3_CPMU_CTRL);
6348 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6349 tw32(TG3_CPMU_CTRL, val);
6350 }
6351
1da177e4
LT
6352 /* This works around an issue with Athlon chipsets on
6353 * B3 tigon3 silicon. This bit has no effect on any
6354 * other revision. But do not set this on PCI Express
795d01c5 6355 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 6356 */
795d01c5
MC
6357 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6358 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6359 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6360 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6361 }
1da177e4
LT
6362
6363 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6364 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6365 val = tr32(TG3PCI_PCISTATE);
6366 val |= PCISTATE_RETRY_SAME_DMA;
6367 tw32(TG3PCI_PCISTATE, val);
6368 }
6369
0d3031d9
MC
6370 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6371 /* Allow reads and writes to the
6372 * APE register and memory space.
6373 */
6374 val = tr32(TG3PCI_PCISTATE);
6375 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6376 PCISTATE_ALLOW_APE_SHMEM_WR;
6377 tw32(TG3PCI_PCISTATE, val);
6378 }
6379
1da177e4
LT
6380 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6381 /* Enable some hw fixes. */
6382 val = tr32(TG3PCI_MSI_DATA);
6383 val |= (1 << 26) | (1 << 28) | (1 << 29);
6384 tw32(TG3PCI_MSI_DATA, val);
6385 }
6386
6387 /* Descriptor ring init may make accesses to the
6388 * NIC SRAM area to setup the TX descriptors, so we
6389 * can only do this after the hardware has been
6390 * successfully reset.
6391 */
32d8c572
MC
6392 err = tg3_init_rings(tp);
6393 if (err)
6394 return err;
1da177e4 6395
9936bcf6
MC
6396 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6397 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
6398 /* This value is determined during the probe time DMA
6399 * engine test, tg3_test_dma.
6400 */
6401 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6402 }
1da177e4
LT
6403
6404 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6405 GRC_MODE_4X_NIC_SEND_RINGS |
6406 GRC_MODE_NO_TX_PHDR_CSUM |
6407 GRC_MODE_NO_RX_PHDR_CSUM);
6408 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
6409
6410 /* Pseudo-header checksum is done by hardware logic and not
6411 * the offload processers, so make the chip do the pseudo-
6412 * header checksums on receive. For transmit it is more
6413 * convenient to do the pseudo-header checksum in software
6414 * as Linux does that on transmit for us in all cases.
6415 */
6416 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
6417
6418 tw32(GRC_MODE,
6419 tp->grc_mode |
6420 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6421
6422 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6423 val = tr32(GRC_MISC_CFG);
6424 val &= ~0xff;
6425 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6426 tw32(GRC_MISC_CFG, val);
6427
6428 /* Initialize MBUF/DESC pool. */
cbf46853 6429 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
6430 /* Do nothing. */
6431 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6432 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6434 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6435 else
6436 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6437 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6438 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6439 }
1da177e4
LT
6440 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6441 int fw_len;
6442
6443 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6444 TG3_TSO5_FW_RODATA_LEN +
6445 TG3_TSO5_FW_DATA_LEN +
6446 TG3_TSO5_FW_SBSS_LEN +
6447 TG3_TSO5_FW_BSS_LEN);
6448 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6449 tw32(BUFMGR_MB_POOL_ADDR,
6450 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6451 tw32(BUFMGR_MB_POOL_SIZE,
6452 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6453 }
1da177e4 6454
0f893dc6 6455 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
6456 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6457 tp->bufmgr_config.mbuf_read_dma_low_water);
6458 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6459 tp->bufmgr_config.mbuf_mac_rx_low_water);
6460 tw32(BUFMGR_MB_HIGH_WATER,
6461 tp->bufmgr_config.mbuf_high_water);
6462 } else {
6463 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6464 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6465 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6466 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6467 tw32(BUFMGR_MB_HIGH_WATER,
6468 tp->bufmgr_config.mbuf_high_water_jumbo);
6469 }
6470 tw32(BUFMGR_DMA_LOW_WATER,
6471 tp->bufmgr_config.dma_low_water);
6472 tw32(BUFMGR_DMA_HIGH_WATER,
6473 tp->bufmgr_config.dma_high_water);
6474
6475 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6476 for (i = 0; i < 2000; i++) {
6477 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6478 break;
6479 udelay(10);
6480 }
6481 if (i >= 2000) {
6482 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6483 tp->dev->name);
6484 return -ENODEV;
6485 }
6486
6487 /* Setup replenish threshold. */
f92905de
MC
6488 val = tp->rx_pending / 8;
6489 if (val == 0)
6490 val = 1;
6491 else if (val > tp->rx_std_max_post)
6492 val = tp->rx_std_max_post;
b5d3772c
MC
6493 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6494 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6495 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6496
6497 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6498 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6499 }
f92905de
MC
6500
6501 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6502
6503 /* Initialize TG3_BDINFO's at:
6504 * RCVDBDI_STD_BD: standard eth size rx ring
6505 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6506 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6507 *
6508 * like so:
6509 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6510 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6511 * ring attribute flags
6512 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6513 *
6514 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6515 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6516 *
6517 * The size of each ring is fixed in the firmware, but the location is
6518 * configurable.
6519 */
6520 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6521 ((u64) tp->rx_std_mapping >> 32));
6522 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6523 ((u64) tp->rx_std_mapping & 0xffffffff));
6524 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6525 NIC_SRAM_RX_BUFFER_DESC);
6526
6527 /* Don't even try to program the JUMBO/MINI buffer descriptor
6528 * configs on 5705.
6529 */
6530 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6531 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6532 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6533 } else {
6534 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6535 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6536
6537 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6538 BDINFO_FLAGS_DISABLED);
6539
6540 /* Setup replenish threshold. */
6541 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6542
0f893dc6 6543 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6544 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6545 ((u64) tp->rx_jumbo_mapping >> 32));
6546 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6547 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6548 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6549 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6550 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6551 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6552 } else {
6553 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6554 BDINFO_FLAGS_DISABLED);
6555 }
6556
6557 }
6558
6559 /* There is only one send ring on 5705/5750, no need to explicitly
6560 * disable the others.
6561 */
6562 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6563 /* Clear out send RCB ring in SRAM. */
6564 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6565 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6566 BDINFO_FLAGS_DISABLED);
6567 }
6568
6569 tp->tx_prod = 0;
6570 tp->tx_cons = 0;
6571 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6572 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6573
6574 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6575 tp->tx_desc_mapping,
6576 (TG3_TX_RING_SIZE <<
6577 BDINFO_FLAGS_MAXLEN_SHIFT),
6578 NIC_SRAM_TX_BUFFER_DESC);
6579
6580 /* There is only one receive return ring on 5705/5750, no need
6581 * to explicitly disable the others.
6582 */
6583 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6584 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6585 i += TG3_BDINFO_SIZE) {
6586 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6587 BDINFO_FLAGS_DISABLED);
6588 }
6589 }
6590
6591 tp->rx_rcb_ptr = 0;
6592 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6593
6594 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6595 tp->rx_rcb_mapping,
6596 (TG3_RX_RCB_RING_SIZE(tp) <<
6597 BDINFO_FLAGS_MAXLEN_SHIFT),
6598 0);
6599
6600 tp->rx_std_ptr = tp->rx_pending;
6601 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6602 tp->rx_std_ptr);
6603
0f893dc6 6604 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6605 tp->rx_jumbo_pending : 0;
6606 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6607 tp->rx_jumbo_ptr);
6608
6609 /* Initialize MAC address and backoff seed. */
986e0aeb 6610 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
6611
6612 /* MTU + ethernet header + FCS + optional VLAN tag */
6613 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6614
6615 /* The slot time is changed by tg3_setup_phy if we
6616 * run at gigabit with half duplex.
6617 */
6618 tw32(MAC_TX_LENGTHS,
6619 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6620 (6 << TX_LENGTHS_IPG_SHIFT) |
6621 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6622
6623 /* Receive rules. */
6624 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6625 tw32(RCVLPC_CONFIG, 0x0181);
6626
6627 /* Calculate RDMAC_MODE setting early, we need it to determine
6628 * the RCVLPC_STATE_ENABLE mask.
6629 */
6630 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6631 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6632 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6633 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6634 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 6635
d30cdd28
MC
6636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6637 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6638 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6639 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6640
85e94ced
MC
6641 /* If statement applies to 5705 and 5750 PCI devices only */
6642 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6643 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6644 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4 6645 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
c13e3713 6646 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
6647 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6648 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6649 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6650 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6651 }
6652 }
6653
85e94ced
MC
6654 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6655 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6656
1da177e4
LT
6657 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6658 rdmac_mode |= (1 << 27);
1da177e4
LT
6659
6660 /* Receive/send statistics. */
1661394e
MC
6661 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6662 val = tr32(RCVLPC_STATS_ENABLE);
6663 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6664 tw32(RCVLPC_STATS_ENABLE, val);
6665 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6666 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
1da177e4
LT
6667 val = tr32(RCVLPC_STATS_ENABLE);
6668 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6669 tw32(RCVLPC_STATS_ENABLE, val);
6670 } else {
6671 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6672 }
6673 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6674 tw32(SNDDATAI_STATSENAB, 0xffffff);
6675 tw32(SNDDATAI_STATSCTRL,
6676 (SNDDATAI_SCTRL_ENABLE |
6677 SNDDATAI_SCTRL_FASTUPD));
6678
6679 /* Setup host coalescing engine. */
6680 tw32(HOSTCC_MODE, 0);
6681 for (i = 0; i < 2000; i++) {
6682 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6683 break;
6684 udelay(10);
6685 }
6686
d244c892 6687 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6688
6689 /* set status block DMA address */
6690 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6691 ((u64) tp->status_mapping >> 32));
6692 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6693 ((u64) tp->status_mapping & 0xffffffff));
6694
6695 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6696 /* Status/statistics block address. See tg3_timer,
6697 * the tg3_periodic_fetch_stats call there, and
6698 * tg3_get_stats to see how this works for 5705/5750 chips.
6699 */
1da177e4
LT
6700 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6701 ((u64) tp->stats_mapping >> 32));
6702 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6703 ((u64) tp->stats_mapping & 0xffffffff));
6704 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6705 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6706 }
6707
6708 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6709
6710 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6711 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6712 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6713 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6714
6715 /* Clear statistics/status block in chip, and status block in ram. */
6716 for (i = NIC_SRAM_STATS_BLK;
6717 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6718 i += sizeof(u32)) {
6719 tg3_write_mem(tp, i, 0);
6720 udelay(40);
6721 }
6722 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6723
c94e3941
MC
6724 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6725 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6726 /* reset to prevent losing 1st rx packet intermittently */
6727 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6728 udelay(10);
6729 }
6730
1da177e4
LT
6731 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6732 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
e8f3f6ca
MC
6733 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6734 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6735 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6736 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
6737 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6738 udelay(40);
6739
314fba34 6740 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9d26e213 6741 * If TG3_FLG2_IS_NIC is zero, we should read the
314fba34
MC
6742 * register to preserve the GPIO settings for LOMs. The GPIOs,
6743 * whether used as inputs or outputs, are set by boot code after
6744 * reset.
6745 */
9d26e213 6746 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
314fba34
MC
6747 u32 gpio_mask;
6748
9d26e213
MC
6749 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6750 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6751 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
6752
6753 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6754 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6755 GRC_LCLCTRL_GPIO_OUTPUT3;
6756
af36e6b6
MC
6757 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6758 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6759
aaf84465 6760 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
6761 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6762
6763 /* GPIO1 must be driven high for eeprom write protect */
9d26e213
MC
6764 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6765 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6766 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 6767 }
1da177e4
LT
6768 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6769 udelay(100);
6770
09ee929c 6771 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 6772 tp->last_tag = 0;
1da177e4
LT
6773
6774 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6775 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6776 udelay(40);
6777 }
6778
6779 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6780 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6781 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6782 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6783 WDMAC_MODE_LNGREAD_ENAB);
6784
85e94ced
MC
6785 /* If statement applies to 5705 and 5750 PCI devices only */
6786 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6787 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6788 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6789 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6790 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6791 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6792 /* nothing */
6793 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6794 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6795 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6796 val |= WDMAC_MODE_RX_ACCEL;
6797 }
6798 }
6799
d9ab5ad1 6800 /* Enable host coalescing bug fix */
af36e6b6 6801 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
d30cdd28 6802 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
9936bcf6
MC
6803 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6804 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
d9ab5ad1
MC
6805 val |= (1 << 29);
6806
1da177e4
LT
6807 tw32_f(WDMAC_MODE, val);
6808 udelay(40);
6809
9974a356
MC
6810 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6811 u16 pcix_cmd;
6812
6813 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6814 &pcix_cmd);
1da177e4 6815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
6816 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6817 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6818 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
6819 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6820 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6821 }
9974a356
MC
6822 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6823 pcix_cmd);
1da177e4
LT
6824 }
6825
6826 tw32_f(RDMAC_MODE, rdmac_mode);
6827 udelay(40);
6828
6829 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6830 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6831 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
6832
6833 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6834 tw32(SNDDATAC_MODE,
6835 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6836 else
6837 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6838
1da177e4
LT
6839 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6840 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6841 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6842 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1da177e4
LT
6843 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6844 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
1da177e4
LT
6845 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6846 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6847
6848 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6849 err = tg3_load_5701_a0_firmware_fix(tp);
6850 if (err)
6851 return err;
6852 }
6853
1da177e4
LT
6854 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6855 err = tg3_load_tso_firmware(tp);
6856 if (err)
6857 return err;
6858 }
1da177e4
LT
6859
6860 tp->tx_mode = TX_MODE_ENABLE;
6861 tw32_f(MAC_TX_MODE, tp->tx_mode);
6862 udelay(100);
6863
6864 tp->rx_mode = RX_MODE_ENABLE;
9936bcf6
MC
6865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6866 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
af36e6b6
MC
6867 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6868
1da177e4
LT
6869 tw32_f(MAC_RX_MODE, tp->rx_mode);
6870 udelay(10);
6871
6872 if (tp->link_config.phy_is_low_power) {
6873 tp->link_config.phy_is_low_power = 0;
6874 tp->link_config.speed = tp->link_config.orig_speed;
6875 tp->link_config.duplex = tp->link_config.orig_duplex;
6876 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6877 }
6878
6879 tp->mi_mode = MAC_MI_MODE_BASE;
6880 tw32_f(MAC_MI_MODE, tp->mi_mode);
6881 udelay(80);
6882
6883 tw32(MAC_LED_CTRL, tp->led_ctrl);
6884
6885 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6886 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6887 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6888 udelay(10);
6889 }
6890 tw32_f(MAC_RX_MODE, tp->rx_mode);
6891 udelay(10);
6892
6893 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6894 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6895 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6896 /* Set drive transmission level to 1.2V */
6897 /* only if the signal pre-emphasis bit is not set */
6898 val = tr32(MAC_SERDES_CFG);
6899 val &= 0xfffff000;
6900 val |= 0x880;
6901 tw32(MAC_SERDES_CFG, val);
6902 }
6903 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6904 tw32(MAC_SERDES_CFG, 0x616000);
6905 }
6906
6907 /* Prevent chip from dropping frames when flow control
6908 * is enabled.
6909 */
6910 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6911
6912 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6913 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6914 /* Use hardware link auto-negotiation */
6915 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6916 }
6917
d4d2c558
MC
6918 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6919 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6920 u32 tmp;
6921
6922 tmp = tr32(SERDES_RX_CTRL);
6923 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6924 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6925 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6926 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6927 }
6928
36da4d86 6929 err = tg3_setup_phy(tp, 0);
1da177e4
LT
6930 if (err)
6931 return err;
6932
715116a1
MC
6933 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6934 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
1da177e4
LT
6935 u32 tmp;
6936
6937 /* Clear CRC stats. */
569a5df8
MC
6938 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6939 tg3_writephy(tp, MII_TG3_TEST1,
6940 tmp | MII_TG3_TEST1_CRC_EN);
1da177e4
LT
6941 tg3_readphy(tp, 0x14, &tmp);
6942 }
6943 }
6944
6945 __tg3_set_rx_mode(tp->dev);
6946
6947 /* Initialize receive rules. */
6948 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6949 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6950 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6951 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6952
4cf78e4f 6953 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6954 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6955 limit = 8;
6956 else
6957 limit = 16;
6958 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6959 limit -= 4;
6960 switch (limit) {
6961 case 16:
6962 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6963 case 15:
6964 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6965 case 14:
6966 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6967 case 13:
6968 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6969 case 12:
6970 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6971 case 11:
6972 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6973 case 10:
6974 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6975 case 9:
6976 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6977 case 8:
6978 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6979 case 7:
6980 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6981 case 6:
6982 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6983 case 5:
6984 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6985 case 4:
6986 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6987 case 3:
6988 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6989 case 2:
6990 case 1:
6991
6992 default:
6993 break;
6994 };
6995
9ce768ea
MC
6996 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6997 /* Write our heartbeat update interval to APE. */
6998 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
6999 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 7000
1da177e4
LT
7001 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7002
1da177e4
LT
7003 return 0;
7004}
7005
7006/* Called at device open time to get the chip ready for
7007 * packet processing. Invoked with tp->lock held.
7008 */
8e7a22e3 7009static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
7010{
7011 int err;
7012
7013 /* Force the chip into D0. */
bc1c7567 7014 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
7015 if (err)
7016 goto out;
7017
7018 tg3_switch_clocks(tp);
7019
7020 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7021
8e7a22e3 7022 err = tg3_reset_hw(tp, reset_phy);
1da177e4
LT
7023
7024out:
7025 return err;
7026}
7027
/* Fold the 32-bit hardware counter register REG into the 64-bit
 * software statistic PSTAT (a high/low word pair), propagating a
 * carry into the high word when the low word wraps.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 val__ = tr32(REG); \
	(PSTAT)->low += val__; \
	if ((PSTAT)->low < val__) \
		(PSTAT)->high += 1; \
} while (0)
7034
7035static void tg3_periodic_fetch_stats(struct tg3 *tp)
7036{
7037 struct tg3_hw_stats *sp = tp->hw_stats;
7038
7039 if (!netif_carrier_ok(tp->dev))
7040 return;
7041
7042 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7043 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7044 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7045 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7046 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7047 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7048 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7049 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7050 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7051 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7052 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7053 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7054 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7055
7056 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7057 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7058 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7059 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7060 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7061 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7062 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7063 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7064 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7065 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7066 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7067 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7068 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7069 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
463d305b
MC
7070
7071 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7072 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7073 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
1da177e4
LT
7074}
7075
7076static void tg3_timer(unsigned long __opaque)
7077{
7078 struct tg3 *tp = (struct tg3 *) __opaque;
1da177e4 7079
f475f163
MC
7080 if (tp->irq_sync)
7081 goto restart_timer;
7082
f47c11ee 7083 spin_lock(&tp->lock);
1da177e4 7084
fac9b83e
DM
7085 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7086 /* All of this garbage is because when using non-tagged
7087 * IRQ status the mailbox/status_block protocol the chip
7088 * uses with the cpu is race prone.
7089 */
7090 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7091 tw32(GRC_LOCAL_CTRL,
7092 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7093 } else {
7094 tw32(HOSTCC_MODE, tp->coalesce_mode |
7095 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7096 }
1da177e4 7097
fac9b83e
DM
7098 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7099 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
f47c11ee 7100 spin_unlock(&tp->lock);
fac9b83e
DM
7101 schedule_work(&tp->reset_task);
7102 return;
7103 }
1da177e4
LT
7104 }
7105
1da177e4
LT
7106 /* This part only runs once per second. */
7107 if (!--tp->timer_counter) {
fac9b83e
DM
7108 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7109 tg3_periodic_fetch_stats(tp);
7110
1da177e4
LT
7111 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7112 u32 mac_stat;
7113 int phy_event;
7114
7115 mac_stat = tr32(MAC_STATUS);
7116
7117 phy_event = 0;
7118 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7119 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7120 phy_event = 1;
7121 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7122 phy_event = 1;
7123
7124 if (phy_event)
7125 tg3_setup_phy(tp, 0);
7126 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7127 u32 mac_stat = tr32(MAC_STATUS);
7128 int need_setup = 0;
7129
7130 if (netif_carrier_ok(tp->dev) &&
7131 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7132 need_setup = 1;
7133 }
7134 if (! netif_carrier_ok(tp->dev) &&
7135 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7136 MAC_STATUS_SIGNAL_DET))) {
7137 need_setup = 1;
7138 }
7139 if (need_setup) {
3d3ebe74
MC
7140 if (!tp->serdes_counter) {
7141 tw32_f(MAC_MODE,
7142 (tp->mac_mode &
7143 ~MAC_MODE_PORT_MODE_MASK));
7144 udelay(40);
7145 tw32_f(MAC_MODE, tp->mac_mode);
7146 udelay(40);
7147 }
1da177e4
LT
7148 tg3_setup_phy(tp, 0);
7149 }
747e8f8b
MC
7150 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7151 tg3_serdes_parallel_detect(tp);
1da177e4
LT
7152
7153 tp->timer_counter = tp->timer_multiplier;
7154 }
7155
130b8e4d
MC
7156 /* Heartbeat is only sent once every 2 seconds.
7157 *
7158 * The heartbeat is to tell the ASF firmware that the host
7159 * driver is still alive. In the event that the OS crashes,
7160 * ASF needs to reset the hardware to free up the FIFO space
7161 * that may be filled with rx packets destined for the host.
7162 * If the FIFO is full, ASF will no longer function properly.
7163 *
7164 * Unintended resets have been reported on real time kernels
7165 * where the timer doesn't run on time. Netpoll will also have
7166 * same problem.
7167 *
7168 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7169 * to check the ring condition when the heartbeat is expiring
7170 * before doing the reset. This will prevent most unintended
7171 * resets.
7172 */
1da177e4
LT
7173 if (!--tp->asf_counter) {
7174 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7175 u32 val;
7176
bbadf503 7177 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
130b8e4d 7178 FWCMD_NICDRV_ALIVE3);
bbadf503 7179 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
28fbef78 7180 /* 5 seconds timeout */
bbadf503 7181 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
1da177e4
LT
7182 val = tr32(GRC_RX_CPU_EVENT);
7183 val |= (1 << 14);
7184 tw32(GRC_RX_CPU_EVENT, val);
7185 }
7186 tp->asf_counter = tp->asf_multiplier;
7187 }
7188
f47c11ee 7189 spin_unlock(&tp->lock);
1da177e4 7190
f475f163 7191restart_timer:
1da177e4
LT
7192 tp->timer.expires = jiffies + tp->timer_offset;
7193 add_timer(&tp->timer);
7194}
7195
81789ef5 7196static int tg3_request_irq(struct tg3 *tp)
fcfa0a32 7197{
7d12e780 7198 irq_handler_t fn;
fcfa0a32
MC
7199 unsigned long flags;
7200 struct net_device *dev = tp->dev;
7201
7202 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7203 fn = tg3_msi;
7204 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7205 fn = tg3_msi_1shot;
1fb9df5d 7206 flags = IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7207 } else {
7208 fn = tg3_interrupt;
7209 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7210 fn = tg3_interrupt_tagged;
1fb9df5d 7211 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7212 }
7213 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7214}
7215
7938109f
MC
7216static int tg3_test_interrupt(struct tg3 *tp)
7217{
7218 struct net_device *dev = tp->dev;
b16250e3 7219 int err, i, intr_ok = 0;
7938109f 7220
d4bc3927
MC
7221 if (!netif_running(dev))
7222 return -ENODEV;
7223
7938109f
MC
7224 tg3_disable_ints(tp);
7225
7226 free_irq(tp->pdev->irq, dev);
7227
7228 err = request_irq(tp->pdev->irq, tg3_test_isr,
1fb9df5d 7229 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7938109f
MC
7230 if (err)
7231 return err;
7232
38f3843e 7233 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
7234 tg3_enable_ints(tp);
7235
7236 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7237 HOSTCC_MODE_NOW);
7238
7239 for (i = 0; i < 5; i++) {
b16250e3
MC
7240 u32 int_mbox, misc_host_ctrl;
7241
09ee929c
MC
7242 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7243 TG3_64BIT_REG_LOW);
b16250e3
MC
7244 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7245
7246 if ((int_mbox != 0) ||
7247 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7248 intr_ok = 1;
7938109f 7249 break;
b16250e3
MC
7250 }
7251
7938109f
MC
7252 msleep(10);
7253 }
7254
7255 tg3_disable_ints(tp);
7256
7257 free_irq(tp->pdev->irq, dev);
6aa20a22 7258
fcfa0a32 7259 err = tg3_request_irq(tp);
7938109f
MC
7260
7261 if (err)
7262 return err;
7263
b16250e3 7264 if (intr_ok)
7938109f
MC
7265 return 0;
7266
7267 return -EIO;
7268}
7269
7270/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7271 * successfully restored
7272 */
7273static int tg3_test_msi(struct tg3 *tp)
7274{
7275 struct net_device *dev = tp->dev;
7276 int err;
7277 u16 pci_cmd;
7278
7279 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7280 return 0;
7281
7282 /* Turn off SERR reporting in case MSI terminates with Master
7283 * Abort.
7284 */
7285 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7286 pci_write_config_word(tp->pdev, PCI_COMMAND,
7287 pci_cmd & ~PCI_COMMAND_SERR);
7288
7289 err = tg3_test_interrupt(tp);
7290
7291 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7292
7293 if (!err)
7294 return 0;
7295
7296 /* other failures */
7297 if (err != -EIO)
7298 return err;
7299
7300 /* MSI test failed, go back to INTx mode */
7301 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7302 "switching to INTx mode. Please report this failure to "
7303 "the PCI maintainer and include system chipset information.\n",
7304 tp->dev->name);
7305
7306 free_irq(tp->pdev->irq, dev);
7307 pci_disable_msi(tp->pdev);
7308
7309 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7310
fcfa0a32 7311 err = tg3_request_irq(tp);
7938109f
MC
7312 if (err)
7313 return err;
7314
7315 /* Need to reset the chip because the MSI cycle may have terminated
7316 * with Master Abort.
7317 */
f47c11ee 7318 tg3_full_lock(tp, 1);
7938109f 7319
944d980e 7320 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8e7a22e3 7321 err = tg3_init_hw(tp, 1);
7938109f 7322
f47c11ee 7323 tg3_full_unlock(tp);
7938109f
MC
7324
7325 if (err)
7326 free_irq(tp->pdev->irq, dev);
7327
7328 return err;
7329}
7330
1da177e4
LT
/* net_device open() callback: power the chip up, allocate descriptor
 * memory, set up MSI or INTx interrupts, initialize the hardware,
 * arm the periodic timer, and start the transmit queue.
 *
 * Returns 0 on success or a negative errno; on any failure every
 * resource acquired up to that point is released in reverse order.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Enable MSI generation in the chip itself. */
			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* With tagged status the status block tells us when to
		 * re-poll, so a slow 1 Hz timer suffices; otherwise
		 * poll at 10 Hz.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat runs at half the timer rate. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Confirm the platform really delivers MSIs; on failure
		 * tg3_test_msi() falls back to INTx itself, and a nonzero
		 * return here means even that recovery failed.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				/* Enable one-shot MSI mode on chips that
				 * support it.
				 */
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7466
/* Debug-only register/state dump, compiled out (#if 0) in normal
 * builds; re-enable manually when diagnosing hardware lockups.
 * Dumps PCI config state, every functional block's MODE/STATUS
 * registers, BD ring info, on-chip SRAM control blocks, the host
 * status/statistics blocks, mailboxes, and NIC-side descriptors.
 */
#if 0
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* On-chip SRAM ring control blocks. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7694
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

/* net_device stop() callback: quiesce NAPI and the reset worker,
 * stop the queue and timer, halt the chip under the full lock,
 * release the IRQ/MSI, snapshot the final statistics (the hardware
 * counters are lost on power-down), free DMA memory, and drop the
 * device into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Stop softirq processing and wait out any in-flight reset
	 * task before touching the hardware.
	 */
	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve cumulative counters across close/open cycles:
	 * *_prev becomes the baseline the next open adds onto.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7741
7742static inline unsigned long get_stat64(tg3_stat64_t *val)
7743{
7744 unsigned long ret;
7745
7746#if (BITS_PER_LONG == 32)
7747 ret = val->low;
7748#else
7749 ret = ((u64)val->high << 32) | ((u64)val->low);
7750#endif
7751 return ret;
7752}
7753
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper chips the MAC's rx_fcs_errors counter is read
 * from the PHY instead: CRC-error counting is enabled via
 * MII_TG3_TEST1 and the count drained from PHY register 0x14 into
 * the software accumulator tp->phy_crc_errors.  All other chips use
 * the hardware statistics block directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY accesses must be serialized against the rest of
		 * the driver.
		 */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* Reg 0x14 holds the CRC error count; reading
			 * it presumably clears it — TODO confirm against
			 * the PHY datasheet.
			 */
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7779
/* Accumulate one hardware counter on top of the pre-close baseline
 * (tp->estats_prev, saved by tg3_close()).
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

/* Build the full ethtool statistics set: each field is the saved
 * baseline plus the live hardware statistics block value.  If the
 * hardware stats block is not mapped (device closed), the baseline
 * is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7871
/* net_device get_stats() callback: map the chip's hardware statistics
 * block onto struct net_device_stats.  Every field is the baseline
 * saved at the last close (tp->net_stats_prev) plus the live counter,
 * so values survive close/open cycles.  Returns the baseline alone
 * when the hardware stats block is not mapped.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips; see
	 * calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7931
7932static inline u32 calc_crc(unsigned char *buf, int len)
7933{
7934 u32 reg;
7935 u32 tmp;
7936 int j, k;
7937
7938 reg = 0xffffffff;
7939
7940 for (j = 0; j < len; j++) {
7941 reg ^= buf[j];
7942
7943 for (k = 0; k < 8; k++) {
7944 tmp = reg & 0x01;
7945
7946 reg >>= 1;
7947
7948 if (tmp) {
7949 reg ^= 0xedb88320;
7950 }
7951 }
7952 }
7953
7954 return ~reg;
7955}
7956
7957static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7958{
7959 /* accept or reject all multicast frames */
7960 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7961 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7962 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7963 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7964}
7965
/* Program the MAC receive filter from dev->flags and the multicast
 * list.  Caller must hold the full lock (see tg3_set_rx_mode()).
 * Handles promiscuous mode, accept-all-multicast, reject-all, and a
 * CRC-based 128-bit multicast hash filter.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of 128 filter
			 * bits: low 7 bits of the inverted CRC select
			 * the register (top 2) and bit (low 5).
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when something changed; the write is
	 * flushed and followed by a settle delay.
	 */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8029
/* net_device set_rx_mode hook: take the full lock and reprogram the
 * receive filters.  A device that is not up has no filters to program,
 * so do nothing in that case.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8041
/* Size of the ethtool register dump buffer (fixed 32 KiB window). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: the dump is always a full-size snapshot. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8048
/* ethtool get_regs: copy selected register ranges into the caller's
 * TG3_REGDUMP_LEN buffer, placing each register at its own offset so
 * the dump mirrors the chip's register map.  Unread gaps stay zero.
 * Skipped entirely while the PHY is powered down (registers would be
 * unreadable).
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump `len` bytes of registers starting at `base`, positioning the
 * cursor so values land at offset `base` in the output buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8121
/* ethtool get_eeprom_len: report the size of the NVRAM, as probed
 * earlier and cached in tp->nvram_size.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8128
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);

/* ethtool get_eeprom: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into data.  NVRAM is only addressable in 4-byte
 * words, so an unaligned head and tail are read as whole words and
 * the requested bytes extracted; eeprom->len is updated as bytes are
 * produced so a partial result is reported correctly on error.
 * Refused (-EAGAIN) while the PHY is in low-power state.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, val, b_offset, b_count;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NOTE(review): word is stored little-endian in the
		 * bounce variable so byte extraction below matches
		 * NVRAM byte order — confirm on big-endian hosts.
		 */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* report how much was successfully read */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8193
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

/* ethtool set_eeprom: write eeprom->len bytes to NVRAM at
 * eeprom->offset.  Because NVRAM writes are word-based, an unaligned
 * head or tail is handled read-modify-write: the bordering words are
 * fetched, a word-aligned bounce buffer is assembled around the
 * caller's data, and the whole padded range is written.  Refused
 * while the PHY is in low-power state or if the magic is wrong.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary:
		 * preserve the bytes before `offset` in the first word.
		 */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary:
		 * preserve the bytes after the write in the last word.
		 */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Assemble a word-aligned image: saved head word,
		 * caller data, saved tail word.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8254
/* ethtool get_settings: report supported modes (gigabit unless the
 * chip is 10/100-only, copper modes unless it is a SerDes part),
 * current advertisement, and — only while the interface is up —
 * the negotiated speed/duplex.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		/* active_* are only meaningful while the link is
		 * managed; when down they are left untouched.
		 */
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
6aa20a22 8289
1da177e4
LT
8290static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8291{
8292 struct tg3 *tp = netdev_priv(dev);
6aa20a22
JG
8293
8294 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
8295 /* These are the only valid advertisement bits allowed. */
8296 if (cmd->autoneg == AUTONEG_ENABLE &&
8297 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8298 ADVERTISED_1000baseT_Full |
8299 ADVERTISED_Autoneg |
8300 ADVERTISED_FIBRE)))
8301 return -EINVAL;
37ff238d
MC
8302 /* Fiber can only do SPEED_1000. */
8303 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8304 (cmd->speed != SPEED_1000))
8305 return -EINVAL;
8306 /* Copper cannot force SPEED_1000. */
8307 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8308 (cmd->speed == SPEED_1000))
8309 return -EINVAL;
8310 else if ((cmd->speed == SPEED_1000) &&
8311 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8312 return -EINVAL;
1da177e4 8313
f47c11ee 8314 tg3_full_lock(tp, 0);
1da177e4
LT
8315
8316 tp->link_config.autoneg = cmd->autoneg;
8317 if (cmd->autoneg == AUTONEG_ENABLE) {
405d8e5c
AG
8318 tp->link_config.advertising = (cmd->advertising |
8319 ADVERTISED_Autoneg);
1da177e4
LT
8320 tp->link_config.speed = SPEED_INVALID;
8321 tp->link_config.duplex = DUPLEX_INVALID;
8322 } else {
8323 tp->link_config.advertising = 0;
8324 tp->link_config.speed = cmd->speed;
8325 tp->link_config.duplex = cmd->duplex;
8326 }
6aa20a22 8327
24fcad6b
MC
8328 tp->link_config.orig_speed = tp->link_config.speed;
8329 tp->link_config.orig_duplex = tp->link_config.duplex;
8330 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8331
1da177e4
LT
8332 if (netif_running(dev))
8333 tg3_setup_phy(tp, 1);
8334
f47c11ee 8335 tg3_full_unlock(tp);
6aa20a22 8336
1da177e4
LT
8337 return 0;
8338}
6aa20a22 8339
1da177e4
LT
8340static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8341{
8342 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8343
1da177e4
LT
8344 strcpy(info->driver, DRV_MODULE_NAME);
8345 strcpy(info->version, DRV_MODULE_VERSION);
c4e6575c 8346 strcpy(info->fw_version, tp->fw_ver);
1da177e4
LT
8347 strcpy(info->bus_info, pci_name(tp->pdev));
8348}
6aa20a22 8349
1da177e4
LT
8350static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8351{
8352 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8353
a85feb8c
GZ
8354 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8355 wol->supported = WAKE_MAGIC;
8356 else
8357 wol->supported = 0;
1da177e4
LT
8358 wol->wolopts = 0;
8359 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8360 wol->wolopts = WAKE_MAGIC;
8361 memset(&wol->sopass, 0, sizeof(wol->sopass));
8362}
6aa20a22 8363
1da177e4
LT
8364static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8365{
8366 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8367
1da177e4
LT
8368 if (wol->wolopts & ~WAKE_MAGIC)
8369 return -EINVAL;
8370 if ((wol->wolopts & WAKE_MAGIC) &&
a85feb8c 8371 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
1da177e4 8372 return -EINVAL;
6aa20a22 8373
f47c11ee 8374 spin_lock_bh(&tp->lock);
1da177e4
LT
8375 if (wol->wolopts & WAKE_MAGIC)
8376 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8377 else
8378 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 8379 spin_unlock_bh(&tp->lock);
6aa20a22 8380
1da177e4
LT
8381 return 0;
8382}
6aa20a22 8383
1da177e4
LT
8384static u32 tg3_get_msglevel(struct net_device *dev)
8385{
8386 struct tg3 *tp = netdev_priv(dev);
8387 return tp->msg_enable;
8388}
6aa20a22 8389
1da177e4
LT
8390static void tg3_set_msglevel(struct net_device *dev, u32 value)
8391{
8392 struct tg3 *tp = netdev_priv(dev);
8393 tp->msg_enable = value;
8394}
6aa20a22 8395
1da177e4
LT
8396static int tg3_set_tso(struct net_device *dev, u32 value)
8397{
8398 struct tg3 *tp = netdev_priv(dev);
8399
8400 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8401 if (value)
8402 return -EINVAL;
8403 return 0;
8404 }
b5d3772c
MC
8405 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8406 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9936bcf6 8407 if (value) {
b0026624 8408 dev->features |= NETIF_F_TSO6;
9936bcf6
MC
8409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8410 dev->features |= NETIF_F_TSO_ECN;
8411 } else
8412 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
b0026624 8413 }
1da177e4
LT
8414 return ethtool_op_set_tso(dev, value);
8415}
6aa20a22 8416
1da177e4
LT
/* ethtool nway_reset: restart autonegotiation on the copper PHY.
 *
 * Returns -EAGAIN if the interface is down, -EINVAL for SERDES links
 * (no MII BMCR to poke) or if autoneg is not enabled, 0 on success.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): the result of this first read is discarded and the
	 * register is immediately read again below — presumably a dummy
	 * read to flush latched PHY status; confirm before removing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	/* Restart autoneg only if it is enabled in BMCR, or if the link
	 * was brought up by parallel detection.
	 */
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
6aa20a22 8443
1da177e4
LT
8444static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8445{
8446 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8447
1da177e4
LT
8448 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8449 ering->rx_mini_max_pending = 0;
4f81c32b
MC
8450 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8451 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8452 else
8453 ering->rx_jumbo_max_pending = 0;
8454
8455 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
8456
8457 ering->rx_pending = tp->rx_pending;
8458 ering->rx_mini_pending = 0;
4f81c32b
MC
8459 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8460 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8461 else
8462 ering->rx_jumbo_pending = 0;
8463
1da177e4
LT
8464 ering->tx_pending = tp->tx_pending;
8465}
6aa20a22 8466
1da177e4
LT
8467static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8468{
8469 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 8470 int irq_sync = 0, err = 0;
6aa20a22 8471
1da177e4
LT
8472 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8473 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
bc3a9254
MC
8474 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8475 (ering->tx_pending <= MAX_SKB_FRAGS) ||
7f62ad5d 8476 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
bc3a9254 8477 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
1da177e4 8478 return -EINVAL;
6aa20a22 8479
bbe832c0 8480 if (netif_running(dev)) {
1da177e4 8481 tg3_netif_stop(tp);
bbe832c0
MC
8482 irq_sync = 1;
8483 }
1da177e4 8484
bbe832c0 8485 tg3_full_lock(tp, irq_sync);
6aa20a22 8486
1da177e4
LT
8487 tp->rx_pending = ering->rx_pending;
8488
8489 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8490 tp->rx_pending > 63)
8491 tp->rx_pending = 63;
8492 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8493 tp->tx_pending = ering->tx_pending;
8494
8495 if (netif_running(dev)) {
944d980e 8496 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
b9ec6c1b
MC
8497 err = tg3_restart_hw(tp, 1);
8498 if (!err)
8499 tg3_netif_start(tp);
1da177e4
LT
8500 }
8501
f47c11ee 8502 tg3_full_unlock(tp);
6aa20a22 8503
b9ec6c1b 8504 return err;
1da177e4 8505}
6aa20a22 8506
1da177e4
LT
8507static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8508{
8509 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8510
1da177e4
LT
8511 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8512 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8513 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8514}
6aa20a22 8515
1da177e4
LT
8516static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8517{
8518 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 8519 int irq_sync = 0, err = 0;
6aa20a22 8520
bbe832c0 8521 if (netif_running(dev)) {
1da177e4 8522 tg3_netif_stop(tp);
bbe832c0
MC
8523 irq_sync = 1;
8524 }
1da177e4 8525
bbe832c0 8526 tg3_full_lock(tp, irq_sync);
f47c11ee 8527
1da177e4
LT
8528 if (epause->autoneg)
8529 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8530 else
8531 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8532 if (epause->rx_pause)
8533 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8534 else
8535 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8536 if (epause->tx_pause)
8537 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8538 else
8539 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8540
8541 if (netif_running(dev)) {
944d980e 8542 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
b9ec6c1b
MC
8543 err = tg3_restart_hw(tp, 1);
8544 if (!err)
8545 tg3_netif_start(tp);
1da177e4 8546 }
f47c11ee
DM
8547
8548 tg3_full_unlock(tp);
6aa20a22 8549
b9ec6c1b 8550 return err;
1da177e4 8551}
6aa20a22 8552
1da177e4
LT
8553static u32 tg3_get_rx_csum(struct net_device *dev)
8554{
8555 struct tg3 *tp = netdev_priv(dev);
8556 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8557}
6aa20a22 8558
1da177e4
LT
8559static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8560{
8561 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8562
1da177e4
LT
8563 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8564 if (data != 0)
8565 return -EINVAL;
8566 return 0;
8567 }
6aa20a22 8568
f47c11ee 8569 spin_lock_bh(&tp->lock);
1da177e4
LT
8570 if (data)
8571 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8572 else
8573 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 8574 spin_unlock_bh(&tp->lock);
6aa20a22 8575
1da177e4
LT
8576 return 0;
8577}
6aa20a22 8578
1da177e4
LT
8579static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8580{
8581 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8582
1da177e4
LT
8583 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8584 if (data != 0)
8585 return -EINVAL;
8586 return 0;
8587 }
6aa20a22 8588
af36e6b6 8589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 8590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
8591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6460d948 8593 ethtool_op_set_tx_ipv6_csum(dev, data);
1da177e4 8594 else
9c27dbdf 8595 ethtool_op_set_tx_csum(dev, data);
1da177e4
LT
8596
8597 return 0;
8598}
8599
b9f2c044 8600static int tg3_get_sset_count (struct net_device *dev, int sset)
1da177e4 8601{
b9f2c044
JG
8602 switch (sset) {
8603 case ETH_SS_TEST:
8604 return TG3_NUM_TEST;
8605 case ETH_SS_STATS:
8606 return TG3_NUM_STATS;
8607 default:
8608 return -EOPNOTSUPP;
8609 }
4cafd3f5
MC
8610}
8611
1da177e4
LT
8612static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8613{
8614 switch (stringset) {
8615 case ETH_SS_STATS:
8616 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8617 break;
4cafd3f5
MC
8618 case ETH_SS_TEST:
8619 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8620 break;
1da177e4
LT
8621 default:
8622 WARN_ON(1); /* we need a WARN() */
8623 break;
8624 }
8625}
8626
4009a93d
MC
8627static int tg3_phys_id(struct net_device *dev, u32 data)
8628{
8629 struct tg3 *tp = netdev_priv(dev);
8630 int i;
8631
8632 if (!netif_running(tp->dev))
8633 return -EAGAIN;
8634
8635 if (data == 0)
8636 data = 2;
8637
8638 for (i = 0; i < (data * 2); i++) {
8639 if ((i % 2) == 0)
8640 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8641 LED_CTRL_1000MBPS_ON |
8642 LED_CTRL_100MBPS_ON |
8643 LED_CTRL_10MBPS_ON |
8644 LED_CTRL_TRAFFIC_OVERRIDE |
8645 LED_CTRL_TRAFFIC_BLINK |
8646 LED_CTRL_TRAFFIC_LED);
6aa20a22 8647
4009a93d
MC
8648 else
8649 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8650 LED_CTRL_TRAFFIC_OVERRIDE);
8651
8652 if (msleep_interruptible(500))
8653 break;
8654 }
8655 tw32(MAC_LED_CTRL, tp->led_ctrl);
8656 return 0;
8657}
8658
1da177e4
LT
8659static void tg3_get_ethtool_stats (struct net_device *dev,
8660 struct ethtool_stats *estats, u64 *tmp_stats)
8661{
8662 struct tg3 *tp = netdev_priv(dev);
8663 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8664}
8665
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* Self-test: verify the NVRAM checksums.
 *
 * Three image layouts are recognized by the magic at offset 0:
 * legacy images (CRC-checked bootstrap and manufacturing blocks),
 * selfboot format-1 images (simple byte-sum), and selfboot HW images
 * (odd-parity bit per data byte).  Returns 0 on success, -EIO on a
 * checksum/parity mismatch or read failure, -ENOMEM on allocation
 * failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick how many bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format: the whole image byte-sums to zero. */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Selfboot HW format: parity bits are packed into the bytes at
	 * offsets 0, 8 and 16; each remaining data byte must have odd
	 * parity together with its parity bit.
	 */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits in each of these bytes. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* 6 parity bits here, then 8 more in the
				 * following byte.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* data popcount + parity bit must be odd. */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8783
ca43007a
MC
8784#define TG3_SERDES_TIMEOUT_SEC 2
8785#define TG3_COPPER_TIMEOUT_SEC 6
8786
8787static int tg3_test_link(struct tg3 *tp)
8788{
8789 int i, max;
8790
8791 if (!netif_running(tp->dev))
8792 return -ENODEV;
8793
4c987487 8794 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
8795 max = TG3_SERDES_TIMEOUT_SEC;
8796 else
8797 max = TG3_COPPER_TIMEOUT_SEC;
8798
8799 for (i = 0; i < max; i++) {
8800 if (netif_carrier_ok(tp->dev))
8801 return 0;
8802
8803 if (msleep_interruptible(1000))
8804 break;
8805 }
8806
8807 return -EIO;
8808}
8809
a71116d1 8810/* Only test the commonly used registers */
30ca3e37 8811static int tg3_test_registers(struct tg3 *tp)
a71116d1 8812{
b16250e3 8813 int i, is_5705, is_5750;
a71116d1
MC
8814 u32 offset, read_mask, write_mask, val, save_val, read_val;
8815 static struct {
8816 u16 offset;
8817 u16 flags;
8818#define TG3_FL_5705 0x1
8819#define TG3_FL_NOT_5705 0x2
8820#define TG3_FL_NOT_5788 0x4
b16250e3 8821#define TG3_FL_NOT_5750 0x8
a71116d1
MC
8822 u32 read_mask;
8823 u32 write_mask;
8824 } reg_tbl[] = {
8825 /* MAC Control Registers */
8826 { MAC_MODE, TG3_FL_NOT_5705,
8827 0x00000000, 0x00ef6f8c },
8828 { MAC_MODE, TG3_FL_5705,
8829 0x00000000, 0x01ef6b8c },
8830 { MAC_STATUS, TG3_FL_NOT_5705,
8831 0x03800107, 0x00000000 },
8832 { MAC_STATUS, TG3_FL_5705,
8833 0x03800100, 0x00000000 },
8834 { MAC_ADDR_0_HIGH, 0x0000,
8835 0x00000000, 0x0000ffff },
8836 { MAC_ADDR_0_LOW, 0x0000,
8837 0x00000000, 0xffffffff },
8838 { MAC_RX_MTU_SIZE, 0x0000,
8839 0x00000000, 0x0000ffff },
8840 { MAC_TX_MODE, 0x0000,
8841 0x00000000, 0x00000070 },
8842 { MAC_TX_LENGTHS, 0x0000,
8843 0x00000000, 0x00003fff },
8844 { MAC_RX_MODE, TG3_FL_NOT_5705,
8845 0x00000000, 0x000007fc },
8846 { MAC_RX_MODE, TG3_FL_5705,
8847 0x00000000, 0x000007dc },
8848 { MAC_HASH_REG_0, 0x0000,
8849 0x00000000, 0xffffffff },
8850 { MAC_HASH_REG_1, 0x0000,
8851 0x00000000, 0xffffffff },
8852 { MAC_HASH_REG_2, 0x0000,
8853 0x00000000, 0xffffffff },
8854 { MAC_HASH_REG_3, 0x0000,
8855 0x00000000, 0xffffffff },
8856
8857 /* Receive Data and Receive BD Initiator Control Registers. */
8858 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8859 0x00000000, 0xffffffff },
8860 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8861 0x00000000, 0xffffffff },
8862 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8863 0x00000000, 0x00000003 },
8864 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8865 0x00000000, 0xffffffff },
8866 { RCVDBDI_STD_BD+0, 0x0000,
8867 0x00000000, 0xffffffff },
8868 { RCVDBDI_STD_BD+4, 0x0000,
8869 0x00000000, 0xffffffff },
8870 { RCVDBDI_STD_BD+8, 0x0000,
8871 0x00000000, 0xffff0002 },
8872 { RCVDBDI_STD_BD+0xc, 0x0000,
8873 0x00000000, 0xffffffff },
6aa20a22 8874
a71116d1
MC
8875 /* Receive BD Initiator Control Registers. */
8876 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8877 0x00000000, 0xffffffff },
8878 { RCVBDI_STD_THRESH, TG3_FL_5705,
8879 0x00000000, 0x000003ff },
8880 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8881 0x00000000, 0xffffffff },
6aa20a22 8882
a71116d1
MC
8883 /* Host Coalescing Control Registers. */
8884 { HOSTCC_MODE, TG3_FL_NOT_5705,
8885 0x00000000, 0x00000004 },
8886 { HOSTCC_MODE, TG3_FL_5705,
8887 0x00000000, 0x000000f6 },
8888 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8889 0x00000000, 0xffffffff },
8890 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8891 0x00000000, 0x000003ff },
8892 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8893 0x00000000, 0xffffffff },
8894 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8895 0x00000000, 0x000003ff },
8896 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8897 0x00000000, 0xffffffff },
8898 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8899 0x00000000, 0x000000ff },
8900 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8901 0x00000000, 0xffffffff },
8902 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8903 0x00000000, 0x000000ff },
8904 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8905 0x00000000, 0xffffffff },
8906 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8907 0x00000000, 0xffffffff },
8908 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8909 0x00000000, 0xffffffff },
8910 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8911 0x00000000, 0x000000ff },
8912 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8913 0x00000000, 0xffffffff },
8914 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8915 0x00000000, 0x000000ff },
8916 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8917 0x00000000, 0xffffffff },
8918 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8919 0x00000000, 0xffffffff },
8920 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8921 0x00000000, 0xffffffff },
8922 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8923 0x00000000, 0xffffffff },
8924 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8925 0x00000000, 0xffffffff },
8926 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8927 0xffffffff, 0x00000000 },
8928 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8929 0xffffffff, 0x00000000 },
8930
8931 /* Buffer Manager Control Registers. */
b16250e3 8932 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
a71116d1 8933 0x00000000, 0x007fff80 },
b16250e3 8934 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
a71116d1
MC
8935 0x00000000, 0x007fffff },
8936 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8937 0x00000000, 0x0000003f },
8938 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8939 0x00000000, 0x000001ff },
8940 { BUFMGR_MB_HIGH_WATER, 0x0000,
8941 0x00000000, 0x000001ff },
8942 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8943 0xffffffff, 0x00000000 },
8944 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8945 0xffffffff, 0x00000000 },
6aa20a22 8946
a71116d1
MC
8947 /* Mailbox Registers */
8948 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8949 0x00000000, 0x000001ff },
8950 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8951 0x00000000, 0x000001ff },
8952 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8953 0x00000000, 0x000007ff },
8954 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8955 0x00000000, 0x000001ff },
8956
8957 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8958 };
8959
b16250e3
MC
8960 is_5705 = is_5750 = 0;
8961 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
a71116d1 8962 is_5705 = 1;
b16250e3
MC
8963 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8964 is_5750 = 1;
8965 }
a71116d1
MC
8966
8967 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8968 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8969 continue;
8970
8971 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8972 continue;
8973
8974 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8975 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8976 continue;
8977
b16250e3
MC
8978 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8979 continue;
8980
a71116d1
MC
8981 offset = (u32) reg_tbl[i].offset;
8982 read_mask = reg_tbl[i].read_mask;
8983 write_mask = reg_tbl[i].write_mask;
8984
8985 /* Save the original register content */
8986 save_val = tr32(offset);
8987
8988 /* Determine the read-only value. */
8989 read_val = save_val & read_mask;
8990
8991 /* Write zero to the register, then make sure the read-only bits
8992 * are not changed and the read/write bits are all zeros.
8993 */
8994 tw32(offset, 0);
8995
8996 val = tr32(offset);
8997
8998 /* Test the read-only and read/write bits. */
8999 if (((val & read_mask) != read_val) || (val & write_mask))
9000 goto out;
9001
9002 /* Write ones to all the bits defined by RdMask and WrMask, then
9003 * make sure the read-only bits are not changed and the
9004 * read/write bits are all ones.
9005 */
9006 tw32(offset, read_mask | write_mask);
9007
9008 val = tr32(offset);
9009
9010 /* Test the read-only bits. */
9011 if ((val & read_mask) != read_val)
9012 goto out;
9013
9014 /* Test the read/write bits. */
9015 if ((val & write_mask) != write_mask)
9016 goto out;
9017
9018 tw32(offset, save_val);
9019 }
9020
9021 return 0;
9022
9023out:
9f88f29f
MC
9024 if (netif_msg_hw(tp))
9025 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9026 offset);
a71116d1
MC
9027 tw32(offset, save_val);
9028 return -EIO;
9029}
9030
7942e1db
MC
9031static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9032{
f71e1309 9033 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
9034 int i;
9035 u32 j;
9036
e9edda69 9037 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7942e1db
MC
9038 for (j = 0; j < len; j += 4) {
9039 u32 val;
9040
9041 tg3_write_mem(tp, offset + j, test_pattern[i]);
9042 tg3_read_mem(tp, offset + j, &val);
9043 if (val != test_pattern[i])
9044 return -EIO;
9045 }
9046 }
9047 return 0;
9048}
9049
/* Self-test: pattern-test the NIC's internal memory regions.
 *
 * The set of testable {offset, len} regions differs per chip family;
 * each table is terminated by an offset of 0xffffffff.  Returns 0 on
 * success or the first non-zero error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the memory map matching this ASIC revision. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			mem_tbl = mem_tbl_5755;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			mem_tbl = mem_tbl_5906;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
9107
9f40dead
MC
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Self-test helper: send one 1514-byte frame through an internal
 * loopback path (MAC or PHY, per @loopback_mode) and verify it comes
 * back intact on the standard rx ring.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * skb cannot be allocated, -EIO if the frame is not seen or the
 * payload is corrupted.  Caller must hold the device quiesced; the
 * looped-back rx buffer is left for tg3_free_rings() to reclaim.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* NOTE(review): register 0x1b / bit 0x20 is a 5906
			 * EPHY shadow register tweak — presumably disabling
			 * a test feature before loopback; confirm against
			 * the 5906 PHY datasheet.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* Link-polarity requirements differ per PHY model. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a full-size test frame: our MAC address, zero padding,
	 * then a byte ramp that can be verified on receive.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the tx mailbox and read it back to flush the posted write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the rx descriptor: right ring, no errors, right size. */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the byte ramp survived the loopback. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9278
9f40dead
MC
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED | \
					 TG3_PHY_LOOPBACK_FAILED)

/* Self-test: run MAC loopback and (for non-SERDES ports) PHY loopback.
 *
 * Returns a bitmask of TG3_*_LOOPBACK_FAILED, or TG3_LOOPBACK_FAILED
 * wholesale if the interface is down, the hardware cannot be reset,
 * or the CPMU mutex cannot be acquired.  On CPMU-equipped chips,
 * link-speed power management is disabled around the test and the
 * CPMU hardware mutex guards the register changes.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		cpmuctrl = tr32(TG3_CPMU_CTRL);

		/* Turn off power management based on link speed. */
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9337
4cafd3f5
MC
/* ethtool self-test entry point.
 *
 * data[] results: [0] nvram, [1] link, [2] registers, [3] memory,
 * [4] loopback bitmask, [5] interrupt; non-zero means failure and
 * ETH_TEST_FL_FAILED is set in etest->flags.  The register, memory
 * and loopback tests are destructive (offline only): the chip is
 * halted, tested, then reinitialized.  A low-power device is powered
 * up for the duration of the test and returned to D3hot afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it and stop the on-chip CPUs
		 * (the tx CPU exists only on pre-5705 parts), taking the
		 * NVRAM lock so firmware cannot interfere.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test needs interrupts, so drop the lock. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the chip back up for normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9410
1da177e4
LT
/* ioctl handler: implements the MII register access ioctls
 * (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  Everything else returns
 * -EOPNOTSUPP.  PHY accesses are refused for SERDES devices (no MDIO PHY)
 * and while the PHY is powered down (-EAGAIN).
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* tp->lock serializes MDIO access with the rest of the
		 * driver.
		 */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
9462
#if TG3_VLAN_TAG_USED
/* VLAN acceleration callback: record the new vlan_group and reprogram the
 * RX mode so the chip keeps/strips VLAN tags accordingly.  The interface is
 * stopped around the update and restarted afterwards.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
9484
15f9850d
DM
9485static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9486{
9487 struct tg3 *tp = netdev_priv(dev);
9488
9489 memcpy(ec, &tp->coal, sizeof(*ec));
9490 return 0;
9491}
9492
d244c892
MC
9493static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9494{
9495 struct tg3 *tp = netdev_priv(dev);
9496 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9497 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9498
9499 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9500 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9501 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9502 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9503 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9504 }
9505
9506 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9507 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9508 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9509 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9510 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9511 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9512 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9513 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9514 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9515 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9516 return -EINVAL;
9517
9518 /* No rx interrupts will be generated if both are zero */
9519 if ((ec->rx_coalesce_usecs == 0) &&
9520 (ec->rx_max_coalesced_frames == 0))
9521 return -EINVAL;
9522
9523 /* No tx interrupts will be generated if both are zero */
9524 if ((ec->tx_coalesce_usecs == 0) &&
9525 (ec->tx_max_coalesced_frames == 0))
9526 return -EINVAL;
9527
9528 /* Only copy relevant parameters, ignore all others. */
9529 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9530 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9531 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9532 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9533 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9534 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9535 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9536 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9537 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9538
9539 if (netif_running(dev)) {
9540 tg3_full_lock(tp, 0);
9541 __tg3_set_coalesce(tp, &tp->coal);
9542 tg3_full_unlock(tp);
9543 }
9544 return 0;
9545}
9546
7282d491 9547static const struct ethtool_ops tg3_ethtool_ops = {
1da177e4
LT
9548 .get_settings = tg3_get_settings,
9549 .set_settings = tg3_set_settings,
9550 .get_drvinfo = tg3_get_drvinfo,
9551 .get_regs_len = tg3_get_regs_len,
9552 .get_regs = tg3_get_regs,
9553 .get_wol = tg3_get_wol,
9554 .set_wol = tg3_set_wol,
9555 .get_msglevel = tg3_get_msglevel,
9556 .set_msglevel = tg3_set_msglevel,
9557 .nway_reset = tg3_nway_reset,
9558 .get_link = ethtool_op_get_link,
9559 .get_eeprom_len = tg3_get_eeprom_len,
9560 .get_eeprom = tg3_get_eeprom,
9561 .set_eeprom = tg3_set_eeprom,
9562 .get_ringparam = tg3_get_ringparam,
9563 .set_ringparam = tg3_set_ringparam,
9564 .get_pauseparam = tg3_get_pauseparam,
9565 .set_pauseparam = tg3_set_pauseparam,
9566 .get_rx_csum = tg3_get_rx_csum,
9567 .set_rx_csum = tg3_set_rx_csum,
1da177e4 9568 .set_tx_csum = tg3_set_tx_csum,
1da177e4 9569 .set_sg = ethtool_op_set_sg,
1da177e4 9570 .set_tso = tg3_set_tso,
4cafd3f5 9571 .self_test = tg3_self_test,
1da177e4 9572 .get_strings = tg3_get_strings,
4009a93d 9573 .phys_id = tg3_phys_id,
1da177e4 9574 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 9575 .get_coalesce = tg3_get_coalesce,
d244c892 9576 .set_coalesce = tg3_set_coalesce,
b9f2c044 9577 .get_sset_count = tg3_get_sset_count,
1da177e4
LT
9578};
9579
/* Probe the size of a legacy EEPROM part by looking for the magic
 * signature to reappear at a wrapped address.  Leaves tp->nvram_size at
 * the default EEPROM_CHIP_SIZE if the device cannot be read or carries
 * no recognized signature.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return;

	/* Bail out unless word 0 carries one of the known signatures
	 * (plain EEPROM magic, or firmware/hardware selfboot variants).
	 */
	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
6aa20a22 9613
1da177e4
LT
9614static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9615{
9616 u32 val;
9617
1820180b 9618 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
1b27777a
MC
9619 return;
9620
9621 /* Selfboot format */
1820180b 9622 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
9623 tg3_get_eeprom_size(tp);
9624 return;
9625 }
9626
1da177e4
LT
9627 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9628 if (val != 0) {
9629 tp->nvram_size = (val >> 16) * 1024;
9630 return;
9631 }
9632 }
989a9d23 9633 tp->nvram_size = 0x80000;
1da177e4
LT
9634}
9635
/* Decode NVRAM_CFG1 for pre-5752 chips: determine the flash vendor,
 * page size, and whether the part is flash (vs. EEPROM) or buffered,
 * recording the results in tp->nvram_* and tp->tg3_flags.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* EEPROM part: make sure compatibility bypass is off. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	/* Only 5750 and 5780-class chips encode the vendor in CFG1. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	}
	else {
		/* Older chips: assume buffered Atmel flash. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9688
361b4ac2
MC
/* Decode NVRAM_CFG1 on 5752 chips: vendor, page size, flash/EEPROM type,
 * and the TPM protected-NVRAM bit.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts encode the page size in CFG1 as well. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
		case FLASH_5752PAGE_SIZE_256:
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5752PAGE_SIZE_512:
			tp->nvram_pagesize = 512;
			break;
		case FLASH_5752PAGE_SIZE_1K:
			tp->nvram_pagesize = 1024;
			break;
		case FLASH_5752PAGE_SIZE_2K:
			tp->nvram_pagesize = 2048;
			break;
		case FLASH_5752PAGE_SIZE_4K:
			tp->nvram_pagesize = 4096;
			break;
		case FLASH_5752PAGE_SIZE_264:
			tp->nvram_pagesize = 264;
			break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9749
d3c7b886
MC
/* Decode NVRAM_CFG1 on 5755 chips.  In addition to vendor/page size,
 * the usable NVRAM size depends on the specific part and on whether TPM
 * protection is active (protected parts expose a reduced window).
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		/* Size depends on the exact Atmel part and protection. */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 : 0x80000);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 : 0x40000);
		else
			tp->nvram_size = (protect ? 0x1f200 : 0x20000);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		/* ST parts: size by density, halved window when protected. */
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ? 0x10000 : 0x20000);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ? 0x10000 : 0x40000);
		else
			tp->nvram_size = (protect ? 0x20000 : 0x80000);
		break;
	}
}
9796
1b27777a
MC
/* Decode NVRAM_CFG1 on 5787 (and 5784) chips: vendor, page size and
 * flash/EEPROM type.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM part: make sure compatibility bypass is off. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
9834
6b91fa02
MC
/* Decode NVRAM_CFG1 on 5761 chips.  When TPM protection is active the
 * usable size is read from the NVRAM_ADDR_LOCKOUT register; otherwise it
 * is inferred from the flash part's density.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		/* These Atmel parts use linear addressing; skip the
		 * page/offset address translation.
		 */
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows the part's density. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = 0x100000;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = 0x80000;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = 0x40000;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = 0x20000;
			break;
		}
	}
}
9909
b5d3772c
MC
9910static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9911{
9912 tp->nvram_jedecnum = JEDEC_ATMEL;
9913 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9914 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9915}
9916
1da177e4
LT
9917/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9918static void __devinit tg3_nvram_init(struct tg3 *tp)
9919{
1da177e4
LT
9920 tw32_f(GRC_EEPROM_ADDR,
9921 (EEPROM_ADDR_FSM_RESET |
9922 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9923 EEPROM_ADDR_CLKPERD_SHIFT)));
9924
9d57f01c 9925 msleep(1);
1da177e4
LT
9926
9927 /* Enable seeprom accesses. */
9928 tw32_f(GRC_LOCAL_CTRL,
9929 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9930 udelay(100);
9931
9932 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9933 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9934 tp->tg3_flags |= TG3_FLAG_NVRAM;
9935
ec41c7df
MC
9936 if (tg3_nvram_lock(tp)) {
9937 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9938 "tg3_nvram_init failed.\n", tp->dev->name);
9939 return;
9940 }
e6af301b 9941 tg3_enable_nvram_access(tp);
1da177e4 9942
989a9d23
MC
9943 tp->nvram_size = 0;
9944
361b4ac2
MC
9945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9946 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
9947 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9948 tg3_get_5755_nvram_info(tp);
d30cdd28
MC
9949 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
1b27777a 9951 tg3_get_5787_nvram_info(tp);
6b91fa02
MC
9952 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9953 tg3_get_5761_nvram_info(tp);
b5d3772c
MC
9954 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9955 tg3_get_5906_nvram_info(tp);
361b4ac2
MC
9956 else
9957 tg3_get_nvram_info(tp);
9958
989a9d23
MC
9959 if (tp->nvram_size == 0)
9960 tg3_get_nvram_size(tp);
1da177e4 9961
e6af301b 9962 tg3_disable_nvram_access(tp);
381291b7 9963 tg3_nvram_unlock(tp);
1da177e4
LT
9964
9965 } else {
9966 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9967
9968 tg3_get_eeprom_size(tp);
9969 }
9970}
9971
/* Read one 32-bit word from a serial EEPROM via the GRC_EEPROM_ADDR/DATA
 * registers.  @offset must be dword-aligned and within the address mask.
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the non-address bits of GRC_EEPROM_ADDR, then kick off
	 * a read of the requested offset.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
10005
10006#define NVRAM_CMD_TIMEOUT 10000
10007
10008static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10009{
10010 int i;
10011
10012 tw32(NVRAM_CMD, nvram_cmd);
10013 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10014 udelay(10);
10015 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10016 udelay(10);
10017 break;
10018 }
10019 }
10020 if (i == NVRAM_CMD_TIMEOUT) {
10021 return -EBUSY;
10022 }
10023 return 0;
10024}
10025
1820180b
MC
10026static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10027{
10028 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10029 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10030 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10031 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
1820180b
MC
10032 (tp->nvram_jedecnum == JEDEC_ATMEL))
10033
10034 addr = ((addr / tp->nvram_pagesize) <<
10035 ATMEL_AT45DB0X1B_PAGE_POS) +
10036 (addr % tp->nvram_pagesize);
10037
10038 return addr;
10039}
10040
c4e6575c
MC
10041static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10042{
10043 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10044 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10045 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10046 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
c4e6575c
MC
10047 (tp->nvram_jedecnum == JEDEC_ATMEL))
10048
10049 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10050 tp->nvram_pagesize) +
10051 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10052
10053 return addr;
10054}
10055
1da177e4
LT
10056static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10057{
10058 int ret;
10059
1da177e4
LT
10060 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10061 return tg3_nvram_read_using_eeprom(tp, offset, val);
10062
1820180b 10063 offset = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
10064
10065 if (offset > NVRAM_ADDR_MSK)
10066 return -EINVAL;
10067
ec41c7df
MC
10068 ret = tg3_nvram_lock(tp);
10069 if (ret)
10070 return ret;
1da177e4 10071
e6af301b 10072 tg3_enable_nvram_access(tp);
1da177e4
LT
10073
10074 tw32(NVRAM_ADDR, offset);
10075 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10076 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10077
10078 if (ret == 0)
10079 *val = swab32(tr32(NVRAM_RDDATA));
10080
e6af301b 10081 tg3_disable_nvram_access(tp);
1da177e4 10082
381291b7
MC
10083 tg3_nvram_unlock(tp);
10084
1da177e4
LT
10085 return ret;
10086}
10087
1820180b
MC
10088static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10089{
10090 int err;
10091 u32 tmp;
10092
10093 err = tg3_nvram_read(tp, offset, &tmp);
10094 *val = swab32(tmp);
10095 return err;
10096}
10097
1da177e4
LT
10098static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10099 u32 offset, u32 len, u8 *buf)
10100{
10101 int i, j, rc = 0;
10102 u32 val;
10103
10104 for (i = 0; i < len; i += 4) {
10105 u32 addr, data;
10106
10107 addr = offset + i;
10108
10109 memcpy(&data, buf + i, 4);
10110
10111 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
10112
10113 val = tr32(GRC_EEPROM_ADDR);
10114 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10115
10116 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10117 EEPROM_ADDR_READ);
10118 tw32(GRC_EEPROM_ADDR, val |
10119 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10120 (addr & EEPROM_ADDR_ADDR_MASK) |
10121 EEPROM_ADDR_START |
10122 EEPROM_ADDR_WRITE);
6aa20a22 10123
9d57f01c 10124 for (j = 0; j < 1000; j++) {
1da177e4
LT
10125 val = tr32(GRC_EEPROM_ADDR);
10126
10127 if (val & EEPROM_ADDR_COMPLETE)
10128 break;
9d57f01c 10129 msleep(1);
1da177e4
LT
10130 }
10131 if (!(val & EEPROM_ADDR_COMPLETE)) {
10132 rc = -EBUSY;
10133 break;
10134 }
10135 }
10136
10137 return rc;
10138}
10139
/* offset and length are dword aligned */
/* Write to an unbuffered flash part using a read-modify-write of whole
 * pages: each affected page is read into a scratch buffer, patched with
 * the new data, erased, and rewritten word by word.  Returns 0 on
 * success, -ENOMEM if the scratch page cannot be allocated, or the
 * first read/command error encountered.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing the current offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole page so unmodified bytes survive the
		 * erase below.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						  (u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the patched page back, flagging the first and
		 * last words of the burst for the controller.
		 */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always re-assert write disable, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10235
/* offset and length are dword aligned */
/* Write to a buffered flash part: the controller handles page buffering,
 * so data is streamed one word at a time with FIRST/LAST markers at page
 * and transfer boundaries.  ST parts (except the listed newer ASICs)
 * need an explicit write-enable before each FIRST word.  Returns 0 on
 * success or the first command error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST marks the start of a page or of the transfer;
		 * LAST marks the end of a page or of the transfer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10288
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point.  Temporarily deasserts the EEPROM
 * write-protect GPIO if the board uses one, takes the NVRAM lock, and
 * dispatches to the eeprom / buffered / unbuffered write paths based on
 * the detected part type.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		/* Drop GPIO_OUTPUT1 to disable write protection. */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Writes also require GRC_MODE_NVRAM_WR_ENABLE. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
		    !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert the write-protect GPIO if we deasserted it above. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
10343
/* Maps a board's PCI subsystem vendor/device ID to the PHY it carries,
 * for boards whose PHY cannot be identified by probing alone.  A phy_id
 * of 0 means a fiber/SERDES board with no copper PHY.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10386
10387static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10388{
10389 int i;
10390
10391 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10392 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10393 tp->pdev->subsystem_vendor) &&
10394 (subsys_id_to_phy_id[i].subsys_devid ==
10395 tp->pdev->subsystem_device))
10396 return &subsys_id_to_phy_id[i];
10397 }
10398 return NULL;
10399}
10400
/* Read the hardware configuration left in NIC SRAM by the bootcode /
 * EEPROM and translate it into tp->tg3_flags*, tp->phy_id and
 * tp->led_ctrl.  Must run before tg3_set_power_state() since the
 * EEPROM_WRITE_PROT / IS_NIC flags decide how GPIOs and Vaux are
 * handled (see the comment at the call site in tg3_get_invariants()).
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults, possibly overridden by SRAM config data below. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 keeps its config in the VCPU shadow register rather than
	 * NIC SRAM, so handle it separately and return early.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	/* Only trust the SRAM config area when the bootcode has stamped
	 * it with the signature magic.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer ASICs with a bootcode
		 * version in the sane 0x01..0xff range.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two SRAM halves into the
		 * driver's internal PHY_ID encoding.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards wire their LED as PHY_2 regardless
		 * of what the SRAM config claims.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Certain Arima boards set the WP bit but do not
			 * actually use GPIO1 for write protect.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			/* No write protect implies an add-in NIC rather
			 * than a LOM.
			 */
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
		    nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}
	}
}
10588
/* Determine and record this device's PHY ID, falling back through three
 * sources: the live MII PHYSID registers, the value cached from EEPROM
 * by tg3_get_eeprom_hw_cfg(), and finally the hardcoded subsystem-ID
 * table.  For copper parts not managed by ASF/APE firmware, also reset
 * the PHY and program default autoneg advertisement.  Returns 0 on
 * success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Repack the raw MII ID words into the driver's internal
		 * PHY_ID encoding (same layout as in tg3_get_eeprom_hw_cfg).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero table entry means "SerDes, exact PHY
			 * unknown" (e.g. 3C996SX, CHANGELING).
			 */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY, and no management firmware owns the link: reset the
	 * PHY and make sure full 10/100(/1000) autoneg is advertised.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice because link status is latched;
		 * skip the reset entirely if link is already up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs need to be forced master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Only restart autoneg if the PHY is not already
		 * advertising everything we want.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* Re-write the advertisement registers unconditionally
		 * after the wirespeed setup.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* Second DSP init pass for the 5401 — presumably a deliberate
	 * retry/settling workaround; TODO confirm against Broadcom docs.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10716
/* Extract the board part number from the PCI VPD (Vital Product Data)
 * area into tp->board_part_number.  The VPD bytes are fetched either
 * directly from NVRAM (when the EEPROM magic is present) or through
 * the PCI VPD capability registers, then scanned for a read-only
 * resource (0x90) containing a "PN" keyword.  Falls back to a fixed
 * string if anything goes wrong.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100; copy 256 bytes,
		 * unpacking each little-endian word into bytes.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >> 0) & 0xff);
			vpd_data[i + 1] = ((tmp >> 8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		/* No NVRAM magic: read the VPD through the PCI VPD
		 * capability, polling bit 15 of VPD_ADDR for completion
		 * (up to ~100 ms per dword).
		 */
		int vpd_cap;

		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* 0x82 = identifier string, 0x91 = read/write resource:
		 * skip over them (3-byte header + little-endian length).
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything other than a read-only resource (0x90)
		 * means the VPD is malformed for our purposes.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk keyword entries: 2-byte keyword + 1-byte length. */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* Bound by tp->board_part_number size and
				 * the buffer we copied.
				 */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10816
9c8a620e
MC
10817static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10818{
10819 u32 val;
10820
10821 if (tg3_nvram_read_swab(tp, offset, &val) ||
10822 (val & 0xfc000000) != 0x0c000000 ||
10823 tg3_nvram_read_swab(tp, offset + 4, &val) ||
10824 val != 0)
10825 return 0;
10826
10827 return 1;
10828}
10829
c4e6575c
MC
10830static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10831{
10832 u32 val, offset, start;
9c8a620e
MC
10833 u32 ver_offset;
10834 int i, bcnt;
c4e6575c
MC
10835
10836 if (tg3_nvram_read_swab(tp, 0, &val))
10837 return;
10838
10839 if (val != TG3_EEPROM_MAGIC)
10840 return;
10841
10842 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10843 tg3_nvram_read_swab(tp, 0x4, &start))
10844 return;
10845
10846 offset = tg3_nvram_logical_addr(tp, offset);
9c8a620e
MC
10847
10848 if (!tg3_fw_img_is_valid(tp, offset) ||
10849 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
c4e6575c
MC
10850 return;
10851
9c8a620e
MC
10852 offset = offset + ver_offset - start;
10853 for (i = 0; i < 16; i += 4) {
10854 if (tg3_nvram_read(tp, offset + i, &val))
10855 return;
10856
10857 val = le32_to_cpu(val);
10858 memcpy(tp->fw_ver + i, &val, 4);
10859 }
c4e6575c 10860
9c8a620e
MC
10861 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10862 (tp->tg3_flags & TG3_FLG3_ENABLE_APE))
10863 return;
10864
10865 for (offset = TG3_NVM_DIR_START;
10866 offset < TG3_NVM_DIR_END;
10867 offset += TG3_NVM_DIRENT_SIZE) {
10868 if (tg3_nvram_read_swab(tp, offset, &val))
c4e6575c
MC
10869 return;
10870
9c8a620e
MC
10871 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10872 break;
10873 }
10874
10875 if (offset == TG3_NVM_DIR_END)
10876 return;
10877
10878 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10879 start = 0x08000000;
10880 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10881 return;
10882
10883 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
10884 !tg3_fw_img_is_valid(tp, offset) ||
10885 tg3_nvram_read_swab(tp, offset + 8, &val))
10886 return;
10887
10888 offset += val - start;
10889
10890 bcnt = strlen(tp->fw_ver);
10891
10892 tp->fw_ver[bcnt++] = ',';
10893 tp->fw_ver[bcnt++] = ' ';
10894
10895 for (i = 0; i < 4; i++) {
10896 if (tg3_nvram_read(tp, offset, &val))
c4e6575c
MC
10897 return;
10898
9c8a620e
MC
10899 val = le32_to_cpu(val);
10900 offset += sizeof(val);
c4e6575c 10901
9c8a620e
MC
10902 if (bcnt > TG3_VER_SIZE - sizeof(val)) {
10903 memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
10904 break;
c4e6575c 10905 }
9c8a620e
MC
10906
10907 memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
10908 bcnt += sizeof(val);
c4e6575c 10909 }
9c8a620e
MC
10910
10911 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
c4e6575c
MC
10912}
10913
7544b097
MC
10914static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10915
1da177e4
LT
10916static int __devinit tg3_get_invariants(struct tg3 *tp)
10917{
10918 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
10919 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10920 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
c165b004
JL
10921 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10922 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
399de50b
MC
10923 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10924 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
10925 { },
10926 };
10927 u32 misc_ctrl_reg;
10928 u32 cacheline_sz_reg;
10929 u32 pci_state_reg, grc_misc_cfg;
10930 u32 val;
10931 u16 pci_cmd;
c7835a77 10932 int err, pcie_cap;
1da177e4 10933
1da177e4
LT
10934 /* Force memory write invalidate off. If we leave it on,
10935 * then on 5700_BX chips we have to enable a workaround.
10936 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10937 * to match the cacheline size. The Broadcom driver have this
10938 * workaround but turns MWI off all the times so never uses
10939 * it. This seems to suggest that the workaround is insufficient.
10940 */
10941 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10942 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10943 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10944
10945 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10946 * has the register indirect write enable bit set before
10947 * we try to access any of the MMIO registers. It is also
10948 * critical that the PCI-X hw workaround situation is decided
10949 * before that as well.
10950 */
10951 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10952 &misc_ctrl_reg);
10953
10954 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10955 MISC_HOST_CTRL_CHIPREV_SHIFT);
795d01c5
MC
10956 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10957 u32 prod_id_asic_rev;
10958
10959 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10960 &prod_id_asic_rev);
10961 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10962 }
1da177e4 10963
ff645bec
MC
10964 /* Wrong chip ID in 5752 A0. This code can be removed later
10965 * as A0 is not in production.
10966 */
10967 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10968 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10969
6892914f
MC
10970 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10971 * we need to disable memory and use config. cycles
10972 * only to access all registers. The 5702/03 chips
10973 * can mistakenly decode the special cycles from the
10974 * ICH chipsets as memory write cycles, causing corruption
10975 * of register and memory space. Only certain ICH bridges
10976 * will drive special cycles with non-zero data during the
10977 * address phase which can fall within the 5703's address
10978 * range. This is not an ICH bug as the PCI spec allows
10979 * non-zero address during special cycles. However, only
10980 * these ICH bridges are known to drive non-zero addresses
10981 * during special cycles.
10982 *
10983 * Since special cycles do not cross PCI bridges, we only
10984 * enable this workaround if the 5703 is on the secondary
10985 * bus of these ICH bridges.
10986 */
10987 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10988 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10989 static struct tg3_dev_id {
10990 u32 vendor;
10991 u32 device;
10992 u32 rev;
10993 } ich_chipsets[] = {
10994 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10995 PCI_ANY_ID },
10996 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10997 PCI_ANY_ID },
10998 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10999 0xa },
11000 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11001 PCI_ANY_ID },
11002 { },
11003 };
11004 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11005 struct pci_dev *bridge = NULL;
11006
11007 while (pci_id->vendor != 0) {
11008 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11009 bridge);
11010 if (!bridge) {
11011 pci_id++;
11012 continue;
11013 }
11014 if (pci_id->rev != PCI_ANY_ID) {
44c10138 11015 if (bridge->revision > pci_id->rev)
6892914f
MC
11016 continue;
11017 }
11018 if (bridge->subordinate &&
11019 (bridge->subordinate->number ==
11020 tp->pdev->bus->number)) {
11021
11022 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11023 pci_dev_put(bridge);
11024 break;
11025 }
11026 }
11027 }
11028
4a29cc2e
MC
11029 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11030 * DMA addresses > 40-bit. This bridge may have other additional
11031 * 57xx devices behind it in some 4-port NIC designs for example.
11032 * Any tg3 device found behind the bridge will also need the 40-bit
11033 * DMA workaround.
11034 */
a4e2b347
MC
11035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11037 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 11038 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 11039 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 11040 }
4a29cc2e
MC
11041 else {
11042 struct pci_dev *bridge = NULL;
11043
11044 do {
11045 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11046 PCI_DEVICE_ID_SERVERWORKS_EPB,
11047 bridge);
11048 if (bridge && bridge->subordinate &&
11049 (bridge->subordinate->number <=
11050 tp->pdev->bus->number) &&
11051 (bridge->subordinate->subordinate >=
11052 tp->pdev->bus->number)) {
11053 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11054 pci_dev_put(bridge);
11055 break;
11056 }
11057 } while (bridge);
11058 }
4cf78e4f 11059
1da177e4
LT
11060 /* Initialize misc host control in PCI block. */
11061 tp->misc_host_ctrl |= (misc_ctrl_reg &
11062 MISC_HOST_CTRL_CHIPREV);
11063 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11064 tp->misc_host_ctrl);
11065
11066 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11067 &cacheline_sz_reg);
11068
11069 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11070 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11071 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11072 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11073
7544b097
MC
11074 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11075 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11076 tp->pdev_peer = tg3_find_peer(tp);
11077
6708e5cc 11078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 11079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 11080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 11081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
a4e2b347 11085 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
11086 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11087
1b440c56
JL
11088 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11089 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11090 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11091
5a6f3074 11092 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7544b097
MC
11093 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11094 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11095 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11096 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11097 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11098 tp->pdev_peer == tp->pdev))
11099 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11100
af36e6b6 11101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 11102 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11103 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5a6f3074 11106 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
fcfa0a32 11107 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
52c0fd83 11108 } else {
7f62ad5d 11109 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
52c0fd83
MC
11110 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11111 ASIC_REV_5750 &&
11112 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
7f62ad5d 11113 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
52c0fd83 11114 }
5a6f3074 11115 }
1da177e4 11116
0f893dc6
MC
11117 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11118 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
d9ab5ad1 11119 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
af36e6b6 11120 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
b5d3772c 11121 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
d30cdd28 11122 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9936bcf6 11123 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
b5d3772c 11124 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
0f893dc6
MC
11125 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11126
c7835a77
MC
11127 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11128 if (pcie_cap != 0) {
1da177e4 11129 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
c7835a77
MC
11130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11131 u16 lnkctl;
11132
11133 pci_read_config_word(tp->pdev,
11134 pcie_cap + PCI_EXP_LNKCTL,
11135 &lnkctl);
11136 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11137 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11138 }
11139 }
1da177e4 11140
399de50b
MC
11141 /* If we have an AMD 762 or VIA K8T800 chipset, write
11142 * reordering to the mailbox registers done by the host
11143 * controller can cause major troubles. We read back from
11144 * every mailbox register write to force the writes to be
11145 * posted to the chip in order.
11146 */
11147 if (pci_dev_present(write_reorder_chipsets) &&
11148 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11149 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11150
1da177e4
LT
11151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11152 tp->pci_lat_timer < 64) {
11153 tp->pci_lat_timer = 64;
11154
11155 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11156 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11157 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11158 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11159
11160 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11161 cacheline_sz_reg);
11162 }
11163
9974a356
MC
11164 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11165 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11166 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11167 if (!tp->pcix_cap) {
11168 printk(KERN_ERR PFX "Cannot find PCI-X "
11169 "capability, aborting.\n");
11170 return -EIO;
11171 }
11172 }
11173
1da177e4
LT
11174 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11175 &pci_state_reg);
11176
9974a356 11177 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
1da177e4
LT
11178 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11179
11180 /* If this is a 5700 BX chipset, and we are in PCI-X
11181 * mode, enable register write workaround.
11182 *
11183 * The workaround is to use indirect register accesses
11184 * for all chip writes not to mailbox registers.
11185 */
11186 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11187 u32 pm_reg;
1da177e4
LT
11188
11189 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11190
11191 /* The chip can have it's power management PCI config
11192 * space registers clobbered due to this bug.
11193 * So explicitly force the chip into D0 here.
11194 */
9974a356
MC
11195 pci_read_config_dword(tp->pdev,
11196 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11197 &pm_reg);
11198 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11199 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
11200 pci_write_config_dword(tp->pdev,
11201 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11202 pm_reg);
11203
11204 /* Also, force SERR#/PERR# in PCI command. */
11205 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11206 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11207 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11208 }
11209 }
11210
087fe256
MC
11211 /* 5700 BX chips need to have their TX producer index mailboxes
11212 * written twice to workaround a bug.
11213 */
11214 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11215 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11216
1da177e4
LT
11217 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11218 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11219 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11220 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11221
11222 /* Chip-specific fixup from Broadcom driver */
11223 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11224 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11225 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11226 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11227 }
11228
1ee582d8 11229 /* Default fast path register access methods */
20094930 11230 tp->read32 = tg3_read32;
1ee582d8 11231 tp->write32 = tg3_write32;
09ee929c 11232 tp->read32_mbox = tg3_read32;
20094930 11233 tp->write32_mbox = tg3_write32;
1ee582d8
MC
11234 tp->write32_tx_mbox = tg3_write32;
11235 tp->write32_rx_mbox = tg3_write32;
11236
11237 /* Various workaround register access methods */
11238 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11239 tp->write32 = tg3_write_indirect_reg32;
98efd8a6
MC
11240 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11241 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11242 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11243 /*
11244 * Back to back register writes can cause problems on these
11245 * chips, the workaround is to read back all reg writes
11246 * except those to mailbox regs.
11247 *
11248 * See tg3_write_indirect_reg32().
11249 */
1ee582d8 11250 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
11251 }
11252
1ee582d8
MC
11253
11254 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11255 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11256 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11257 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11258 tp->write32_rx_mbox = tg3_write_flush_reg32;
11259 }
20094930 11260
6892914f
MC
11261 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11262 tp->read32 = tg3_read_indirect_reg32;
11263 tp->write32 = tg3_write_indirect_reg32;
11264 tp->read32_mbox = tg3_read_indirect_mbox;
11265 tp->write32_mbox = tg3_write_indirect_mbox;
11266 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11267 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11268
11269 iounmap(tp->regs);
22abe310 11270 tp->regs = NULL;
6892914f
MC
11271
11272 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11273 pci_cmd &= ~PCI_COMMAND_MEMORY;
11274 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11275 }
b5d3772c
MC
11276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11277 tp->read32_mbox = tg3_read32_mbox_5906;
11278 tp->write32_mbox = tg3_write32_mbox_5906;
11279 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11280 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11281 }
6892914f 11282
bbadf503
MC
11283 if (tp->write32 == tg3_write_indirect_reg32 ||
11284 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11285 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 11286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
bbadf503
MC
11287 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11288
7d0c41ef 11289 /* Get eeprom hw config before calling tg3_set_power_state().
9d26e213 11290 * In particular, the TG3_FLG2_IS_NIC flag must be
7d0c41ef
MC
11291 * determined before calling tg3_set_power_state() so that
11292 * we know whether or not to switch out of Vaux power.
11293 * When the flag is set, it means that GPIO1 is used for eeprom
11294 * write protect and also implies that it is a LOM where GPIOs
11295 * are not used to switch power.
6aa20a22 11296 */
7d0c41ef
MC
11297 tg3_get_eeprom_hw_cfg(tp);
11298
0d3031d9
MC
11299 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11300 /* Allow reads and writes to the
11301 * APE register and memory space.
11302 */
11303 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11304 PCISTATE_ALLOW_APE_SHMEM_WR;
11305 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11306 pci_state_reg);
11307 }
11308
9936bcf6
MC
11309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d30cdd28
MC
11311 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11312
314fba34
MC
11313 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11314 * GPIO1 driven high will bring 5700's external PHY out of reset.
11315 * It is also used as eeprom write protect on LOMs.
11316 */
11317 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11318 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11319 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11320 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11321 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
11322 /* Unused GPIO3 must be driven as output on 5752 because there
11323 * are no pull-up resistors on unused GPIO pins.
11324 */
11325 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11326 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 11327
af36e6b6
MC
11328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11329 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11330
1da177e4 11331 /* Force the chip into D0. */
bc1c7567 11332 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
11333 if (err) {
11334 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11335 pci_name(tp->pdev));
11336 return err;
11337 }
11338
11339 /* 5700 B0 chips do not support checksumming correctly due
11340 * to hardware bugs.
11341 */
11342 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11343 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11344
1da177e4
LT
11345 /* Derive initial jumbo mode from MTU assigned in
11346 * ether_setup() via the alloc_etherdev() call
11347 */
0f893dc6 11348 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 11349 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 11350 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
11351
11352 /* Determine WakeOnLan speed to use. */
11353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11354 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11355 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11356 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11357 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11358 } else {
11359 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11360 }
11361
11362 /* A few boards don't want Ethernet@WireSpeed phy feature */
11363 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11364 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11365 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 11366 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
b5d3772c 11367 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
747e8f8b 11368 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
11369 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11370
11371 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11372 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11373 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11374 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11375 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11376
c424cb24
MC
11377 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 11379 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
11380 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11381 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
d4011ada
MC
11382 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11383 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11384 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
c1d2a196
MC
11385 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11386 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11387 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
c424cb24
MC
11388 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11389 }
1da177e4 11390
1da177e4 11391 tp->coalesce_mode = 0;
1da177e4
LT
11392 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11393 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11394 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11395
11396 /* Initialize MAC MI mode, polling disabled. */
11397 tw32_f(MAC_MI_MODE, tp->mi_mode);
11398 udelay(80);
11399
11400 /* Initialize data/descriptor byte/word swapping. */
11401 val = tr32(GRC_MODE);
11402 val &= GRC_MODE_HOST_STACKUP;
11403 tw32(GRC_MODE, val | tp->grc_mode);
11404
11405 tg3_switch_clocks(tp);
11406
11407 /* Clear this out for sanity. */
11408 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11409
11410 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11411 &pci_state_reg);
11412 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11413 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11414 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11415
11416 if (chiprevid == CHIPREV_ID_5701_A0 ||
11417 chiprevid == CHIPREV_ID_5701_B0 ||
11418 chiprevid == CHIPREV_ID_5701_B2 ||
11419 chiprevid == CHIPREV_ID_5701_B5) {
11420 void __iomem *sram_base;
11421
11422 /* Write some dummy words into the SRAM status block
11423 * area, see if it reads back correctly. If the return
11424 * value is bad, force enable the PCIX workaround.
11425 */
11426 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11427
11428 writel(0x00000000, sram_base);
11429 writel(0x00000000, sram_base + 4);
11430 writel(0xffffffff, sram_base + 4);
11431 if (readl(sram_base) != 0x00000000)
11432 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11433 }
11434 }
11435
11436 udelay(50);
11437 tg3_nvram_init(tp);
11438
11439 grc_misc_cfg = tr32(GRC_MISC_CFG);
11440 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11441
1da177e4
LT
11442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11443 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11444 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11445 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11446
fac9b83e
DM
11447 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11448 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11449 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11450 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11451 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11452 HOSTCC_MODE_CLRTICK_TXBD);
11453
11454 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11455 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11456 tp->misc_host_ctrl);
11457 }
11458
1da177e4
LT
11459 /* these are limited to 10/100 only */
11460 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11461 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11462 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11463 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11464 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11465 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11466 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11467 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11468 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
676917d4
MC
11469 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11470 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
b5d3772c 11471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1da177e4
LT
11472 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11473
11474 err = tg3_phy_probe(tp);
11475 if (err) {
11476 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11477 pci_name(tp->pdev), err);
11478 /* ... but do not return immediately ... */
11479 }
11480
11481 tg3_read_partno(tp);
c4e6575c 11482 tg3_read_fw_ver(tp);
1da177e4
LT
11483
11484 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11485 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11486 } else {
11487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11488 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11489 else
11490 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11491 }
11492
11493 /* 5700 {AX,BX} chips have a broken status block link
11494 * change bit implementation, so we must use the
11495 * status register in those cases.
11496 */
11497 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11498 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11499 else
11500 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11501
11502 /* The led_ctrl is set during tg3_phy_probe, here we might
11503 * have to force the link status polling mechanism based
11504 * upon subsystem IDs.
11505 */
11506 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 11507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
1da177e4
LT
11508 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11509 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11510 TG3_FLAG_USE_LINKCHG_REG);
11511 }
11512
11513 /* For all SERDES we poll the MAC status register. */
11514 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11515 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11516 else
11517 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11518
5a6f3074 11519 /* All chips before 5787 can get confused if TX buffers
1da177e4
LT
11520 * straddle the 4GB address boundary in some cases.
11521 */
af36e6b6 11522 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 11523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
5a6f3074
MC
11527 tp->dev->hard_start_xmit = tg3_start_xmit;
11528 else
11529 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
1da177e4
LT
11530
11531 tp->rx_offset = 2;
11532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11533 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11534 tp->rx_offset = 0;
11535
f92905de
MC
11536 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11537
11538 /* Increment the rx prod index on the rx std ring by at most
11539 * 8 for these chips to workaround hw errata.
11540 */
11541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11543 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11544 tp->rx_std_max_post = 8;
11545
8ed5d97e
MC
11546 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11547 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11548 PCIE_PWR_MGMT_L1_THRESH_MSK;
11549
1da177e4
LT
11550 return err;
11551}
11552
49b6e95f 11553#ifdef CONFIG_SPARC
1da177e4
LT
/* Try to obtain the MAC address from the OpenFirmware device tree on
 * SPARC systems.
 *
 * Reads the "local-mac-address" property of the PCI device's OF node
 * and, if it is present and exactly 6 bytes long, installs it as both
 * the current and permanent address of the net_device.
 *
 * Returns 0 on success, -ENODEV if the property is absent or malformed
 * (caller then falls back to the NIC's own address sources).
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}
11570
11571static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11572{
11573 struct net_device *dev = tp->dev;
11574
11575 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 11576 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
11577 return 0;
11578}
11579#endif
11580
11581static int __devinit tg3_get_device_address(struct tg3 *tp)
11582{
11583 struct net_device *dev = tp->dev;
11584 u32 hi, lo, mac_offset;
008652b3 11585 int addr_ok = 0;
1da177e4 11586
49b6e95f 11587#ifdef CONFIG_SPARC
1da177e4
LT
11588 if (!tg3_get_macaddr_sparc(tp))
11589 return 0;
11590#endif
11591
11592 mac_offset = 0x7c;
f49639e6 11593 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
a4e2b347 11594 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
11595 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11596 mac_offset = 0xcc;
11597 if (tg3_nvram_lock(tp))
11598 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11599 else
11600 tg3_nvram_unlock(tp);
11601 }
b5d3772c
MC
11602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11603 mac_offset = 0x10;
1da177e4
LT
11604
11605 /* First try to get it from MAC address mailbox. */
11606 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11607 if ((hi >> 16) == 0x484b) {
11608 dev->dev_addr[0] = (hi >> 8) & 0xff;
11609 dev->dev_addr[1] = (hi >> 0) & 0xff;
11610
11611 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11612 dev->dev_addr[2] = (lo >> 24) & 0xff;
11613 dev->dev_addr[3] = (lo >> 16) & 0xff;
11614 dev->dev_addr[4] = (lo >> 8) & 0xff;
11615 dev->dev_addr[5] = (lo >> 0) & 0xff;
1da177e4 11616
008652b3
MC
11617 /* Some old bootcode may report a 0 MAC address in SRAM */
11618 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11619 }
11620 if (!addr_ok) {
11621 /* Next, try NVRAM. */
f49639e6 11622 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
008652b3
MC
11623 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11624 dev->dev_addr[0] = ((hi >> 16) & 0xff);
11625 dev->dev_addr[1] = ((hi >> 24) & 0xff);
11626 dev->dev_addr[2] = ((lo >> 0) & 0xff);
11627 dev->dev_addr[3] = ((lo >> 8) & 0xff);
11628 dev->dev_addr[4] = ((lo >> 16) & 0xff);
11629 dev->dev_addr[5] = ((lo >> 24) & 0xff);
11630 }
11631 /* Finally just fetch it out of the MAC control regs. */
11632 else {
11633 hi = tr32(MAC_ADDR_0_HIGH);
11634 lo = tr32(MAC_ADDR_0_LOW);
11635
11636 dev->dev_addr[5] = lo & 0xff;
11637 dev->dev_addr[4] = (lo >> 8) & 0xff;
11638 dev->dev_addr[3] = (lo >> 16) & 0xff;
11639 dev->dev_addr[2] = (lo >> 24) & 0xff;
11640 dev->dev_addr[1] = hi & 0xff;
11641 dev->dev_addr[0] = (hi >> 8) & 0xff;
11642 }
1da177e4
LT
11643 }
11644
11645 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11646#ifdef CONFIG_SPARC64
11647 if (!tg3_get_default_macaddr_sparc(tp))
11648 return 0;
11649#endif
11650 return -EINVAL;
11651 }
2ff43697 11652 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
11653 return 0;
11654}
11655
59e6b434
DM
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* Fold the appropriate DMA read/write boundary bits for this host
 * architecture and bus type into the DMA_RW_CTRL value @val.
 *
 * The PCI cache line size register selects which boundary encoding is
 * used; the compile-time "goal" picks single- versus multi-cacheline
 * bursting per architecture.  On chips newer than 5700/5701 that are
 * not PCI Express, the boundary bits have no effect and @val is
 * returned unchanged.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X boundary encodings. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		};
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCIe: only the write boundary is controllable. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		};
	} else {
		/* Conventional PCI: pick the boundary matching the cache
		 * line size when single-cacheline bursting is requested;
		 * cases fall through to the next larger boundary otherwise.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		};
	}

out:
	return val;
}
11795
1da177e4
LT
/* Run one test DMA transaction of @size bytes between the host buffer
 * @buf (bus address @buf_dma) and NIC SRAM at offset 0x2100, driven by
 * a descriptor written into the NIC's internal DMA descriptor pool.
 * @to_device selects the read-DMA engine (host -> NIC) versus the
 * write-DMA engine (NIC -> host).
 *
 * Returns 0 when the completion FIFO reports the descriptor within
 * roughly 4ms of polling, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA completion FIFOs and engine status, then
	 * reset the buffer manager and flow-through queues.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window config registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor to kick off the transfer. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO: up to 40 iterations of 100us. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11876
#define TEST_BUFFER_SIZE	0x2000

/* Probe for safe host DMA settings.
 *
 * Computes an initial tp->dma_rwctrl from bus type (PCIe / PCI /
 * PCI-X), chip revision and cache line size, writes it to the chip,
 * then — on 5700/5701 only — runs a write/read DMA loop against a
 * test buffer with the widest write burst to expose the known write
 * DMA bug.  If corruption is detected the write boundary is tightened
 * to 16 bytes and the test retried; certain host bridges known to
 * expose the bug without failing the test are forced to the 16-byte
 * boundary as well.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -ENODEV).
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		/* Conventional PCI: watermarks depend on chip family. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (boundary bits repurposed). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loopback DMA test below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* Corruption with wide bursts: fall back to
				 * the conservative 16-byte write boundary
				 * and rerun the whole test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12065
/* Set the initial link configuration: advertise all 10/100/1000
 * half/full-duplex modes with autonegotiation enabled, and mark all
 * cached link state — the current values and the "orig_*" values
 * saved across power transitions — as invalid/unknown.
 */
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
12083
/* Choose buffer-manager MBUF/DMA watermarks for this chip generation.
 *
 * 5705-and-newer parts use the reduced _5705 thresholds (further
 * reduced on the 5906, which has less MBUF memory); older chips use
 * the original defaults.  Jumbo-frame watermarks are filled in
 * unconditionally even though only jumbo-capable chips consume them.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 has even less on-chip MBUF memory. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
12125
12126static char * __devinit tg3_phy_string(struct tg3 *tp)
12127{
12128 switch (tp->phy_id & PHY_ID_MASK) {
12129 case PHY_ID_BCM5400: return "5400";
12130 case PHY_ID_BCM5401: return "5401";
12131 case PHY_ID_BCM5411: return "5411";
12132 case PHY_ID_BCM5701: return "5701";
12133 case PHY_ID_BCM5703: return "5703";
12134 case PHY_ID_BCM5704: return "5704";
12135 case PHY_ID_BCM5705: return "5705";
12136 case PHY_ID_BCM5750: return "5750";
85e94ced 12137 case PHY_ID_BCM5752: return "5752";
a4e2b347 12138 case PHY_ID_BCM5714: return "5714";
4cf78e4f 12139 case PHY_ID_BCM5780: return "5780";
af36e6b6 12140 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 12141 case PHY_ID_BCM5787: return "5787";
d30cdd28 12142 case PHY_ID_BCM5784: return "5784";
126a3368 12143 case PHY_ID_BCM5756: return "5722/5756";
b5d3772c 12144 case PHY_ID_BCM5906: return "5906";
9936bcf6 12145 case PHY_ID_BCM5761: return "5761";
1da177e4
LT
12146 case PHY_ID_BCM8002: return "8002/serdes";
12147 case 0: return "serdes";
12148 default: return "unknown";
12149 };
12150}
12151
f9804ddb
MC
12152static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12153{
12154 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12155 strcpy(str, "PCI Express");
12156 return str;
12157 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12158 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12159
12160 strcpy(str, "PCIX:");
12161
12162 if ((clock_ctrl == 7) ||
12163 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12164 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12165 strcat(str, "133MHz");
12166 else if (clock_ctrl == 0)
12167 strcat(str, "33MHz");
12168 else if (clock_ctrl == 2)
12169 strcat(str, "50MHz");
12170 else if (clock_ctrl == 4)
12171 strcat(str, "66MHz");
12172 else if (clock_ctrl == 6)
12173 strcat(str, "100MHz");
f9804ddb
MC
12174 } else {
12175 strcpy(str, "PCI:");
12176 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12177 strcat(str, "66MHz");
12178 else
12179 strcat(str, "33MHz");
12180 }
12181 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12182 strcat(str, ":32-bit");
12183 else
12184 strcat(str, ":64-bit");
12185 return str;
12186}
12187
8c2dc7e1 12188static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
1da177e4
LT
12189{
12190 struct pci_dev *peer;
12191 unsigned int func, devnr = tp->pdev->devfn & ~7;
12192
12193 for (func = 0; func < 8; func++) {
12194 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12195 if (peer && peer != tp->pdev)
12196 break;
12197 pci_dev_put(peer);
12198 }
16fe9d74
MC
12199 /* 5704 can be configured in single-port mode, set peer to
12200 * tp->pdev in that case.
12201 */
12202 if (!peer) {
12203 peer = tp->pdev;
12204 return peer;
12205 }
1da177e4
LT
12206
12207 /*
12208 * We don't need to keep the refcount elevated; there's no way
12209 * to remove one half of this device without removing the other
12210 */
12211 pci_dev_put(peer);
12212
12213 return peer;
12214}
12215
15f9850d
DM
12216static void __devinit tg3_init_coal(struct tg3 *tp)
12217{
12218 struct ethtool_coalesce *ec = &tp->coal;
12219
12220 memset(ec, 0, sizeof(*ec));
12221 ec->cmd = ETHTOOL_GCOALESCE;
12222 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12223 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12224 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12225 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12226 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12227 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12228 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12229 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12230 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12231
12232 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12233 HOSTCC_MODE_CLRTICK_TXBD)) {
12234 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12235 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12236 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12237 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12238 }
d244c892
MC
12239
12240 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12241 ec->rx_coalesce_usecs_irq = 0;
12242 ec->tx_coalesce_usecs_irq = 0;
12243 ec->stats_block_coalesce_usecs = 0;
12244 }
15f9850d
DM
12245}
12246
1da177e4
LT
12247static int __devinit tg3_init_one(struct pci_dev *pdev,
12248 const struct pci_device_id *ent)
12249{
12250 static int tg3_version_printed = 0;
12251 unsigned long tg3reg_base, tg3reg_len;
12252 struct net_device *dev;
12253 struct tg3 *tp;
72f2afb8 12254 int i, err, pm_cap;
f9804ddb 12255 char str[40];
72f2afb8 12256 u64 dma_mask, persist_dma_mask;
1da177e4
LT
12257
12258 if (tg3_version_printed++ == 0)
12259 printk(KERN_INFO "%s", version);
12260
12261 err = pci_enable_device(pdev);
12262 if (err) {
12263 printk(KERN_ERR PFX "Cannot enable PCI device, "
12264 "aborting.\n");
12265 return err;
12266 }
12267
12268 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12269 printk(KERN_ERR PFX "Cannot find proper PCI device "
12270 "base address, aborting.\n");
12271 err = -ENODEV;
12272 goto err_out_disable_pdev;
12273 }
12274
12275 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12276 if (err) {
12277 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12278 "aborting.\n");
12279 goto err_out_disable_pdev;
12280 }
12281
12282 pci_set_master(pdev);
12283
12284 /* Find power-management capability. */
12285 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12286 if (pm_cap == 0) {
12287 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12288 "aborting.\n");
12289 err = -EIO;
12290 goto err_out_free_res;
12291 }
12292
1da177e4
LT
12293 tg3reg_base = pci_resource_start(pdev, 0);
12294 tg3reg_len = pci_resource_len(pdev, 0);
12295
12296 dev = alloc_etherdev(sizeof(*tp));
12297 if (!dev) {
12298 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12299 err = -ENOMEM;
12300 goto err_out_free_res;
12301 }
12302
1da177e4
LT
12303 SET_NETDEV_DEV(dev, &pdev->dev);
12304
1da177e4
LT
12305#if TG3_VLAN_TAG_USED
12306 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12307 dev->vlan_rx_register = tg3_vlan_rx_register;
1da177e4
LT
12308#endif
12309
12310 tp = netdev_priv(dev);
12311 tp->pdev = pdev;
12312 tp->dev = dev;
12313 tp->pm_cap = pm_cap;
12314 tp->mac_mode = TG3_DEF_MAC_MODE;
12315 tp->rx_mode = TG3_DEF_RX_MODE;
12316 tp->tx_mode = TG3_DEF_TX_MODE;
12317 tp->mi_mode = MAC_MI_MODE_BASE;
12318 if (tg3_debug > 0)
12319 tp->msg_enable = tg3_debug;
12320 else
12321 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12322
12323 /* The word/byte swap controls here control register access byte
12324 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12325 * setting below.
12326 */
12327 tp->misc_host_ctrl =
12328 MISC_HOST_CTRL_MASK_PCI_INT |
12329 MISC_HOST_CTRL_WORD_SWAP |
12330 MISC_HOST_CTRL_INDIR_ACCESS |
12331 MISC_HOST_CTRL_PCISTATE_RW;
12332
12333 /* The NONFRM (non-frame) byte/word swap controls take effect
12334 * on descriptor entries, anything which isn't packet data.
12335 *
12336 * The StrongARM chips on the board (one for tx, one for rx)
12337 * are running in big-endian mode.
12338 */
12339 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12340 GRC_MODE_WSWAP_NONFRM_DATA);
12341#ifdef __BIG_ENDIAN
12342 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12343#endif
12344 spin_lock_init(&tp->lock);
1da177e4 12345 spin_lock_init(&tp->indirect_lock);
c4028958 12346 INIT_WORK(&tp->reset_task, tg3_reset_task);
1da177e4
LT
12347
12348 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
ab0049b4 12349 if (!tp->regs) {
1da177e4
LT
12350 printk(KERN_ERR PFX "Cannot map device registers, "
12351 "aborting.\n");
12352 err = -ENOMEM;
12353 goto err_out_free_dev;
12354 }
12355
12356 tg3_init_link_config(tp);
12357
1da177e4
LT
12358 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12359 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12360 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12361
12362 dev->open = tg3_open;
12363 dev->stop = tg3_close;
12364 dev->get_stats = tg3_get_stats;
12365 dev->set_multicast_list = tg3_set_rx_mode;
12366 dev->set_mac_address = tg3_set_mac_addr;
12367 dev->do_ioctl = tg3_ioctl;
12368 dev->tx_timeout = tg3_tx_timeout;
bea3348e 12369 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
1da177e4 12370 dev->ethtool_ops = &tg3_ethtool_ops;
1da177e4
LT
12371 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12372 dev->change_mtu = tg3_change_mtu;
12373 dev->irq = pdev->irq;
12374#ifdef CONFIG_NET_POLL_CONTROLLER
12375 dev->poll_controller = tg3_poll_controller;
12376#endif
12377
12378 err = tg3_get_invariants(tp);
12379 if (err) {
12380 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12381 "aborting.\n");
12382 goto err_out_iounmap;
12383 }
12384
4a29cc2e
MC
12385 /* The EPB bridge inside 5714, 5715, and 5780 and any
12386 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
12387 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12388 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12389 * do DMA address check in tg3_start_xmit().
12390 */
4a29cc2e
MC
12391 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12392 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12393 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
72f2afb8
MC
12394 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12395#ifdef CONFIG_HIGHMEM
12396 dma_mask = DMA_64BIT_MASK;
12397#endif
4a29cc2e 12398 } else
72f2afb8
MC
12399 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12400
12401 /* Configure DMA attributes. */
12402 if (dma_mask > DMA_32BIT_MASK) {
12403 err = pci_set_dma_mask(pdev, dma_mask);
12404 if (!err) {
12405 dev->features |= NETIF_F_HIGHDMA;
12406 err = pci_set_consistent_dma_mask(pdev,
12407 persist_dma_mask);
12408 if (err < 0) {
12409 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12410 "DMA for consistent allocations\n");
12411 goto err_out_iounmap;
12412 }
12413 }
12414 }
12415 if (err || dma_mask == DMA_32BIT_MASK) {
12416 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12417 if (err) {
12418 printk(KERN_ERR PFX "No usable DMA configuration, "
12419 "aborting.\n");
12420 goto err_out_iounmap;
12421 }
12422 }
12423
fdfec172 12424 tg3_init_bufmgr_config(tp);
1da177e4 12425
1da177e4
LT
12426 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12427 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12428 }
12429 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12430 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12431 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
c7835a77 12432 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
1da177e4
LT
12433 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12434 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12435 } else {
7f62ad5d 12436 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
1da177e4
LT
12437 }
12438
4e3a7aaa
MC
12439 /* TSO is on by default on chips that support hardware TSO.
12440 * Firmware TSO on older chips gives lower performance, so it
12441 * is off by default, but can be enabled using ethtool.
12442 */
b0026624 12443 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
1da177e4 12444 dev->features |= NETIF_F_TSO;
b5d3772c
MC
12445 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12446 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
b0026624 12447 dev->features |= NETIF_F_TSO6;
9936bcf6
MC
12448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12449 dev->features |= NETIF_F_TSO_ECN;
b0026624 12450 }
1da177e4 12451
1da177e4
LT
12452
12453 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12454 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12455 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12456 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12457 tp->rx_pending = 63;
12458 }
12459
1da177e4
LT
12460 err = tg3_get_device_address(tp);
12461 if (err) {
12462 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12463 "aborting.\n");
12464 goto err_out_iounmap;
12465 }
12466
12467 /*
12468 * Reset chip in case UNDI or EFI driver did not shutdown
12469 * DMA self test will enable WDMAC and we'll see (spurious)
12470 * pending DMA on the PCI bus at that point.
12471 */
12472 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12473 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
1da177e4 12474 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 12475 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
12476 }
12477
12478 err = tg3_test_dma(tp);
12479 if (err) {
12480 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12481 goto err_out_iounmap;
12482 }
12483
12484 /* Tigon3 can do ipv4 only... and some chips have buggy
12485 * checksumming.
12486 */
12487 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
d212f87b 12488 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
af36e6b6 12489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 12490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
12491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d212f87b
SH
12493 dev->features |= NETIF_F_IPV6_CSUM;
12494
1da177e4
LT
12495 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12496 } else
12497 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12498
1da177e4
LT
12499 /* flow control autonegotiation is default behavior */
12500 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12501
15f9850d
DM
12502 tg3_init_coal(tp);
12503
0d3031d9
MC
12504 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12505 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12506 printk(KERN_ERR PFX "Cannot find proper PCI device "
12507 "base address for APE, aborting.\n");
12508 err = -ENODEV;
12509 goto err_out_iounmap;
12510 }
12511
12512 tg3reg_base = pci_resource_start(pdev, 2);
12513 tg3reg_len = pci_resource_len(pdev, 2);
12514
12515 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12516 if (tp->aperegs == 0UL) {
12517 printk(KERN_ERR PFX "Cannot map APE registers, "
12518 "aborting.\n");
12519 err = -ENOMEM;
12520 goto err_out_iounmap;
12521 }
12522
12523 tg3_ape_lock_init(tp);
12524 }
12525
c49a1561
MC
12526 pci_set_drvdata(pdev, dev);
12527
1da177e4
LT
12528 err = register_netdev(dev);
12529 if (err) {
12530 printk(KERN_ERR PFX "Cannot register net device, "
12531 "aborting.\n");
0d3031d9 12532 goto err_out_apeunmap;
1da177e4
LT
12533 }
12534
cbb45d21 12535 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
1da177e4
LT
12536 dev->name,
12537 tp->board_part_number,
12538 tp->pci_chip_rev_id,
12539 tg3_phy_string(tp),
f9804ddb 12540 tg3_bus_string(tp, str),
cbb45d21
MC
12541 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12542 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12543 "10/100/1000Base-T")));
1da177e4
LT
12544
12545 for (i = 0; i < 6; i++)
12546 printk("%2.2x%c", dev->dev_addr[i],
12547 i == 5 ? '\n' : ':');
12548
12549 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
1c46ae05 12550 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
1da177e4
LT
12551 dev->name,
12552 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12553 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12554 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12555 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
1da177e4
LT
12556 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12557 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
4a29cc2e
MC
12558 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12559 dev->name, tp->dma_rwctrl,
12560 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12561 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
1da177e4
LT
12562
12563 return 0;
12564
0d3031d9
MC
12565err_out_apeunmap:
12566 if (tp->aperegs) {
12567 iounmap(tp->aperegs);
12568 tp->aperegs = NULL;
12569 }
12570
1da177e4 12571err_out_iounmap:
6892914f
MC
12572 if (tp->regs) {
12573 iounmap(tp->regs);
22abe310 12574 tp->regs = NULL;
6892914f 12575 }
1da177e4
LT
12576
12577err_out_free_dev:
12578 free_netdev(dev);
12579
12580err_out_free_res:
12581 pci_release_regions(pdev);
12582
12583err_out_disable_pdev:
12584 pci_disable_device(pdev);
12585 pci_set_drvdata(pdev, NULL);
12586 return err;
12587}
12588
12589static void __devexit tg3_remove_one(struct pci_dev *pdev)
12590{
12591 struct net_device *dev = pci_get_drvdata(pdev);
12592
12593 if (dev) {
12594 struct tg3 *tp = netdev_priv(dev);
12595
7faa006f 12596 flush_scheduled_work();
1da177e4 12597 unregister_netdev(dev);
0d3031d9
MC
12598 if (tp->aperegs) {
12599 iounmap(tp->aperegs);
12600 tp->aperegs = NULL;
12601 }
6892914f
MC
12602 if (tp->regs) {
12603 iounmap(tp->regs);
22abe310 12604 tp->regs = NULL;
6892914f 12605 }
1da177e4
LT
12606 free_netdev(dev);
12607 pci_release_regions(pdev);
12608 pci_disable_device(pdev);
12609 pci_set_drvdata(pdev, NULL);
12610 }
12611}
12612
12613static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12614{
12615 struct net_device *dev = pci_get_drvdata(pdev);
12616 struct tg3 *tp = netdev_priv(dev);
12617 int err;
12618
3e0c95fd
MC
12619 /* PCI register 4 needs to be saved whether netif_running() or not.
12620 * MSI address and data need to be saved if using MSI and
12621 * netif_running().
12622 */
12623 pci_save_state(pdev);
12624
1da177e4
LT
12625 if (!netif_running(dev))
12626 return 0;
12627
7faa006f 12628 flush_scheduled_work();
1da177e4
LT
12629 tg3_netif_stop(tp);
12630
12631 del_timer_sync(&tp->timer);
12632
f47c11ee 12633 tg3_full_lock(tp, 1);
1da177e4 12634 tg3_disable_ints(tp);
f47c11ee 12635 tg3_full_unlock(tp);
1da177e4
LT
12636
12637 netif_device_detach(dev);
12638
f47c11ee 12639 tg3_full_lock(tp, 0);
944d980e 12640 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6a9eba15 12641 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
f47c11ee 12642 tg3_full_unlock(tp);
1da177e4
LT
12643
12644 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
12645 if (err) {
f47c11ee 12646 tg3_full_lock(tp, 0);
1da177e4 12647
6a9eba15 12648 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
b9ec6c1b
MC
12649 if (tg3_restart_hw(tp, 1))
12650 goto out;
1da177e4
LT
12651
12652 tp->timer.expires = jiffies + tp->timer_offset;
12653 add_timer(&tp->timer);
12654
12655 netif_device_attach(dev);
12656 tg3_netif_start(tp);
12657
b9ec6c1b 12658out:
f47c11ee 12659 tg3_full_unlock(tp);
1da177e4
LT
12660 }
12661
12662 return err;
12663}
12664
12665static int tg3_resume(struct pci_dev *pdev)
12666{
12667 struct net_device *dev = pci_get_drvdata(pdev);
12668 struct tg3 *tp = netdev_priv(dev);
12669 int err;
12670
3e0c95fd
MC
12671 pci_restore_state(tp->pdev);
12672
1da177e4
LT
12673 if (!netif_running(dev))
12674 return 0;
12675
bc1c7567 12676 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
12677 if (err)
12678 return err;
12679
12680 netif_device_attach(dev);
12681
f47c11ee 12682 tg3_full_lock(tp, 0);
1da177e4 12683
6a9eba15 12684 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
b9ec6c1b
MC
12685 err = tg3_restart_hw(tp, 1);
12686 if (err)
12687 goto out;
1da177e4
LT
12688
12689 tp->timer.expires = jiffies + tp->timer_offset;
12690 add_timer(&tp->timer);
12691
1da177e4
LT
12692 tg3_netif_start(tp);
12693
b9ec6c1b 12694out:
f47c11ee 12695 tg3_full_unlock(tp);
1da177e4 12696
b9ec6c1b 12697 return err;
1da177e4
LT
12698}
12699
12700static struct pci_driver tg3_driver = {
12701 .name = DRV_MODULE_NAME,
12702 .id_table = tg3_pci_tbl,
12703 .probe = tg3_init_one,
12704 .remove = __devexit_p(tg3_remove_one),
12705 .suspend = tg3_suspend,
12706 .resume = tg3_resume
12707};
12708
12709static int __init tg3_init(void)
12710{
29917620 12711 return pci_register_driver(&tg3_driver);
1da177e4
LT
12712}
12713
12714static void __exit tg3_cleanup(void)
12715{
12716 pci_unregister_driver(&tg3_driver);
12717}
12718
12719module_init(tg3_init);
12720module_exit(tg3_cleanup);