]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Add A1 revs
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
65610fba 7 * Copyright (C) 2005-2007 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
14c85021 26#include <linux/in.h>
1da177e4
LT
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
f9a5f7d3 40#include <linux/dma-mapping.h>
1da177e4
LT
41
42#include <net/checksum.h>
c9bdd4b5 43#include <net/ip.h>
1da177e4
LT
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
49b6e95f 50#ifdef CONFIG_SPARC
1da177e4 51#include <asm/idprom.h>
49b6e95f 52#include <asm/prom.h>
1da177e4
LT
53#endif
54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56#define TG3_VLAN_TAG_USED 1
57#else
58#define TG3_VLAN_TAG_USED 0
59#endif
60
1da177e4 61#define TG3_TSO_SUPPORT 1
1da177e4
LT
62
63#include "tg3.h"
64
65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": "
33b0c4fe
MC
67#define DRV_MODULE_VERSION "3.85"
68#define DRV_MODULE_RELDATE "October 18, 2007"
1da177e4
LT
69
70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0
72#define TG3_DEF_TX_MODE 0
73#define TG3_DEF_MSG_ENABLE \
74 (NETIF_MSG_DRV | \
75 NETIF_MSG_PROBE | \
76 NETIF_MSG_LINK | \
77 NETIF_MSG_TIMER | \
78 NETIF_MSG_IFDOWN | \
79 NETIF_MSG_IFUP | \
80 NETIF_MSG_RX_ERR | \
81 NETIF_MSG_TX_ERR)
82
83/* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
85 */
86#define TG3_TX_TIMEOUT (5 * HZ)
87
88/* hardware minimum and maximum for a single frame's data payload */
89#define TG3_MIN_MTU 60
90#define TG3_MAX_MTU(tp) \
0f893dc6 91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
92
93/* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
96 */
97#define TG3_RX_RING_SIZE 512
98#define TG3_DEF_RX_RING_PENDING 200
99#define TG3_RX_JUMBO_RING_SIZE 256
100#define TG3_DEF_RX_JUMBO_RING_PENDING 100
101
102/* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
107 */
108#define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
110
111#define TG3_TX_RING_SIZE 512
112#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113
114#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 TG3_RX_RING_SIZE)
116#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
121 TG3_TX_RING_SIZE)
1da177e4
LT
122#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
126
127/* minimum number of free TX descriptors required to wake up TX process */
42952231 128#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
1da177e4
LT
129
130/* number of ETHTOOL_GSTATS u64's */
131#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
4cafd3f5
MC
133#define TG3_NUM_TEST 6
134
1da177e4
LT
135static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140MODULE_LICENSE("GPL");
141MODULE_VERSION(DRV_MODULE_VERSION);
142
143static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144module_param(tg3_debug, int, 0);
145MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147static struct pci_device_id tg3_pci_tbl[] = {
13185217
HK
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217
HK
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
676917d4 191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
13185217
HK
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
6c7af27c 203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
9936bcf6
MC
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
13185217
HK
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213 {}
1da177e4
LT
214};
215
216MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
50da859d 218static const struct {
1da177e4
LT
219 const char string[ETH_GSTRING_LEN];
220} ethtool_stats_keys[TG3_NUM_STATS] = {
221 { "rx_octets" },
222 { "rx_fragments" },
223 { "rx_ucast_packets" },
224 { "rx_mcast_packets" },
225 { "rx_bcast_packets" },
226 { "rx_fcs_errors" },
227 { "rx_align_errors" },
228 { "rx_xon_pause_rcvd" },
229 { "rx_xoff_pause_rcvd" },
230 { "rx_mac_ctrl_rcvd" },
231 { "rx_xoff_entered" },
232 { "rx_frame_too_long_errors" },
233 { "rx_jabbers" },
234 { "rx_undersize_packets" },
235 { "rx_in_length_errors" },
236 { "rx_out_length_errors" },
237 { "rx_64_or_less_octet_packets" },
238 { "rx_65_to_127_octet_packets" },
239 { "rx_128_to_255_octet_packets" },
240 { "rx_256_to_511_octet_packets" },
241 { "rx_512_to_1023_octet_packets" },
242 { "rx_1024_to_1522_octet_packets" },
243 { "rx_1523_to_2047_octet_packets" },
244 { "rx_2048_to_4095_octet_packets" },
245 { "rx_4096_to_8191_octet_packets" },
246 { "rx_8192_to_9022_octet_packets" },
247
248 { "tx_octets" },
249 { "tx_collisions" },
250
251 { "tx_xon_sent" },
252 { "tx_xoff_sent" },
253 { "tx_flow_control" },
254 { "tx_mac_errors" },
255 { "tx_single_collisions" },
256 { "tx_mult_collisions" },
257 { "tx_deferred" },
258 { "tx_excessive_collisions" },
259 { "tx_late_collisions" },
260 { "tx_collide_2times" },
261 { "tx_collide_3times" },
262 { "tx_collide_4times" },
263 { "tx_collide_5times" },
264 { "tx_collide_6times" },
265 { "tx_collide_7times" },
266 { "tx_collide_8times" },
267 { "tx_collide_9times" },
268 { "tx_collide_10times" },
269 { "tx_collide_11times" },
270 { "tx_collide_12times" },
271 { "tx_collide_13times" },
272 { "tx_collide_14times" },
273 { "tx_collide_15times" },
274 { "tx_ucast_packets" },
275 { "tx_mcast_packets" },
276 { "tx_bcast_packets" },
277 { "tx_carrier_sense_errors" },
278 { "tx_discards" },
279 { "tx_errors" },
280
281 { "dma_writeq_full" },
282 { "dma_write_prioq_full" },
283 { "rxbds_empty" },
284 { "rx_discards" },
285 { "rx_errors" },
286 { "rx_threshold_hit" },
287
288 { "dma_readq_full" },
289 { "dma_read_prioq_full" },
290 { "tx_comp_queue_full" },
291
292 { "ring_set_send_prod_index" },
293 { "ring_status_update" },
294 { "nic_irqs" },
295 { "nic_avoided_irqs" },
296 { "nic_tx_threshold_hit" }
297};
298
50da859d 299static const struct {
4cafd3f5
MC
300 const char string[ETH_GSTRING_LEN];
301} ethtool_test_keys[TG3_NUM_TEST] = {
302 { "nvram test (online) " },
303 { "link test (online) " },
304 { "register test (offline)" },
305 { "memory test (offline)" },
306 { "loopback test (offline)" },
307 { "interrupt test (offline)" },
308};
309
b401e9e2
MC
310static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311{
312 writel(val, tp->regs + off);
313}
314
315static u32 tg3_read32(struct tg3 *tp, u32 off)
316{
6aa20a22 317 return (readl(tp->regs + off));
b401e9e2
MC
318}
319
0d3031d9
MC
320static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321{
322 writel(val, tp->aperegs + off);
323}
324
325static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326{
327 return (readl(tp->aperegs + off));
328}
329
1da177e4
LT
330static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
331{
6892914f
MC
332 unsigned long flags;
333
334 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
335 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 337 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
338}
339
340static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
341{
342 writel(val, tp->regs + off);
343 readl(tp->regs + off);
1da177e4
LT
344}
345
6892914f 346static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 347{
6892914f
MC
348 unsigned long flags;
349 u32 val;
350
351 spin_lock_irqsave(&tp->indirect_lock, flags);
352 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354 spin_unlock_irqrestore(&tp->indirect_lock, flags);
355 return val;
356}
357
358static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
359{
360 unsigned long flags;
361
362 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
363 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
364 TG3_64BIT_REG_LOW, val);
365 return;
366 }
367 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
368 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
369 TG3_64BIT_REG_LOW, val);
370 return;
1da177e4 371 }
6892914f
MC
372
373 spin_lock_irqsave(&tp->indirect_lock, flags);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
375 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
376 spin_unlock_irqrestore(&tp->indirect_lock, flags);
377
378 /* In indirect mode when disabling interrupts, we also need
379 * to clear the interrupt bit in the GRC local ctrl register.
380 */
381 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
382 (val == 0x1)) {
383 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
384 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
385 }
386}
387
388static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
389{
390 unsigned long flags;
391 u32 val;
392
393 spin_lock_irqsave(&tp->indirect_lock, flags);
394 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396 spin_unlock_irqrestore(&tp->indirect_lock, flags);
397 return val;
398}
399
b401e9e2
MC
400/* usec_wait specifies the wait time in usec when writing to certain registers
401 * where it is unsafe to read back the register without some delay.
402 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404 */
405static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
6892914f 406{
b401e9e2
MC
407 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
408 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
409 /* Non-posted methods */
410 tp->write32(tp, off, val);
411 else {
412 /* Posted method */
413 tg3_write32(tp, off, val);
414 if (usec_wait)
415 udelay(usec_wait);
416 tp->read32(tp, off);
417 }
418 /* Wait again after the read for the posted method to guarantee that
419 * the wait time is met.
420 */
421 if (usec_wait)
422 udelay(usec_wait);
1da177e4
LT
423}
424
09ee929c
MC
425static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
426{
427 tp->write32_mbox(tp, off, val);
6892914f
MC
428 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430 tp->read32_mbox(tp, off);
09ee929c
MC
431}
432
20094930 433static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
434{
435 void __iomem *mbox = tp->regs + off;
436 writel(val, mbox);
437 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
438 writel(val, mbox);
439 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
440 readl(mbox);
441}
442
b5d3772c
MC
443static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444{
445 return (readl(tp->regs + off + GRCMBOX_BASE));
446}
447
448static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449{
450 writel(val, tp->regs + off + GRCMBOX_BASE);
451}
452
20094930 453#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 454#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
455#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
456#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 457#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
458
459#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
460#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
461#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 462#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
463
464static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
465{
6892914f
MC
466 unsigned long flags;
467
b5d3772c
MC
468 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
469 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
470 return;
471
6892914f 472 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
473 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 476
bbadf503
MC
477 /* Always leave this as zero. */
478 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
479 } else {
480 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
481 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 482
bbadf503
MC
483 /* Always leave this as zero. */
484 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
485 }
486 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
487}
488
1da177e4
LT
489static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
490{
6892914f
MC
491 unsigned long flags;
492
b5d3772c
MC
493 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
494 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
495 *val = 0;
496 return;
497 }
498
6892914f 499 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
500 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 503
bbadf503
MC
504 /* Always leave this as zero. */
505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
506 } else {
507 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508 *val = tr32(TG3PCI_MEM_WIN_DATA);
509
510 /* Always leave this as zero. */
511 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
512 }
6892914f 513 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
514}
515
0d3031d9
MC
516static void tg3_ape_lock_init(struct tg3 *tp)
517{
518 int i;
519
520 /* Make sure the driver hasn't any stale locks. */
521 for (i = 0; i < 8; i++)
522 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523 APE_LOCK_GRANT_DRIVER);
524}
525
526static int tg3_ape_lock(struct tg3 *tp, int locknum)
527{
528 int i, off;
529 int ret = 0;
530 u32 status;
531
532 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533 return 0;
534
535 switch (locknum) {
536 case TG3_APE_LOCK_MEM:
537 break;
538 default:
539 return -EINVAL;
540 }
541
542 off = 4 * locknum;
543
544 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546 /* Wait for up to 1 millisecond to acquire lock. */
547 for (i = 0; i < 100; i++) {
548 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549 if (status == APE_LOCK_GRANT_DRIVER)
550 break;
551 udelay(10);
552 }
553
554 if (status != APE_LOCK_GRANT_DRIVER) {
555 /* Revoke the lock request. */
556 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557 APE_LOCK_GRANT_DRIVER);
558
559 ret = -EBUSY;
560 }
561
562 return ret;
563}
564
565static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566{
567 int off;
568
569 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570 return;
571
572 switch (locknum) {
573 case TG3_APE_LOCK_MEM:
574 break;
575 default:
576 return;
577 }
578
579 off = 4 * locknum;
580 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581}
582
1da177e4
LT
583static void tg3_disable_ints(struct tg3 *tp)
584{
585 tw32(TG3PCI_MISC_HOST_CTRL,
586 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c 587 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1da177e4
LT
588}
589
590static inline void tg3_cond_int(struct tg3 *tp)
591{
38f3843e
MC
592 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
593 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4 594 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
b5d3772c
MC
595 else
596 tw32(HOSTCC_MODE, tp->coalesce_mode |
597 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
598}
599
600static void tg3_enable_ints(struct tg3 *tp)
601{
bbe832c0
MC
602 tp->irq_sync = 0;
603 wmb();
604
1da177e4
LT
605 tw32(TG3PCI_MISC_HOST_CTRL,
606 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c
MC
607 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
608 (tp->last_tag << 24));
fcfa0a32
MC
609 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
610 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
611 (tp->last_tag << 24));
1da177e4
LT
612 tg3_cond_int(tp);
613}
614
04237ddd
MC
615static inline unsigned int tg3_has_work(struct tg3 *tp)
616{
617 struct tg3_hw_status *sblk = tp->hw_status;
618 unsigned int work_exists = 0;
619
620 /* check for phy events */
621 if (!(tp->tg3_flags &
622 (TG3_FLAG_USE_LINKCHG_REG |
623 TG3_FLAG_POLL_SERDES))) {
624 if (sblk->status & SD_STATUS_LINK_CHG)
625 work_exists = 1;
626 }
627 /* check for RX/TX work to do */
628 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630 work_exists = 1;
631
632 return work_exists;
633}
634
1da177e4 635/* tg3_restart_ints
04237ddd
MC
636 * similar to tg3_enable_ints, but it accurately determines whether there
637 * is new work pending and can return without flushing the PIO write
6aa20a22 638 * which reenables interrupts
1da177e4
LT
639 */
640static void tg3_restart_ints(struct tg3 *tp)
641{
fac9b83e
DM
642 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
643 tp->last_tag << 24);
1da177e4
LT
644 mmiowb();
645
fac9b83e
DM
646 /* When doing tagged status, this work check is unnecessary.
647 * The last_tag we write above tells the chip which piece of
648 * work we've completed.
649 */
650 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
651 tg3_has_work(tp))
04237ddd
MC
652 tw32(HOSTCC_MODE, tp->coalesce_mode |
653 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
654}
655
656static inline void tg3_netif_stop(struct tg3 *tp)
657{
bbe832c0 658 tp->dev->trans_start = jiffies; /* prevent tx timeout */
bea3348e 659 napi_disable(&tp->napi);
1da177e4
LT
660 netif_tx_disable(tp->dev);
661}
662
663static inline void tg3_netif_start(struct tg3 *tp)
664{
665 netif_wake_queue(tp->dev);
666 /* NOTE: unconditional netif_wake_queue is only appropriate
667 * so long as all callers are assured to have free tx slots
668 * (such as after tg3_init_hw)
669 */
bea3348e 670 napi_enable(&tp->napi);
f47c11ee
DM
671 tp->hw_status->status |= SD_STATUS_UPDATED;
672 tg3_enable_ints(tp);
1da177e4
LT
673}
674
675static void tg3_switch_clocks(struct tg3 *tp)
676{
677 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
678 u32 orig_clock_ctrl;
679
795d01c5
MC
680 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
681 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
4cf78e4f
MC
682 return;
683
1da177e4
LT
684 orig_clock_ctrl = clock_ctrl;
685 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
686 CLOCK_CTRL_CLKRUN_OENABLE |
687 0x1f);
688 tp->pci_clock_ctrl = clock_ctrl;
689
690 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
691 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
b401e9e2
MC
692 tw32_wait_f(TG3PCI_CLOCK_CTRL,
693 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1da177e4
LT
694 }
695 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
b401e9e2
MC
696 tw32_wait_f(TG3PCI_CLOCK_CTRL,
697 clock_ctrl |
698 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
699 40);
700 tw32_wait_f(TG3PCI_CLOCK_CTRL,
701 clock_ctrl | (CLOCK_CTRL_ALTCLK),
702 40);
1da177e4 703 }
b401e9e2 704 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1da177e4
LT
705}
706
707#define PHY_BUSY_LOOPS 5000
708
709static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
710{
711 u32 frame_val;
712 unsigned int loops;
713 int ret;
714
715 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
716 tw32_f(MAC_MI_MODE,
717 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
718 udelay(80);
719 }
720
721 *val = 0x0;
722
723 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
724 MI_COM_PHY_ADDR_MASK);
725 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
726 MI_COM_REG_ADDR_MASK);
727 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
6aa20a22 728
1da177e4
LT
729 tw32_f(MAC_MI_COM, frame_val);
730
731 loops = PHY_BUSY_LOOPS;
732 while (loops != 0) {
733 udelay(10);
734 frame_val = tr32(MAC_MI_COM);
735
736 if ((frame_val & MI_COM_BUSY) == 0) {
737 udelay(5);
738 frame_val = tr32(MAC_MI_COM);
739 break;
740 }
741 loops -= 1;
742 }
743
744 ret = -EBUSY;
745 if (loops != 0) {
746 *val = frame_val & MI_COM_DATA_MASK;
747 ret = 0;
748 }
749
750 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
751 tw32_f(MAC_MI_MODE, tp->mi_mode);
752 udelay(80);
753 }
754
755 return ret;
756}
757
758static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
759{
760 u32 frame_val;
761 unsigned int loops;
762 int ret;
763
b5d3772c
MC
764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
765 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
766 return 0;
767
1da177e4
LT
768 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
769 tw32_f(MAC_MI_MODE,
770 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
771 udelay(80);
772 }
773
774 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
775 MI_COM_PHY_ADDR_MASK);
776 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
777 MI_COM_REG_ADDR_MASK);
778 frame_val |= (val & MI_COM_DATA_MASK);
779 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
6aa20a22 780
1da177e4
LT
781 tw32_f(MAC_MI_COM, frame_val);
782
783 loops = PHY_BUSY_LOOPS;
784 while (loops != 0) {
785 udelay(10);
786 frame_val = tr32(MAC_MI_COM);
787 if ((frame_val & MI_COM_BUSY) == 0) {
788 udelay(5);
789 frame_val = tr32(MAC_MI_COM);
790 break;
791 }
792 loops -= 1;
793 }
794
795 ret = -EBUSY;
796 if (loops != 0)
797 ret = 0;
798
799 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
800 tw32_f(MAC_MI_MODE, tp->mi_mode);
801 udelay(80);
802 }
803
804 return ret;
805}
806
9ef8ca99
MC
807static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
808{
809 u32 phy;
810
811 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
812 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
813 return;
814
815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
816 u32 ephy;
817
818 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
819 tg3_writephy(tp, MII_TG3_EPHY_TEST,
820 ephy | MII_TG3_EPHY_SHADOW_EN);
821 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
822 if (enable)
823 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
824 else
825 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
826 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
827 }
828 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
829 }
830 } else {
831 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
832 MII_TG3_AUXCTL_SHDWSEL_MISC;
833 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
834 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
835 if (enable)
836 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
837 else
838 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
839 phy |= MII_TG3_AUXCTL_MISC_WREN;
840 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
841 }
842 }
843}
844
1da177e4
LT
845static void tg3_phy_set_wirespeed(struct tg3 *tp)
846{
847 u32 val;
848
849 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
850 return;
851
852 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
853 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
854 tg3_writephy(tp, MII_TG3_AUX_CTRL,
855 (val | (1 << 15) | (1 << 4)));
856}
857
858static int tg3_bmcr_reset(struct tg3 *tp)
859{
860 u32 phy_control;
861 int limit, err;
862
863 /* OK, reset it, and poll the BMCR_RESET bit until it
864 * clears or we time out.
865 */
866 phy_control = BMCR_RESET;
867 err = tg3_writephy(tp, MII_BMCR, phy_control);
868 if (err != 0)
869 return -EBUSY;
870
871 limit = 5000;
872 while (limit--) {
873 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874 if (err != 0)
875 return -EBUSY;
876
877 if ((phy_control & BMCR_RESET) == 0) {
878 udelay(40);
879 break;
880 }
881 udelay(10);
882 }
883 if (limit <= 0)
884 return -EBUSY;
885
886 return 0;
887}
888
889static int tg3_wait_macro_done(struct tg3 *tp)
890{
891 int limit = 100;
892
893 while (limit--) {
894 u32 tmp32;
895
896 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897 if ((tmp32 & 0x1000) == 0)
898 break;
899 }
900 }
901 if (limit <= 0)
902 return -EBUSY;
903
904 return 0;
905}
906
907static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
908{
909 static const u32 test_pat[4][6] = {
910 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
911 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
912 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
913 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
914 };
915 int chan;
916
917 for (chan = 0; chan < 4; chan++) {
918 int i;
919
920 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
921 (chan * 0x2000) | 0x0200);
922 tg3_writephy(tp, 0x16, 0x0002);
923
924 for (i = 0; i < 6; i++)
925 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
926 test_pat[chan][i]);
927
928 tg3_writephy(tp, 0x16, 0x0202);
929 if (tg3_wait_macro_done(tp)) {
930 *resetp = 1;
931 return -EBUSY;
932 }
933
934 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
935 (chan * 0x2000) | 0x0200);
936 tg3_writephy(tp, 0x16, 0x0082);
937 if (tg3_wait_macro_done(tp)) {
938 *resetp = 1;
939 return -EBUSY;
940 }
941
942 tg3_writephy(tp, 0x16, 0x0802);
943 if (tg3_wait_macro_done(tp)) {
944 *resetp = 1;
945 return -EBUSY;
946 }
947
948 for (i = 0; i < 6; i += 2) {
949 u32 low, high;
950
951 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
952 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
953 tg3_wait_macro_done(tp)) {
954 *resetp = 1;
955 return -EBUSY;
956 }
957 low &= 0x7fff;
958 high &= 0x000f;
959 if (low != test_pat[chan][i] ||
960 high != test_pat[chan][i+1]) {
961 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
962 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
963 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
964
965 return -EBUSY;
966 }
967 }
968 }
969
970 return 0;
971}
972
973static int tg3_phy_reset_chanpat(struct tg3 *tp)
974{
975 int chan;
976
977 for (chan = 0; chan < 4; chan++) {
978 int i;
979
980 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981 (chan * 0x2000) | 0x0200);
982 tg3_writephy(tp, 0x16, 0x0002);
983 for (i = 0; i < 6; i++)
984 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985 tg3_writephy(tp, 0x16, 0x0202);
986 if (tg3_wait_macro_done(tp))
987 return -EBUSY;
988 }
989
990 return 0;
991}
992
993static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
994{
995 u32 reg32, phy9_orig;
996 int retries, do_phy_reset, err;
997
998 retries = 10;
999 do_phy_reset = 1;
1000 do {
1001 if (do_phy_reset) {
1002 err = tg3_bmcr_reset(tp);
1003 if (err)
1004 return err;
1005 do_phy_reset = 0;
1006 }
1007
1008 /* Disable transmitter and interrupt. */
1009 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1010 continue;
1011
1012 reg32 |= 0x3000;
1013 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1014
1015 /* Set full-duplex, 1000 mbps. */
1016 tg3_writephy(tp, MII_BMCR,
1017 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1018
1019 /* Set to master mode. */
1020 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1021 continue;
1022
1023 tg3_writephy(tp, MII_TG3_CTRL,
1024 (MII_TG3_CTRL_AS_MASTER |
1025 MII_TG3_CTRL_ENABLE_AS_MASTER));
1026
1027 /* Enable SM_DSP_CLOCK and 6dB. */
1028 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1029
1030 /* Block the PHY control access. */
1031 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1032 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1033
1034 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1035 if (!err)
1036 break;
1037 } while (--retries);
1038
1039 err = tg3_phy_reset_chanpat(tp);
1040 if (err)
1041 return err;
1042
1043 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1044 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1045
1046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1047 tg3_writephy(tp, 0x16, 0x0000);
1048
1049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1051 /* Set Extended packet length bit for jumbo frames */
1052 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1053 }
1054 else {
1055 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1056 }
1057
1058 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1059
1060 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1061 reg32 &= ~0x3000;
1062 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1063 } else if (!err)
1064 err = -EBUSY;
1065
1066 return err;
1067}
1068
c8e1e82b
MC
1069static void tg3_link_report(struct tg3 *);
1070
1da177e4
LT
1071/* This will reset the tigon3 PHY if there is no valid
1072 * link unless the FORCE argument is non-zero.
1073 */
1074static int tg3_phy_reset(struct tg3 *tp)
1075{
1076 u32 phy_status;
1077 int err;
1078
60189ddf
MC
1079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1080 u32 val;
1081
1082 val = tr32(GRC_MISC_CFG);
1083 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1084 udelay(40);
1085 }
1da177e4
LT
1086 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1087 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1088 if (err != 0)
1089 return -EBUSY;
1090
c8e1e82b
MC
1091 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1092 netif_carrier_off(tp->dev);
1093 tg3_link_report(tp);
1094 }
1095
1da177e4
LT
1096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1099 err = tg3_phy_reset_5703_4_5(tp);
1100 if (err)
1101 return err;
1102 goto out;
1103 }
1104
1105 err = tg3_bmcr_reset(tp);
1106 if (err)
1107 return err;
1108
b5af7126 1109 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
ce057f01
MC
1110 u32 val;
1111
1112 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1113 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1114 CPMU_LSPD_1000MB_MACCLK_12_5) {
1115 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1116 udelay(40);
1117 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1118 }
662f38d2
MC
1119
1120 /* Disable GPHY autopowerdown. */
1121 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1122 MII_TG3_MISC_SHDW_WREN |
1123 MII_TG3_MISC_SHDW_APD_SEL |
1124 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
ce057f01
MC
1125 }
1126
1da177e4
LT
1127out:
1128 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1129 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1130 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1131 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1132 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1133 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1134 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1135 }
1136 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1137 tg3_writephy(tp, 0x1c, 0x8d68);
1138 tg3_writephy(tp, 0x1c, 0x8d68);
1139 }
1140 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1141 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1142 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1143 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1144 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1145 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1146 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1147 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1148 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1149 }
c424cb24
MC
1150 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1151 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1152 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
c1d2a196
MC
1153 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1154 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1155 tg3_writephy(tp, MII_TG3_TEST1,
1156 MII_TG3_TEST1_TRIM_EN | 0x4);
1157 } else
1158 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
c424cb24
MC
1159 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1160 }
1da177e4
LT
1161 /* Set Extended packet length bit (bit 14) on all chips that */
1162 /* support jumbo frames */
1163 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1164 /* Cannot do read-modify-write on 5401 */
1165 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
0f893dc6 1166 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1167 u32 phy_reg;
1168
1169 /* Set bit 14 with read-modify-write to preserve other bits */
1170 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1171 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1172 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1173 }
1174
1175 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1176 * jumbo frames transmission.
1177 */
0f893dc6 1178 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1179 u32 phy_reg;
1180
1181 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1182 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1183 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1184 }
1185
715116a1 1186 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1
MC
1187 /* adjust output voltage */
1188 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
715116a1
MC
1189 }
1190
9ef8ca99 1191 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
1192 tg3_phy_set_wirespeed(tp);
1193 return 0;
1194}
1195
1196static void tg3_frob_aux_power(struct tg3 *tp)
1197{
1198 struct tg3 *tp_peer = tp;
1199
9d26e213 1200 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1da177e4
LT
1201 return;
1202
8c2dc7e1
MC
1203 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1204 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1205 struct net_device *dev_peer;
1206
1207 dev_peer = pci_get_drvdata(tp->pdev_peer);
bc1c7567 1208 /* remove_one() may have been run on the peer. */
8c2dc7e1 1209 if (!dev_peer)
bc1c7567
MC
1210 tp_peer = tp;
1211 else
1212 tp_peer = netdev_priv(dev_peer);
1da177e4
LT
1213 }
1214
1da177e4 1215 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
6921d201
MC
1216 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1217 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1218 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1da177e4
LT
1219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
b401e9e2
MC
1221 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1222 (GRC_LCLCTRL_GPIO_OE0 |
1223 GRC_LCLCTRL_GPIO_OE1 |
1224 GRC_LCLCTRL_GPIO_OE2 |
1225 GRC_LCLCTRL_GPIO_OUTPUT0 |
1226 GRC_LCLCTRL_GPIO_OUTPUT1),
1227 100);
1da177e4
LT
1228 } else {
1229 u32 no_gpio2;
dc56b7d4 1230 u32 grc_local_ctrl = 0;
1da177e4
LT
1231
1232 if (tp_peer != tp &&
1233 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1234 return;
1235
dc56b7d4
MC
1236 /* Workaround to prevent overdrawing Amps. */
1237 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1238 ASIC_REV_5714) {
1239 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
b401e9e2
MC
1240 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1241 grc_local_ctrl, 100);
dc56b7d4
MC
1242 }
1243
1da177e4
LT
1244 /* On 5753 and variants, GPIO2 cannot be used. */
1245 no_gpio2 = tp->nic_sram_data_cfg &
1246 NIC_SRAM_DATA_CFG_NO_GPIO2;
1247
dc56b7d4 1248 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1da177e4
LT
1249 GRC_LCLCTRL_GPIO_OE1 |
1250 GRC_LCLCTRL_GPIO_OE2 |
1251 GRC_LCLCTRL_GPIO_OUTPUT1 |
1252 GRC_LCLCTRL_GPIO_OUTPUT2;
1253 if (no_gpio2) {
1254 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1255 GRC_LCLCTRL_GPIO_OUTPUT2);
1256 }
b401e9e2
MC
1257 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1258 grc_local_ctrl, 100);
1da177e4
LT
1259
1260 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1261
b401e9e2
MC
1262 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1263 grc_local_ctrl, 100);
1da177e4
LT
1264
1265 if (!no_gpio2) {
1266 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
b401e9e2
MC
1267 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1268 grc_local_ctrl, 100);
1da177e4
LT
1269 }
1270 }
1271 } else {
1272 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1273 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1274 if (tp_peer != tp &&
1275 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1276 return;
1277
b401e9e2
MC
1278 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1279 (GRC_LCLCTRL_GPIO_OE1 |
1280 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4 1281
b401e9e2
MC
1282 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1283 GRC_LCLCTRL_GPIO_OE1, 100);
1da177e4 1284
b401e9e2
MC
1285 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1286 (GRC_LCLCTRL_GPIO_OE1 |
1287 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4
LT
1288 }
1289 }
1290}
1291
e8f3f6ca
MC
1292static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1293{
1294 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1295 return 1;
1296 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1297 if (speed != SPEED_10)
1298 return 1;
1299 } else if (speed == SPEED_10)
1300 return 1;
1301
1302 return 0;
1303}
1304
1da177e4
LT
1305static int tg3_setup_phy(struct tg3 *, int);
1306
1307#define RESET_KIND_SHUTDOWN 0
1308#define RESET_KIND_INIT 1
1309#define RESET_KIND_SUSPEND 2
1310
1311static void tg3_write_sig_post_reset(struct tg3 *, int);
1312static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1313static int tg3_nvram_lock(struct tg3 *);
1314static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1315
15c3b696
MC
1316static void tg3_power_down_phy(struct tg3 *tp)
1317{
ce057f01
MC
1318 u32 val;
1319
5129724a
MC
1320 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1321 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1322 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1323 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1324
1325 sg_dig_ctrl |=
1326 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1327 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1328 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1329 }
3f7045c1 1330 return;
5129724a 1331 }
3f7045c1 1332
60189ddf 1333 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
1334 tg3_bmcr_reset(tp);
1335 val = tr32(GRC_MISC_CFG);
1336 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1337 udelay(40);
1338 return;
1339 } else {
715116a1
MC
1340 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1341 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1342 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1343 }
3f7045c1 1344
15c3b696
MC
1345 /* The PHY should not be powered down on some chips because
1346 * of bugs.
1347 */
1348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1350 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1351 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1352 return;
ce057f01 1353
b5af7126 1354 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
ce057f01
MC
1355 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1356 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1357 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1358 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1359 }
1360
15c3b696
MC
1361 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1362}
1363
bc1c7567 1364static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1365{
1366 u32 misc_host_ctrl;
1367 u16 power_control, power_caps;
1368 int pm = tp->pm_cap;
1369
1370 /* Make sure register accesses (indirect or otherwise)
1371 * will function correctly.
1372 */
1373 pci_write_config_dword(tp->pdev,
1374 TG3PCI_MISC_HOST_CTRL,
1375 tp->misc_host_ctrl);
1376
1377 pci_read_config_word(tp->pdev,
1378 pm + PCI_PM_CTRL,
1379 &power_control);
1380 power_control |= PCI_PM_CTRL_PME_STATUS;
1381 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1382 switch (state) {
bc1c7567 1383 case PCI_D0:
1da177e4
LT
1384 power_control |= 0;
1385 pci_write_config_word(tp->pdev,
1386 pm + PCI_PM_CTRL,
1387 power_control);
8c6bda1a
MC
1388 udelay(100); /* Delay after power state change */
1389
9d26e213
MC
1390 /* Switch out of Vaux if it is a NIC */
1391 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
b401e9e2 1392 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1393
1394 return 0;
1395
bc1c7567 1396 case PCI_D1:
1da177e4
LT
1397 power_control |= 1;
1398 break;
1399
bc1c7567 1400 case PCI_D2:
1da177e4
LT
1401 power_control |= 2;
1402 break;
1403
bc1c7567 1404 case PCI_D3hot:
1da177e4
LT
1405 power_control |= 3;
1406 break;
1407
1408 default:
1409 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1410 "requested.\n",
1411 tp->dev->name, state);
1412 return -EINVAL;
1413 };
1414
1415 power_control |= PCI_PM_CTRL_PME_ENABLE;
1416
1417 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1418 tw32(TG3PCI_MISC_HOST_CTRL,
1419 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1420
1421 if (tp->link_config.phy_is_low_power == 0) {
1422 tp->link_config.phy_is_low_power = 1;
1423 tp->link_config.orig_speed = tp->link_config.speed;
1424 tp->link_config.orig_duplex = tp->link_config.duplex;
1425 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1426 }
1427
747e8f8b 1428 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1429 tp->link_config.speed = SPEED_10;
1430 tp->link_config.duplex = DUPLEX_HALF;
1431 tp->link_config.autoneg = AUTONEG_ENABLE;
1432 tg3_setup_phy(tp, 0);
1433 }
1434
b5d3772c
MC
1435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1436 u32 val;
1437
1438 val = tr32(GRC_VCPU_EXT_CTRL);
1439 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1440 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
6921d201
MC
1441 int i;
1442 u32 val;
1443
1444 for (i = 0; i < 200; i++) {
1445 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1446 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1447 break;
1448 msleep(1);
1449 }
1450 }
a85feb8c
GZ
1451 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1452 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1453 WOL_DRV_STATE_SHUTDOWN |
1454 WOL_DRV_WOL |
1455 WOL_SET_MAGIC_PKT);
6921d201 1456
1da177e4
LT
1457 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1458
1459 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1460 u32 mac_mode;
1461
1462 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1463 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1464 udelay(40);
1465
3f7045c1
MC
1466 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1467 mac_mode = MAC_MODE_PORT_MODE_GMII;
1468 else
1469 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 1470
e8f3f6ca
MC
1471 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1472 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1473 ASIC_REV_5700) {
1474 u32 speed = (tp->tg3_flags &
1475 TG3_FLAG_WOL_SPEED_100MB) ?
1476 SPEED_100 : SPEED_10;
1477 if (tg3_5700_link_polarity(tp, speed))
1478 mac_mode |= MAC_MODE_LINK_POLARITY;
1479 else
1480 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1481 }
1da177e4
LT
1482 } else {
1483 mac_mode = MAC_MODE_PORT_MODE_TBI;
1484 }
1485
cbf46853 1486 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1487 tw32(MAC_LED_CTRL, tp->led_ctrl);
1488
1489 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1490 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1491 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1492
1493 tw32_f(MAC_MODE, mac_mode);
1494 udelay(100);
1495
1496 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1497 udelay(10);
1498 }
1499
1500 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1501 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1502 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1503 u32 base_val;
1504
1505 base_val = tp->pci_clock_ctrl;
1506 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1507 CLOCK_CTRL_TXCLK_DISABLE);
1508
b401e9e2
MC
1509 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1510 CLOCK_CTRL_PWRDOWN_PLL133, 40);
d7b0a857 1511 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
795d01c5 1512 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
d7b0a857 1513 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
4cf78e4f 1514 /* do nothing */
85e94ced 1515 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1516 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1517 u32 newbits1, newbits2;
1518
1519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1521 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1522 CLOCK_CTRL_TXCLK_DISABLE |
1523 CLOCK_CTRL_ALTCLK);
1524 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1525 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1526 newbits1 = CLOCK_CTRL_625_CORE;
1527 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1528 } else {
1529 newbits1 = CLOCK_CTRL_ALTCLK;
1530 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1531 }
1532
b401e9e2
MC
1533 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1534 40);
1da177e4 1535
b401e9e2
MC
1536 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1537 40);
1da177e4
LT
1538
1539 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1540 u32 newbits3;
1541
1542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1543 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1544 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1545 CLOCK_CTRL_TXCLK_DISABLE |
1546 CLOCK_CTRL_44MHZ_CORE);
1547 } else {
1548 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1549 }
1550
b401e9e2
MC
1551 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1552 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1553 }
1554 }
1555
6921d201 1556 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
0d3031d9
MC
1557 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1558 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
3f7045c1 1559 tg3_power_down_phy(tp);
6921d201 1560
1da177e4
LT
1561 tg3_frob_aux_power(tp);
1562
1563 /* Workaround for unstable PLL clock */
1564 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1565 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1566 u32 val = tr32(0x7d00);
1567
1568 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1569 tw32(0x7d00, val);
6921d201 1570 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1571 int err;
1572
1573 err = tg3_nvram_lock(tp);
1da177e4 1574 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1575 if (!err)
1576 tg3_nvram_unlock(tp);
6921d201 1577 }
1da177e4
LT
1578 }
1579
bbadf503
MC
1580 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1581
1da177e4
LT
1582 /* Finally, set the new power state. */
1583 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1584 udelay(100); /* Delay after power state change */
1da177e4 1585
1da177e4
LT
1586 return 0;
1587}
1588
1589static void tg3_link_report(struct tg3 *tp)
1590{
1591 if (!netif_carrier_ok(tp->dev)) {
9f88f29f
MC
1592 if (netif_msg_link(tp))
1593 printk(KERN_INFO PFX "%s: Link is down.\n",
1594 tp->dev->name);
1595 } else if (netif_msg_link(tp)) {
1da177e4
LT
1596 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1597 tp->dev->name,
1598 (tp->link_config.active_speed == SPEED_1000 ?
1599 1000 :
1600 (tp->link_config.active_speed == SPEED_100 ?
1601 100 : 10)),
1602 (tp->link_config.active_duplex == DUPLEX_FULL ?
1603 "full" : "half"));
1604
1605 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1606 "%s for RX.\n",
1607 tp->dev->name,
1608 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1609 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1610 }
1611}
1612
1613static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1614{
1615 u32 new_tg3_flags = 0;
1616 u32 old_rx_mode = tp->rx_mode;
1617 u32 old_tx_mode = tp->tx_mode;
1618
1619 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1620
1621 /* Convert 1000BaseX flow control bits to 1000BaseT
1622 * bits before resolving flow control.
1623 */
1624 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1625 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1626 ADVERTISE_PAUSE_ASYM);
1627 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1628
1629 if (local_adv & ADVERTISE_1000XPAUSE)
1630 local_adv |= ADVERTISE_PAUSE_CAP;
1631 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1632 local_adv |= ADVERTISE_PAUSE_ASYM;
1633 if (remote_adv & LPA_1000XPAUSE)
1634 remote_adv |= LPA_PAUSE_CAP;
1635 if (remote_adv & LPA_1000XPAUSE_ASYM)
1636 remote_adv |= LPA_PAUSE_ASYM;
1637 }
1638
1da177e4
LT
1639 if (local_adv & ADVERTISE_PAUSE_CAP) {
1640 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1641 if (remote_adv & LPA_PAUSE_CAP)
1642 new_tg3_flags |=
1643 (TG3_FLAG_RX_PAUSE |
1644 TG3_FLAG_TX_PAUSE);
1645 else if (remote_adv & LPA_PAUSE_ASYM)
1646 new_tg3_flags |=
1647 (TG3_FLAG_RX_PAUSE);
1648 } else {
1649 if (remote_adv & LPA_PAUSE_CAP)
1650 new_tg3_flags |=
1651 (TG3_FLAG_RX_PAUSE |
1652 TG3_FLAG_TX_PAUSE);
1653 }
1654 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1655 if ((remote_adv & LPA_PAUSE_CAP) &&
1656 (remote_adv & LPA_PAUSE_ASYM))
1657 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1658 }
1659
1660 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1661 tp->tg3_flags |= new_tg3_flags;
1662 } else {
1663 new_tg3_flags = tp->tg3_flags;
1664 }
1665
1666 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1667 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1668 else
1669 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1670
1671 if (old_rx_mode != tp->rx_mode) {
1672 tw32_f(MAC_RX_MODE, tp->rx_mode);
1673 }
6aa20a22 1674
1da177e4
LT
1675 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1676 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1677 else
1678 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1679
1680 if (old_tx_mode != tp->tx_mode) {
1681 tw32_f(MAC_TX_MODE, tp->tx_mode);
1682 }
1683}
1684
1685static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1686{
1687 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1688 case MII_TG3_AUX_STAT_10HALF:
1689 *speed = SPEED_10;
1690 *duplex = DUPLEX_HALF;
1691 break;
1692
1693 case MII_TG3_AUX_STAT_10FULL:
1694 *speed = SPEED_10;
1695 *duplex = DUPLEX_FULL;
1696 break;
1697
1698 case MII_TG3_AUX_STAT_100HALF:
1699 *speed = SPEED_100;
1700 *duplex = DUPLEX_HALF;
1701 break;
1702
1703 case MII_TG3_AUX_STAT_100FULL:
1704 *speed = SPEED_100;
1705 *duplex = DUPLEX_FULL;
1706 break;
1707
1708 case MII_TG3_AUX_STAT_1000HALF:
1709 *speed = SPEED_1000;
1710 *duplex = DUPLEX_HALF;
1711 break;
1712
1713 case MII_TG3_AUX_STAT_1000FULL:
1714 *speed = SPEED_1000;
1715 *duplex = DUPLEX_FULL;
1716 break;
1717
1718 default:
715116a1
MC
1719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1720 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1721 SPEED_10;
1722 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1723 DUPLEX_HALF;
1724 break;
1725 }
1da177e4
LT
1726 *speed = SPEED_INVALID;
1727 *duplex = DUPLEX_INVALID;
1728 break;
1729 };
1730}
1731
1732static void tg3_phy_copper_begin(struct tg3 *tp)
1733{
1734 u32 new_adv;
1735 int i;
1736
1737 if (tp->link_config.phy_is_low_power) {
1738 /* Entering low power mode. Disable gigabit and
1739 * 100baseT advertisements.
1740 */
1741 tg3_writephy(tp, MII_TG3_CTRL, 0);
1742
1743 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1744 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1745 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1746 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1747
1748 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1749 } else if (tp->link_config.speed == SPEED_INVALID) {
1da177e4
LT
1750 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1751 tp->link_config.advertising &=
1752 ~(ADVERTISED_1000baseT_Half |
1753 ADVERTISED_1000baseT_Full);
1754
1755 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1756 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1757 new_adv |= ADVERTISE_10HALF;
1758 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1759 new_adv |= ADVERTISE_10FULL;
1760 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1761 new_adv |= ADVERTISE_100HALF;
1762 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1763 new_adv |= ADVERTISE_100FULL;
1764 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1765
1766 if (tp->link_config.advertising &
1767 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1768 new_adv = 0;
1769 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1770 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1771 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1772 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1773 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1774 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1775 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1776 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1777 MII_TG3_CTRL_ENABLE_AS_MASTER);
1778 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1779 } else {
1780 tg3_writephy(tp, MII_TG3_CTRL, 0);
1781 }
1782 } else {
1783 /* Asking for a specific link mode. */
1784 if (tp->link_config.speed == SPEED_1000) {
1785 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1786 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1787
1788 if (tp->link_config.duplex == DUPLEX_FULL)
1789 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1790 else
1791 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1792 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1793 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1794 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1795 MII_TG3_CTRL_ENABLE_AS_MASTER);
1796 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1797 } else {
1798 tg3_writephy(tp, MII_TG3_CTRL, 0);
1799
1800 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1801 if (tp->link_config.speed == SPEED_100) {
1802 if (tp->link_config.duplex == DUPLEX_FULL)
1803 new_adv |= ADVERTISE_100FULL;
1804 else
1805 new_adv |= ADVERTISE_100HALF;
1806 } else {
1807 if (tp->link_config.duplex == DUPLEX_FULL)
1808 new_adv |= ADVERTISE_10FULL;
1809 else
1810 new_adv |= ADVERTISE_10HALF;
1811 }
1812 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1813 }
1814 }
1815
1816 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1817 tp->link_config.speed != SPEED_INVALID) {
1818 u32 bmcr, orig_bmcr;
1819
1820 tp->link_config.active_speed = tp->link_config.speed;
1821 tp->link_config.active_duplex = tp->link_config.duplex;
1822
1823 bmcr = 0;
1824 switch (tp->link_config.speed) {
1825 default:
1826 case SPEED_10:
1827 break;
1828
1829 case SPEED_100:
1830 bmcr |= BMCR_SPEED100;
1831 break;
1832
1833 case SPEED_1000:
1834 bmcr |= TG3_BMCR_SPEED1000;
1835 break;
1836 };
1837
1838 if (tp->link_config.duplex == DUPLEX_FULL)
1839 bmcr |= BMCR_FULLDPLX;
1840
1841 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1842 (bmcr != orig_bmcr)) {
1843 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1844 for (i = 0; i < 1500; i++) {
1845 u32 tmp;
1846
1847 udelay(10);
1848 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1849 tg3_readphy(tp, MII_BMSR, &tmp))
1850 continue;
1851 if (!(tmp & BMSR_LSTATUS)) {
1852 udelay(40);
1853 break;
1854 }
1855 }
1856 tg3_writephy(tp, MII_BMCR, bmcr);
1857 udelay(40);
1858 }
1859 } else {
1860 tg3_writephy(tp, MII_BMCR,
1861 BMCR_ANENABLE | BMCR_ANRESTART);
1862 }
1863}
1864
1865static int tg3_init_5401phy_dsp(struct tg3 *tp)
1866{
1867 int err;
1868
1869 /* Turn off tap power management. */
1870 /* Set Extended packet length bit */
1871 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1872
1873 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1874 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1875
1876 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1877 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1878
1879 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1880 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1881
1882 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1883 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1884
1885 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1886 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1887
1888 udelay(40);
1889
1890 return err;
1891}
1892
3600d918 1893static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1da177e4 1894{
3600d918
MC
1895 u32 adv_reg, all_mask = 0;
1896
1897 if (mask & ADVERTISED_10baseT_Half)
1898 all_mask |= ADVERTISE_10HALF;
1899 if (mask & ADVERTISED_10baseT_Full)
1900 all_mask |= ADVERTISE_10FULL;
1901 if (mask & ADVERTISED_100baseT_Half)
1902 all_mask |= ADVERTISE_100HALF;
1903 if (mask & ADVERTISED_100baseT_Full)
1904 all_mask |= ADVERTISE_100FULL;
1da177e4
LT
1905
1906 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1907 return 0;
1908
1da177e4
LT
1909 if ((adv_reg & all_mask) != all_mask)
1910 return 0;
1911 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1912 u32 tg3_ctrl;
1913
3600d918
MC
1914 all_mask = 0;
1915 if (mask & ADVERTISED_1000baseT_Half)
1916 all_mask |= ADVERTISE_1000HALF;
1917 if (mask & ADVERTISED_1000baseT_Full)
1918 all_mask |= ADVERTISE_1000FULL;
1919
1da177e4
LT
1920 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1921 return 0;
1922
1da177e4
LT
1923 if ((tg3_ctrl & all_mask) != all_mask)
1924 return 0;
1925 }
1926 return 1;
1927}
1928
1929static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1930{
1931 int current_link_up;
1932 u32 bmsr, dummy;
1933 u16 current_speed;
1934 u8 current_duplex;
1935 int i, err;
1936
1937 tw32(MAC_EVENT, 0);
1938
1939 tw32_f(MAC_STATUS,
1940 (MAC_STATUS_SYNC_CHANGED |
1941 MAC_STATUS_CFG_CHANGED |
1942 MAC_STATUS_MI_COMPLETION |
1943 MAC_STATUS_LNKSTATE_CHANGED));
1944 udelay(40);
1945
1946 tp->mi_mode = MAC_MI_MODE_BASE;
1947 tw32_f(MAC_MI_MODE, tp->mi_mode);
1948 udelay(80);
1949
1950 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1951
1952 /* Some third-party PHYs need to be reset on link going
1953 * down.
1954 */
1955 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1958 netif_carrier_ok(tp->dev)) {
1959 tg3_readphy(tp, MII_BMSR, &bmsr);
1960 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1961 !(bmsr & BMSR_LSTATUS))
1962 force_reset = 1;
1963 }
1964 if (force_reset)
1965 tg3_phy_reset(tp);
1966
1967 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1968 tg3_readphy(tp, MII_BMSR, &bmsr);
1969 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1970 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1971 bmsr = 0;
1972
1973 if (!(bmsr & BMSR_LSTATUS)) {
1974 err = tg3_init_5401phy_dsp(tp);
1975 if (err)
1976 return err;
1977
1978 tg3_readphy(tp, MII_BMSR, &bmsr);
1979 for (i = 0; i < 1000; i++) {
1980 udelay(10);
1981 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1982 (bmsr & BMSR_LSTATUS)) {
1983 udelay(40);
1984 break;
1985 }
1986 }
1987
1988 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1989 !(bmsr & BMSR_LSTATUS) &&
1990 tp->link_config.active_speed == SPEED_1000) {
1991 err = tg3_phy_reset(tp);
1992 if (!err)
1993 err = tg3_init_5401phy_dsp(tp);
1994 if (err)
1995 return err;
1996 }
1997 }
1998 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1999 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2000 /* 5701 {A0,B0} CRC bug workaround */
2001 tg3_writephy(tp, 0x15, 0x0a75);
2002 tg3_writephy(tp, 0x1c, 0x8c68);
2003 tg3_writephy(tp, 0x1c, 0x8d68);
2004 tg3_writephy(tp, 0x1c, 0x8c68);
2005 }
2006
2007 /* Clear pending interrupts... */
2008 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2009 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2010
2011 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2012 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
715116a1 2013 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1da177e4
LT
2014 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2015
2016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2017 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2018 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2019 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2020 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2021 else
2022 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2023 }
2024
2025 current_link_up = 0;
2026 current_speed = SPEED_INVALID;
2027 current_duplex = DUPLEX_INVALID;
2028
2029 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2030 u32 val;
2031
2032 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2033 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2034 if (!(val & (1 << 10))) {
2035 val |= (1 << 10);
2036 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2037 goto relink;
2038 }
2039 }
2040
2041 bmsr = 0;
2042 for (i = 0; i < 100; i++) {
2043 tg3_readphy(tp, MII_BMSR, &bmsr);
2044 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2045 (bmsr & BMSR_LSTATUS))
2046 break;
2047 udelay(40);
2048 }
2049
2050 if (bmsr & BMSR_LSTATUS) {
2051 u32 aux_stat, bmcr;
2052
2053 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2054 for (i = 0; i < 2000; i++) {
2055 udelay(10);
2056 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2057 aux_stat)
2058 break;
2059 }
2060
2061 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2062 &current_speed,
2063 &current_duplex);
2064
2065 bmcr = 0;
2066 for (i = 0; i < 200; i++) {
2067 tg3_readphy(tp, MII_BMCR, &bmcr);
2068 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2069 continue;
2070 if (bmcr && bmcr != 0x7fff)
2071 break;
2072 udelay(10);
2073 }
2074
2075 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2076 if (bmcr & BMCR_ANENABLE) {
2077 current_link_up = 1;
2078
2079 /* Force autoneg restart if we are exiting
2080 * low power mode.
2081 */
3600d918
MC
2082 if (!tg3_copper_is_advertising_all(tp,
2083 tp->link_config.advertising))
1da177e4
LT
2084 current_link_up = 0;
2085 } else {
2086 current_link_up = 0;
2087 }
2088 } else {
2089 if (!(bmcr & BMCR_ANENABLE) &&
2090 tp->link_config.speed == current_speed &&
2091 tp->link_config.duplex == current_duplex) {
2092 current_link_up = 1;
2093 } else {
2094 current_link_up = 0;
2095 }
2096 }
2097
2098 tp->link_config.active_speed = current_speed;
2099 tp->link_config.active_duplex = current_duplex;
2100 }
2101
2102 if (current_link_up == 1 &&
2103 (tp->link_config.active_duplex == DUPLEX_FULL) &&
2104 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2105 u32 local_adv, remote_adv;
2106
2107 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2108 local_adv = 0;
2109 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2110
2111 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2112 remote_adv = 0;
2113
2114 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2115
2116 /* If we are not advertising full pause capability,
2117 * something is wrong. Bring the link down and reconfigure.
2118 */
2119 if (local_adv != ADVERTISE_PAUSE_CAP) {
2120 current_link_up = 0;
2121 } else {
2122 tg3_setup_flow_control(tp, local_adv, remote_adv);
2123 }
2124 }
2125relink:
6921d201 2126 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1da177e4
LT
2127 u32 tmp;
2128
2129 tg3_phy_copper_begin(tp);
2130
2131 tg3_readphy(tp, MII_BMSR, &tmp);
2132 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2133 (tmp & BMSR_LSTATUS))
2134 current_link_up = 1;
2135 }
2136
2137 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2138 if (current_link_up == 1) {
2139 if (tp->link_config.active_speed == SPEED_100 ||
2140 tp->link_config.active_speed == SPEED_10)
2141 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2142 else
2143 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2144 } else
2145 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2146
2147 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2148 if (tp->link_config.active_duplex == DUPLEX_HALF)
2149 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2150
1da177e4 2151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
2152 if (current_link_up == 1 &&
2153 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 2154 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
2155 else
2156 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
2157 }
2158
2159 /* ??? Without this setting Netgear GA302T PHY does not
2160 * ??? send/receive packets...
2161 */
2162 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2163 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2164 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2165 tw32_f(MAC_MI_MODE, tp->mi_mode);
2166 udelay(80);
2167 }
2168
2169 tw32_f(MAC_MODE, tp->mac_mode);
2170 udelay(40);
2171
2172 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2173 /* Polled via timer. */
2174 tw32_f(MAC_EVENT, 0);
2175 } else {
2176 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2177 }
2178 udelay(40);
2179
2180 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2181 current_link_up == 1 &&
2182 tp->link_config.active_speed == SPEED_1000 &&
2183 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2184 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2185 udelay(120);
2186 tw32_f(MAC_STATUS,
2187 (MAC_STATUS_SYNC_CHANGED |
2188 MAC_STATUS_CFG_CHANGED));
2189 udelay(40);
2190 tg3_write_mem(tp,
2191 NIC_SRAM_FIRMWARE_MBOX,
2192 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2193 }
2194
2195 if (current_link_up != netif_carrier_ok(tp->dev)) {
2196 if (current_link_up)
2197 netif_carrier_on(tp->dev);
2198 else
2199 netif_carrier_off(tp->dev);
2200 tg3_link_report(tp);
2201 }
2202
2203 return 0;
2204}
2205
2206struct tg3_fiber_aneginfo {
2207 int state;
2208#define ANEG_STATE_UNKNOWN 0
2209#define ANEG_STATE_AN_ENABLE 1
2210#define ANEG_STATE_RESTART_INIT 2
2211#define ANEG_STATE_RESTART 3
2212#define ANEG_STATE_DISABLE_LINK_OK 4
2213#define ANEG_STATE_ABILITY_DETECT_INIT 5
2214#define ANEG_STATE_ABILITY_DETECT 6
2215#define ANEG_STATE_ACK_DETECT_INIT 7
2216#define ANEG_STATE_ACK_DETECT 8
2217#define ANEG_STATE_COMPLETE_ACK_INIT 9
2218#define ANEG_STATE_COMPLETE_ACK 10
2219#define ANEG_STATE_IDLE_DETECT_INIT 11
2220#define ANEG_STATE_IDLE_DETECT 12
2221#define ANEG_STATE_LINK_OK 13
2222#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2223#define ANEG_STATE_NEXT_PAGE_WAIT 15
2224
2225 u32 flags;
2226#define MR_AN_ENABLE 0x00000001
2227#define MR_RESTART_AN 0x00000002
2228#define MR_AN_COMPLETE 0x00000004
2229#define MR_PAGE_RX 0x00000008
2230#define MR_NP_LOADED 0x00000010
2231#define MR_TOGGLE_TX 0x00000020
2232#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2233#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2234#define MR_LP_ADV_SYM_PAUSE 0x00000100
2235#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2236#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2237#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2238#define MR_LP_ADV_NEXT_PAGE 0x00001000
2239#define MR_TOGGLE_RX 0x00002000
2240#define MR_NP_RX 0x00004000
2241
2242#define MR_LINK_OK 0x80000000
2243
2244 unsigned long link_time, cur_time;
2245
2246 u32 ability_match_cfg;
2247 int ability_match_count;
2248
2249 char ability_match, idle_match, ack_match;
2250
2251 u32 txconfig, rxconfig;
2252#define ANEG_CFG_NP 0x00000080
2253#define ANEG_CFG_ACK 0x00000040
2254#define ANEG_CFG_RF2 0x00000020
2255#define ANEG_CFG_RF1 0x00000010
2256#define ANEG_CFG_PS2 0x00000001
2257#define ANEG_CFG_PS1 0x00008000
2258#define ANEG_CFG_HD 0x00004000
2259#define ANEG_CFG_FD 0x00002000
2260#define ANEG_CFG_INVAL 0x00001f06
2261
2262};
2263#define ANEG_OK 0
2264#define ANEG_DONE 1
2265#define ANEG_TIMER_ENAB 2
2266#define ANEG_FAILED -1
2267
2268#define ANEG_STATE_SETTLE_TIME 10000
2269
2270static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2271 struct tg3_fiber_aneginfo *ap)
2272{
2273 unsigned long delta;
2274 u32 rx_cfg_reg;
2275 int ret;
2276
2277 if (ap->state == ANEG_STATE_UNKNOWN) {
2278 ap->rxconfig = 0;
2279 ap->link_time = 0;
2280 ap->cur_time = 0;
2281 ap->ability_match_cfg = 0;
2282 ap->ability_match_count = 0;
2283 ap->ability_match = 0;
2284 ap->idle_match = 0;
2285 ap->ack_match = 0;
2286 }
2287 ap->cur_time++;
2288
2289 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2290 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2291
2292 if (rx_cfg_reg != ap->ability_match_cfg) {
2293 ap->ability_match_cfg = rx_cfg_reg;
2294 ap->ability_match = 0;
2295 ap->ability_match_count = 0;
2296 } else {
2297 if (++ap->ability_match_count > 1) {
2298 ap->ability_match = 1;
2299 ap->ability_match_cfg = rx_cfg_reg;
2300 }
2301 }
2302 if (rx_cfg_reg & ANEG_CFG_ACK)
2303 ap->ack_match = 1;
2304 else
2305 ap->ack_match = 0;
2306
2307 ap->idle_match = 0;
2308 } else {
2309 ap->idle_match = 1;
2310 ap->ability_match_cfg = 0;
2311 ap->ability_match_count = 0;
2312 ap->ability_match = 0;
2313 ap->ack_match = 0;
2314
2315 rx_cfg_reg = 0;
2316 }
2317
2318 ap->rxconfig = rx_cfg_reg;
2319 ret = ANEG_OK;
2320
2321 switch(ap->state) {
2322 case ANEG_STATE_UNKNOWN:
2323 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2324 ap->state = ANEG_STATE_AN_ENABLE;
2325
2326 /* fallthru */
2327 case ANEG_STATE_AN_ENABLE:
2328 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2329 if (ap->flags & MR_AN_ENABLE) {
2330 ap->link_time = 0;
2331 ap->cur_time = 0;
2332 ap->ability_match_cfg = 0;
2333 ap->ability_match_count = 0;
2334 ap->ability_match = 0;
2335 ap->idle_match = 0;
2336 ap->ack_match = 0;
2337
2338 ap->state = ANEG_STATE_RESTART_INIT;
2339 } else {
2340 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2341 }
2342 break;
2343
2344 case ANEG_STATE_RESTART_INIT:
2345 ap->link_time = ap->cur_time;
2346 ap->flags &= ~(MR_NP_LOADED);
2347 ap->txconfig = 0;
2348 tw32(MAC_TX_AUTO_NEG, 0);
2349 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2350 tw32_f(MAC_MODE, tp->mac_mode);
2351 udelay(40);
2352
2353 ret = ANEG_TIMER_ENAB;
2354 ap->state = ANEG_STATE_RESTART;
2355
2356 /* fallthru */
2357 case ANEG_STATE_RESTART:
2358 delta = ap->cur_time - ap->link_time;
2359 if (delta > ANEG_STATE_SETTLE_TIME) {
2360 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2361 } else {
2362 ret = ANEG_TIMER_ENAB;
2363 }
2364 break;
2365
2366 case ANEG_STATE_DISABLE_LINK_OK:
2367 ret = ANEG_DONE;
2368 break;
2369
2370 case ANEG_STATE_ABILITY_DETECT_INIT:
2371 ap->flags &= ~(MR_TOGGLE_TX);
2372 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2373 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2374 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2375 tw32_f(MAC_MODE, tp->mac_mode);
2376 udelay(40);
2377
2378 ap->state = ANEG_STATE_ABILITY_DETECT;
2379 break;
2380
2381 case ANEG_STATE_ABILITY_DETECT:
2382 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2383 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2384 }
2385 break;
2386
2387 case ANEG_STATE_ACK_DETECT_INIT:
2388 ap->txconfig |= ANEG_CFG_ACK;
2389 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2390 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2391 tw32_f(MAC_MODE, tp->mac_mode);
2392 udelay(40);
2393
2394 ap->state = ANEG_STATE_ACK_DETECT;
2395
2396 /* fallthru */
2397 case ANEG_STATE_ACK_DETECT:
2398 if (ap->ack_match != 0) {
2399 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2400 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2401 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2402 } else {
2403 ap->state = ANEG_STATE_AN_ENABLE;
2404 }
2405 } else if (ap->ability_match != 0 &&
2406 ap->rxconfig == 0) {
2407 ap->state = ANEG_STATE_AN_ENABLE;
2408 }
2409 break;
2410
2411 case ANEG_STATE_COMPLETE_ACK_INIT:
2412 if (ap->rxconfig & ANEG_CFG_INVAL) {
2413 ret = ANEG_FAILED;
2414 break;
2415 }
2416 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2417 MR_LP_ADV_HALF_DUPLEX |
2418 MR_LP_ADV_SYM_PAUSE |
2419 MR_LP_ADV_ASYM_PAUSE |
2420 MR_LP_ADV_REMOTE_FAULT1 |
2421 MR_LP_ADV_REMOTE_FAULT2 |
2422 MR_LP_ADV_NEXT_PAGE |
2423 MR_TOGGLE_RX |
2424 MR_NP_RX);
2425 if (ap->rxconfig & ANEG_CFG_FD)
2426 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2427 if (ap->rxconfig & ANEG_CFG_HD)
2428 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2429 if (ap->rxconfig & ANEG_CFG_PS1)
2430 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2431 if (ap->rxconfig & ANEG_CFG_PS2)
2432 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2433 if (ap->rxconfig & ANEG_CFG_RF1)
2434 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2435 if (ap->rxconfig & ANEG_CFG_RF2)
2436 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2437 if (ap->rxconfig & ANEG_CFG_NP)
2438 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2439
2440 ap->link_time = ap->cur_time;
2441
2442 ap->flags ^= (MR_TOGGLE_TX);
2443 if (ap->rxconfig & 0x0008)
2444 ap->flags |= MR_TOGGLE_RX;
2445 if (ap->rxconfig & ANEG_CFG_NP)
2446 ap->flags |= MR_NP_RX;
2447 ap->flags |= MR_PAGE_RX;
2448
2449 ap->state = ANEG_STATE_COMPLETE_ACK;
2450 ret = ANEG_TIMER_ENAB;
2451 break;
2452
2453 case ANEG_STATE_COMPLETE_ACK:
2454 if (ap->ability_match != 0 &&
2455 ap->rxconfig == 0) {
2456 ap->state = ANEG_STATE_AN_ENABLE;
2457 break;
2458 }
2459 delta = ap->cur_time - ap->link_time;
2460 if (delta > ANEG_STATE_SETTLE_TIME) {
2461 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2462 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2463 } else {
2464 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2465 !(ap->flags & MR_NP_RX)) {
2466 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2467 } else {
2468 ret = ANEG_FAILED;
2469 }
2470 }
2471 }
2472 break;
2473
2474 case ANEG_STATE_IDLE_DETECT_INIT:
2475 ap->link_time = ap->cur_time;
2476 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2477 tw32_f(MAC_MODE, tp->mac_mode);
2478 udelay(40);
2479
2480 ap->state = ANEG_STATE_IDLE_DETECT;
2481 ret = ANEG_TIMER_ENAB;
2482 break;
2483
2484 case ANEG_STATE_IDLE_DETECT:
2485 if (ap->ability_match != 0 &&
2486 ap->rxconfig == 0) {
2487 ap->state = ANEG_STATE_AN_ENABLE;
2488 break;
2489 }
2490 delta = ap->cur_time - ap->link_time;
2491 if (delta > ANEG_STATE_SETTLE_TIME) {
2492 /* XXX another gem from the Broadcom driver :( */
2493 ap->state = ANEG_STATE_LINK_OK;
2494 }
2495 break;
2496
2497 case ANEG_STATE_LINK_OK:
2498 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2499 ret = ANEG_DONE;
2500 break;
2501
2502 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2503 /* ??? unimplemented */
2504 break;
2505
2506 case ANEG_STATE_NEXT_PAGE_WAIT:
2507 /* ??? unimplemented */
2508 break;
2509
2510 default:
2511 ret = ANEG_FAILED;
2512 break;
2513 };
2514
2515 return ret;
2516}
2517
2518static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2519{
2520 int res = 0;
2521 struct tg3_fiber_aneginfo aninfo;
2522 int status = ANEG_FAILED;
2523 unsigned int tick;
2524 u32 tmp;
2525
2526 tw32_f(MAC_TX_AUTO_NEG, 0);
2527
2528 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2529 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2530 udelay(40);
2531
2532 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2533 udelay(40);
2534
2535 memset(&aninfo, 0, sizeof(aninfo));
2536 aninfo.flags |= MR_AN_ENABLE;
2537 aninfo.state = ANEG_STATE_UNKNOWN;
2538 aninfo.cur_time = 0;
2539 tick = 0;
2540 while (++tick < 195000) {
2541 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2542 if (status == ANEG_DONE || status == ANEG_FAILED)
2543 break;
2544
2545 udelay(1);
2546 }
2547
2548 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2549 tw32_f(MAC_MODE, tp->mac_mode);
2550 udelay(40);
2551
2552 *flags = aninfo.flags;
2553
2554 if (status == ANEG_DONE &&
2555 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2556 MR_LP_ADV_FULL_DUPLEX)))
2557 res = 1;
2558
2559 return res;
2560}
2561
2562static void tg3_init_bcm8002(struct tg3 *tp)
2563{
2564 u32 mac_status = tr32(MAC_STATUS);
2565 int i;
2566
2567 /* Reset when initting first time or we have a link. */
2568 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2569 !(mac_status & MAC_STATUS_PCS_SYNCED))
2570 return;
2571
2572 /* Set PLL lock range. */
2573 tg3_writephy(tp, 0x16, 0x8007);
2574
2575 /* SW reset */
2576 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2577
2578 /* Wait for reset to complete. */
2579 /* XXX schedule_timeout() ... */
2580 for (i = 0; i < 500; i++)
2581 udelay(10);
2582
2583 /* Config mode; select PMA/Ch 1 regs. */
2584 tg3_writephy(tp, 0x10, 0x8411);
2585
2586 /* Enable auto-lock and comdet, select txclk for tx. */
2587 tg3_writephy(tp, 0x11, 0x0a10);
2588
2589 tg3_writephy(tp, 0x18, 0x00a0);
2590 tg3_writephy(tp, 0x16, 0x41ff);
2591
2592 /* Assert and deassert POR. */
2593 tg3_writephy(tp, 0x13, 0x0400);
2594 udelay(40);
2595 tg3_writephy(tp, 0x13, 0x0000);
2596
2597 tg3_writephy(tp, 0x11, 0x0a50);
2598 udelay(40);
2599 tg3_writephy(tp, 0x11, 0x0a10);
2600
2601 /* Wait for signal to stabilize */
2602 /* XXX schedule_timeout() ... */
2603 for (i = 0; i < 15000; i++)
2604 udelay(10);
2605
2606 /* Deselect the channel register so we can read the PHYID
2607 * later.
2608 */
2609 tg3_writephy(tp, 0x10, 0x8011);
2610}
2611
2612static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2613{
2614 u32 sg_dig_ctrl, sg_dig_status;
2615 u32 serdes_cfg, expected_sg_dig_ctrl;
2616 int workaround, port_a;
2617 int current_link_up;
2618
2619 serdes_cfg = 0;
2620 expected_sg_dig_ctrl = 0;
2621 workaround = 0;
2622 port_a = 1;
2623 current_link_up = 0;
2624
2625 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2626 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2627 workaround = 1;
2628 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2629 port_a = 0;
2630
2631 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2632 /* preserve bits 20-23 for voltage regulator */
2633 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2634 }
2635
2636 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2637
2638 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2639 if (sg_dig_ctrl & (1 << 31)) {
2640 if (workaround) {
2641 u32 val = serdes_cfg;
2642
2643 if (port_a)
2644 val |= 0xc010000;
2645 else
2646 val |= 0x4010000;
2647 tw32_f(MAC_SERDES_CFG, val);
2648 }
2649 tw32_f(SG_DIG_CTRL, 0x01388400);
2650 }
2651 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2652 tg3_setup_flow_control(tp, 0, 0);
2653 current_link_up = 1;
2654 }
2655 goto out;
2656 }
2657
2658 /* Want auto-negotiation. */
2659 expected_sg_dig_ctrl = 0x81388400;
2660
2661 /* Pause capability */
2662 expected_sg_dig_ctrl |= (1 << 11);
2663
2664 /* Asymettric pause */
2665 expected_sg_dig_ctrl |= (1 << 12);
2666
2667 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3d3ebe74
MC
2668 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2669 tp->serdes_counter &&
2670 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2671 MAC_STATUS_RCVD_CFG)) ==
2672 MAC_STATUS_PCS_SYNCED)) {
2673 tp->serdes_counter--;
2674 current_link_up = 1;
2675 goto out;
2676 }
2677restart_autoneg:
1da177e4
LT
2678 if (workaround)
2679 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2680 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2681 udelay(5);
2682 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2683
3d3ebe74
MC
2684 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2685 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1da177e4
LT
2686 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2687 MAC_STATUS_SIGNAL_DET)) {
3d3ebe74 2688 sg_dig_status = tr32(SG_DIG_STATUS);
1da177e4
LT
2689 mac_status = tr32(MAC_STATUS);
2690
2691 if ((sg_dig_status & (1 << 1)) &&
2692 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2693 u32 local_adv, remote_adv;
2694
2695 local_adv = ADVERTISE_PAUSE_CAP;
2696 remote_adv = 0;
2697 if (sg_dig_status & (1 << 19))
2698 remote_adv |= LPA_PAUSE_CAP;
2699 if (sg_dig_status & (1 << 20))
2700 remote_adv |= LPA_PAUSE_ASYM;
2701
2702 tg3_setup_flow_control(tp, local_adv, remote_adv);
2703 current_link_up = 1;
3d3ebe74
MC
2704 tp->serdes_counter = 0;
2705 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1da177e4 2706 } else if (!(sg_dig_status & (1 << 1))) {
3d3ebe74
MC
2707 if (tp->serdes_counter)
2708 tp->serdes_counter--;
1da177e4
LT
2709 else {
2710 if (workaround) {
2711 u32 val = serdes_cfg;
2712
2713 if (port_a)
2714 val |= 0xc010000;
2715 else
2716 val |= 0x4010000;
2717
2718 tw32_f(MAC_SERDES_CFG, val);
2719 }
2720
2721 tw32_f(SG_DIG_CTRL, 0x01388400);
2722 udelay(40);
2723
2724 /* Link parallel detection - link is up */
2725 /* only if we have PCS_SYNC and not */
2726 /* receiving config code words */
2727 mac_status = tr32(MAC_STATUS);
2728 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2729 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2730 tg3_setup_flow_control(tp, 0, 0);
2731 current_link_up = 1;
3d3ebe74
MC
2732 tp->tg3_flags2 |=
2733 TG3_FLG2_PARALLEL_DETECT;
2734 tp->serdes_counter =
2735 SERDES_PARALLEL_DET_TIMEOUT;
2736 } else
2737 goto restart_autoneg;
1da177e4
LT
2738 }
2739 }
3d3ebe74
MC
2740 } else {
2741 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2742 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
1da177e4
LT
2743 }
2744
2745out:
2746 return current_link_up;
2747}
2748
2749static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2750{
2751 int current_link_up = 0;
2752
5cf64b8a 2753 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 2754 goto out;
1da177e4
LT
2755
2756 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2757 u32 flags;
2758 int i;
6aa20a22 2759
1da177e4
LT
2760 if (fiber_autoneg(tp, &flags)) {
2761 u32 local_adv, remote_adv;
2762
2763 local_adv = ADVERTISE_PAUSE_CAP;
2764 remote_adv = 0;
2765 if (flags & MR_LP_ADV_SYM_PAUSE)
2766 remote_adv |= LPA_PAUSE_CAP;
2767 if (flags & MR_LP_ADV_ASYM_PAUSE)
2768 remote_adv |= LPA_PAUSE_ASYM;
2769
2770 tg3_setup_flow_control(tp, local_adv, remote_adv);
2771
1da177e4
LT
2772 current_link_up = 1;
2773 }
2774 for (i = 0; i < 30; i++) {
2775 udelay(20);
2776 tw32_f(MAC_STATUS,
2777 (MAC_STATUS_SYNC_CHANGED |
2778 MAC_STATUS_CFG_CHANGED));
2779 udelay(40);
2780 if ((tr32(MAC_STATUS) &
2781 (MAC_STATUS_SYNC_CHANGED |
2782 MAC_STATUS_CFG_CHANGED)) == 0)
2783 break;
2784 }
2785
2786 mac_status = tr32(MAC_STATUS);
2787 if (current_link_up == 0 &&
2788 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2789 !(mac_status & MAC_STATUS_RCVD_CFG))
2790 current_link_up = 1;
2791 } else {
2792 /* Forcing 1000FD link up. */
2793 current_link_up = 1;
1da177e4
LT
2794
2795 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2796 udelay(40);
e8f3f6ca
MC
2797
2798 tw32_f(MAC_MODE, tp->mac_mode);
2799 udelay(40);
1da177e4
LT
2800 }
2801
2802out:
2803 return current_link_up;
2804}
2805
2806static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2807{
2808 u32 orig_pause_cfg;
2809 u16 orig_active_speed;
2810 u8 orig_active_duplex;
2811 u32 mac_status;
2812 int current_link_up;
2813 int i;
2814
2815 orig_pause_cfg =
2816 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2817 TG3_FLAG_TX_PAUSE));
2818 orig_active_speed = tp->link_config.active_speed;
2819 orig_active_duplex = tp->link_config.active_duplex;
2820
2821 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2822 netif_carrier_ok(tp->dev) &&
2823 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2824 mac_status = tr32(MAC_STATUS);
2825 mac_status &= (MAC_STATUS_PCS_SYNCED |
2826 MAC_STATUS_SIGNAL_DET |
2827 MAC_STATUS_CFG_CHANGED |
2828 MAC_STATUS_RCVD_CFG);
2829 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2830 MAC_STATUS_SIGNAL_DET)) {
2831 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2832 MAC_STATUS_CFG_CHANGED));
2833 return 0;
2834 }
2835 }
2836
2837 tw32_f(MAC_TX_AUTO_NEG, 0);
2838
2839 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2840 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2841 tw32_f(MAC_MODE, tp->mac_mode);
2842 udelay(40);
2843
2844 if (tp->phy_id == PHY_ID_BCM8002)
2845 tg3_init_bcm8002(tp);
2846
2847 /* Enable link change event even when serdes polling. */
2848 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2849 udelay(40);
2850
2851 current_link_up = 0;
2852 mac_status = tr32(MAC_STATUS);
2853
2854 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2855 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2856 else
2857 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2858
1da177e4
LT
2859 tp->hw_status->status =
2860 (SD_STATUS_UPDATED |
2861 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2862
2863 for (i = 0; i < 100; i++) {
2864 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2865 MAC_STATUS_CFG_CHANGED));
2866 udelay(5);
2867 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3d3ebe74
MC
2868 MAC_STATUS_CFG_CHANGED |
2869 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
1da177e4
LT
2870 break;
2871 }
2872
2873 mac_status = tr32(MAC_STATUS);
2874 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2875 current_link_up = 0;
3d3ebe74
MC
2876 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2877 tp->serdes_counter == 0) {
1da177e4
LT
2878 tw32_f(MAC_MODE, (tp->mac_mode |
2879 MAC_MODE_SEND_CONFIGS));
2880 udelay(1);
2881 tw32_f(MAC_MODE, tp->mac_mode);
2882 }
2883 }
2884
2885 if (current_link_up == 1) {
2886 tp->link_config.active_speed = SPEED_1000;
2887 tp->link_config.active_duplex = DUPLEX_FULL;
2888 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2889 LED_CTRL_LNKLED_OVERRIDE |
2890 LED_CTRL_1000MBPS_ON));
2891 } else {
2892 tp->link_config.active_speed = SPEED_INVALID;
2893 tp->link_config.active_duplex = DUPLEX_INVALID;
2894 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2895 LED_CTRL_LNKLED_OVERRIDE |
2896 LED_CTRL_TRAFFIC_OVERRIDE));
2897 }
2898
2899 if (current_link_up != netif_carrier_ok(tp->dev)) {
2900 if (current_link_up)
2901 netif_carrier_on(tp->dev);
2902 else
2903 netif_carrier_off(tp->dev);
2904 tg3_link_report(tp);
2905 } else {
2906 u32 now_pause_cfg =
2907 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2908 TG3_FLAG_TX_PAUSE);
2909 if (orig_pause_cfg != now_pause_cfg ||
2910 orig_active_speed != tp->link_config.active_speed ||
2911 orig_active_duplex != tp->link_config.active_duplex)
2912 tg3_link_report(tp);
2913 }
2914
2915 return 0;
2916}
2917
747e8f8b
MC
2918static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2919{
2920 int current_link_up, err = 0;
2921 u32 bmsr, bmcr;
2922 u16 current_speed;
2923 u8 current_duplex;
2924
2925 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2926 tw32_f(MAC_MODE, tp->mac_mode);
2927 udelay(40);
2928
2929 tw32(MAC_EVENT, 0);
2930
2931 tw32_f(MAC_STATUS,
2932 (MAC_STATUS_SYNC_CHANGED |
2933 MAC_STATUS_CFG_CHANGED |
2934 MAC_STATUS_MI_COMPLETION |
2935 MAC_STATUS_LNKSTATE_CHANGED));
2936 udelay(40);
2937
2938 if (force_reset)
2939 tg3_phy_reset(tp);
2940
2941 current_link_up = 0;
2942 current_speed = SPEED_INVALID;
2943 current_duplex = DUPLEX_INVALID;
2944
2945 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2946 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2948 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2949 bmsr |= BMSR_LSTATUS;
2950 else
2951 bmsr &= ~BMSR_LSTATUS;
2952 }
747e8f8b
MC
2953
2954 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2955
2956 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2957 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2958 /* do nothing, just check for link up at the end */
2959 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2960 u32 adv, new_adv;
2961
2962 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2963 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2964 ADVERTISE_1000XPAUSE |
2965 ADVERTISE_1000XPSE_ASYM |
2966 ADVERTISE_SLCT);
2967
2968 /* Always advertise symmetric PAUSE just like copper */
2969 new_adv |= ADVERTISE_1000XPAUSE;
2970
2971 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2972 new_adv |= ADVERTISE_1000XHALF;
2973 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2974 new_adv |= ADVERTISE_1000XFULL;
2975
2976 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2977 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2978 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2979 tg3_writephy(tp, MII_BMCR, bmcr);
2980
2981 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 2982 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
747e8f8b
MC
2983 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2984
2985 return err;
2986 }
2987 } else {
2988 u32 new_bmcr;
2989
2990 bmcr &= ~BMCR_SPEED1000;
2991 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2992
2993 if (tp->link_config.duplex == DUPLEX_FULL)
2994 new_bmcr |= BMCR_FULLDPLX;
2995
2996 if (new_bmcr != bmcr) {
2997 /* BMCR_SPEED1000 is a reserved bit that needs
2998 * to be set on write.
2999 */
3000 new_bmcr |= BMCR_SPEED1000;
3001
3002 /* Force a linkdown */
3003 if (netif_carrier_ok(tp->dev)) {
3004 u32 adv;
3005
3006 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3007 adv &= ~(ADVERTISE_1000XFULL |
3008 ADVERTISE_1000XHALF |
3009 ADVERTISE_SLCT);
3010 tg3_writephy(tp, MII_ADVERTISE, adv);
3011 tg3_writephy(tp, MII_BMCR, bmcr |
3012 BMCR_ANRESTART |
3013 BMCR_ANENABLE);
3014 udelay(10);
3015 netif_carrier_off(tp->dev);
3016 }
3017 tg3_writephy(tp, MII_BMCR, new_bmcr);
3018 bmcr = new_bmcr;
3019 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3020 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
3021 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3022 ASIC_REV_5714) {
3023 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3024 bmsr |= BMSR_LSTATUS;
3025 else
3026 bmsr &= ~BMSR_LSTATUS;
3027 }
747e8f8b
MC
3028 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3029 }
3030 }
3031
3032 if (bmsr & BMSR_LSTATUS) {
3033 current_speed = SPEED_1000;
3034 current_link_up = 1;
3035 if (bmcr & BMCR_FULLDPLX)
3036 current_duplex = DUPLEX_FULL;
3037 else
3038 current_duplex = DUPLEX_HALF;
3039
3040 if (bmcr & BMCR_ANENABLE) {
3041 u32 local_adv, remote_adv, common;
3042
3043 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3044 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3045 common = local_adv & remote_adv;
3046 if (common & (ADVERTISE_1000XHALF |
3047 ADVERTISE_1000XFULL)) {
3048 if (common & ADVERTISE_1000XFULL)
3049 current_duplex = DUPLEX_FULL;
3050 else
3051 current_duplex = DUPLEX_HALF;
3052
3053 tg3_setup_flow_control(tp, local_adv,
3054 remote_adv);
3055 }
3056 else
3057 current_link_up = 0;
3058 }
3059 }
3060
3061 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3062 if (tp->link_config.active_duplex == DUPLEX_HALF)
3063 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3064
3065 tw32_f(MAC_MODE, tp->mac_mode);
3066 udelay(40);
3067
3068 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3069
3070 tp->link_config.active_speed = current_speed;
3071 tp->link_config.active_duplex = current_duplex;
3072
3073 if (current_link_up != netif_carrier_ok(tp->dev)) {
3074 if (current_link_up)
3075 netif_carrier_on(tp->dev);
3076 else {
3077 netif_carrier_off(tp->dev);
3078 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3079 }
3080 tg3_link_report(tp);
3081 }
3082 return err;
3083}
3084
3085static void tg3_serdes_parallel_detect(struct tg3 *tp)
3086{
3d3ebe74 3087 if (tp->serdes_counter) {
747e8f8b 3088 /* Give autoneg time to complete. */
3d3ebe74 3089 tp->serdes_counter--;
747e8f8b
MC
3090 return;
3091 }
3092 if (!netif_carrier_ok(tp->dev) &&
3093 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3094 u32 bmcr;
3095
3096 tg3_readphy(tp, MII_BMCR, &bmcr);
3097 if (bmcr & BMCR_ANENABLE) {
3098 u32 phy1, phy2;
3099
3100 /* Select shadow register 0x1f */
3101 tg3_writephy(tp, 0x1c, 0x7c00);
3102 tg3_readphy(tp, 0x1c, &phy1);
3103
3104 /* Select expansion interrupt status register */
3105 tg3_writephy(tp, 0x17, 0x0f01);
3106 tg3_readphy(tp, 0x15, &phy2);
3107 tg3_readphy(tp, 0x15, &phy2);
3108
3109 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3110 /* We have signal detect and not receiving
3111 * config code words, link is up by parallel
3112 * detection.
3113 */
3114
3115 bmcr &= ~BMCR_ANENABLE;
3116 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3117 tg3_writephy(tp, MII_BMCR, bmcr);
3118 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3119 }
3120 }
3121 }
3122 else if (netif_carrier_ok(tp->dev) &&
3123 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3124 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3125 u32 phy2;
3126
3127 /* Select expansion interrupt status register */
3128 tg3_writephy(tp, 0x17, 0x0f01);
3129 tg3_readphy(tp, 0x15, &phy2);
3130 if (phy2 & 0x20) {
3131 u32 bmcr;
3132
3133 /* Config code words received, turn on autoneg. */
3134 tg3_readphy(tp, MII_BMCR, &bmcr);
3135 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3136
3137 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3138
3139 }
3140 }
3141}
3142
1da177e4
LT
3143static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3144{
3145 int err;
3146
3147 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3148 err = tg3_setup_fiber_phy(tp, force_reset);
747e8f8b
MC
3149 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3150 err = tg3_setup_fiber_mii_phy(tp, force_reset);
1da177e4
LT
3151 } else {
3152 err = tg3_setup_copper_phy(tp, force_reset);
3153 }
3154
b5af7126
MC
3155 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3156 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
aa6c91fe
MC
3157 u32 val, scale;
3158
3159 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3160 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3161 scale = 65;
3162 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3163 scale = 6;
3164 else
3165 scale = 12;
3166
3167 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3168 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3169 tw32(GRC_MISC_CFG, val);
3170 }
3171
1da177e4
LT
3172 if (tp->link_config.active_speed == SPEED_1000 &&
3173 tp->link_config.active_duplex == DUPLEX_HALF)
3174 tw32(MAC_TX_LENGTHS,
3175 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3176 (6 << TX_LENGTHS_IPG_SHIFT) |
3177 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3178 else
3179 tw32(MAC_TX_LENGTHS,
3180 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3181 (6 << TX_LENGTHS_IPG_SHIFT) |
3182 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3183
3184 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3185 if (netif_carrier_ok(tp->dev)) {
3186 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 3187 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
3188 } else {
3189 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3190 }
3191 }
3192
8ed5d97e
MC
3193 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3194 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3195 if (!netif_carrier_ok(tp->dev))
3196 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3197 tp->pwrmgmt_thresh;
3198 else
3199 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3200 tw32(PCIE_PWR_MGMT_THRESH, val);
3201 }
3202
1da177e4
LT
3203 return err;
3204}
3205
df3e6548
MC
3206/* This is called whenever we suspect that the system chipset is re-
3207 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3208 * is bogus tx completions. We try to recover by setting the
3209 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3210 * in the workqueue.
3211 */
3212static void tg3_tx_recover(struct tg3 *tp)
3213{
3214 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3215 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3216
3217 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3218 "mapped I/O cycles to the network device, attempting to "
3219 "recover. Please report the problem to the driver maintainer "
3220 "and include system chipset information.\n", tp->dev->name);
3221
3222 spin_lock(&tp->lock);
df3e6548 3223 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
df3e6548
MC
3224 spin_unlock(&tp->lock);
3225}
3226
1b2a7205
MC
3227static inline u32 tg3_tx_avail(struct tg3 *tp)
3228{
3229 smp_mb();
3230 return (tp->tx_pending -
3231 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3232}
3233
1da177e4
LT
3234/* Tigon3 never reports partial packet sends. So we do not
3235 * need special logic to handle SKBs that have not had all
3236 * of their frags sent yet, like SunGEM does.
3237 */
3238static void tg3_tx(struct tg3 *tp)
3239{
3240 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3241 u32 sw_idx = tp->tx_cons;
3242
3243 while (sw_idx != hw_idx) {
3244 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3245 struct sk_buff *skb = ri->skb;
df3e6548
MC
3246 int i, tx_bug = 0;
3247
3248 if (unlikely(skb == NULL)) {
3249 tg3_tx_recover(tp);
3250 return;
3251 }
1da177e4 3252
1da177e4
LT
3253 pci_unmap_single(tp->pdev,
3254 pci_unmap_addr(ri, mapping),
3255 skb_headlen(skb),
3256 PCI_DMA_TODEVICE);
3257
3258 ri->skb = NULL;
3259
3260 sw_idx = NEXT_TX(sw_idx);
3261
3262 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1da177e4 3263 ri = &tp->tx_buffers[sw_idx];
df3e6548
MC
3264 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3265 tx_bug = 1;
1da177e4
LT
3266
3267 pci_unmap_page(tp->pdev,
3268 pci_unmap_addr(ri, mapping),
3269 skb_shinfo(skb)->frags[i].size,
3270 PCI_DMA_TODEVICE);
3271
3272 sw_idx = NEXT_TX(sw_idx);
3273 }
3274
f47c11ee 3275 dev_kfree_skb(skb);
df3e6548
MC
3276
3277 if (unlikely(tx_bug)) {
3278 tg3_tx_recover(tp);
3279 return;
3280 }
1da177e4
LT
3281 }
3282
3283 tp->tx_cons = sw_idx;
3284
1b2a7205
MC
3285 /* Need to make the tx_cons update visible to tg3_start_xmit()
3286 * before checking for netif_queue_stopped(). Without the
3287 * memory barrier, there is a small possibility that tg3_start_xmit()
3288 * will miss it and cause the queue to be stopped forever.
3289 */
3290 smp_mb();
3291
3292 if (unlikely(netif_queue_stopped(tp->dev) &&
42952231 3293 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
1b2a7205 3294 netif_tx_lock(tp->dev);
51b91468 3295 if (netif_queue_stopped(tp->dev) &&
42952231 3296 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
51b91468 3297 netif_wake_queue(tp->dev);
1b2a7205 3298 netif_tx_unlock(tp->dev);
51b91468 3299 }
1da177e4
LT
3300}
3301
3302/* Returns size of skb allocated or < 0 on error.
3303 *
3304 * We only need to fill in the address because the other members
3305 * of the RX descriptor are invariant, see tg3_init_rings.
3306 *
3307 * Note the purposeful assymetry of cpu vs. chip accesses. For
3308 * posting buffers we only dirty the first cache line of the RX
3309 * descriptor (containing the address). Whereas for the RX status
3310 * buffers the cpu only reads the last cacheline of the RX descriptor
3311 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3312 */
3313static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3314 int src_idx, u32 dest_idx_unmasked)
3315{
3316 struct tg3_rx_buffer_desc *desc;
3317 struct ring_info *map, *src_map;
3318 struct sk_buff *skb;
3319 dma_addr_t mapping;
3320 int skb_size, dest_idx;
3321
3322 src_map = NULL;
3323 switch (opaque_key) {
3324 case RXD_OPAQUE_RING_STD:
3325 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3326 desc = &tp->rx_std[dest_idx];
3327 map = &tp->rx_std_buffers[dest_idx];
3328 if (src_idx >= 0)
3329 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 3330 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
3331 break;
3332
3333 case RXD_OPAQUE_RING_JUMBO:
3334 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3335 desc = &tp->rx_jumbo[dest_idx];
3336 map = &tp->rx_jumbo_buffers[dest_idx];
3337 if (src_idx >= 0)
3338 src_map = &tp->rx_jumbo_buffers[src_idx];
3339 skb_size = RX_JUMBO_PKT_BUF_SZ;
3340 break;
3341
3342 default:
3343 return -EINVAL;
3344 };
3345
3346 /* Do not overwrite any of the map or rp information
3347 * until we are sure we can commit to a new buffer.
3348 *
3349 * Callers depend upon this behavior and assume that
3350 * we leave everything unchanged if we fail.
3351 */
a20e9c62 3352 skb = netdev_alloc_skb(tp->dev, skb_size);
1da177e4
LT
3353 if (skb == NULL)
3354 return -ENOMEM;
3355
1da177e4
LT
3356 skb_reserve(skb, tp->rx_offset);
3357
3358 mapping = pci_map_single(tp->pdev, skb->data,
3359 skb_size - tp->rx_offset,
3360 PCI_DMA_FROMDEVICE);
3361
3362 map->skb = skb;
3363 pci_unmap_addr_set(map, mapping, mapping);
3364
3365 if (src_map != NULL)
3366 src_map->skb = NULL;
3367
3368 desc->addr_hi = ((u64)mapping >> 32);
3369 desc->addr_lo = ((u64)mapping & 0xffffffff);
3370
3371 return skb_size;
3372}
3373
3374/* We only need to move over in the address because the other
3375 * members of the RX descriptor are invariant. See notes above
3376 * tg3_alloc_rx_skb for full details.
3377 */
3378static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3379 int src_idx, u32 dest_idx_unmasked)
3380{
3381 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3382 struct ring_info *src_map, *dest_map;
3383 int dest_idx;
3384
3385 switch (opaque_key) {
3386 case RXD_OPAQUE_RING_STD:
3387 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3388 dest_desc = &tp->rx_std[dest_idx];
3389 dest_map = &tp->rx_std_buffers[dest_idx];
3390 src_desc = &tp->rx_std[src_idx];
3391 src_map = &tp->rx_std_buffers[src_idx];
3392 break;
3393
3394 case RXD_OPAQUE_RING_JUMBO:
3395 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3396 dest_desc = &tp->rx_jumbo[dest_idx];
3397 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3398 src_desc = &tp->rx_jumbo[src_idx];
3399 src_map = &tp->rx_jumbo_buffers[src_idx];
3400 break;
3401
3402 default:
3403 return;
3404 };
3405
3406 dest_map->skb = src_map->skb;
3407 pci_unmap_addr_set(dest_map, mapping,
3408 pci_unmap_addr(src_map, mapping));
3409 dest_desc->addr_hi = src_desc->addr_hi;
3410 dest_desc->addr_lo = src_desc->addr_lo;
3411
3412 src_map->skb = NULL;
3413}
3414
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame to the stack via the accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3421
3422/* The RX ring scheme is composed of multiple rings which post fresh
3423 * buffers to the chip, and one special ring the chip uses to report
3424 * status back to the host.
3425 *
3426 * The special ring reports the status of received packets to the
3427 * host. The chip does not write into the original descriptor the
3428 * RX buffer was obtained from. The chip simply takes the original
3429 * descriptor as provided by the host, updates the status and length
3430 * field, then writes this into the next status ring entry.
3431 *
3432 * Each ring the host uses to post buffers to the chip is described
3433 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3434 * it is first placed into the on-chip ram. When the packet's length
3435 * is known, it walks down the TG3_BDINFO entries to select the ring.
3436 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3437 * which is within the range of the new packet's length is chosen.
3438 *
3439 * The "separate ring for rx status" scheme may sound queer, but it makes
3440 * sense from a cache coherency perspective. If only the host writes
3441 * to the buffer post rings, and only the chip writes to the rx status
3442 * rings, then cache lines never move beyond shared-modified state.
3443 * If both the host and chip were to write into the same ring, cache line
3444 * eviction could occur since both entities want it in an exclusive state.
3445 */
3446static int tg3_rx(struct tg3 *tp, int budget)
3447{
f92905de 3448 u32 work_mask, rx_std_posted = 0;
483ba50b
MC
3449 u32 sw_idx = tp->rx_rcb_ptr;
3450 u16 hw_idx;
1da177e4
LT
3451 int received;
3452
3453 hw_idx = tp->hw_status->idx[0].rx_producer;
3454 /*
3455 * We need to order the read of hw_idx and the read of
3456 * the opaque cookie.
3457 */
3458 rmb();
1da177e4
LT
3459 work_mask = 0;
3460 received = 0;
3461 while (sw_idx != hw_idx && budget > 0) {
3462 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3463 unsigned int len;
3464 struct sk_buff *skb;
3465 dma_addr_t dma_addr;
3466 u32 opaque_key, desc_idx, *post_ptr;
3467
3468 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3469 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3470 if (opaque_key == RXD_OPAQUE_RING_STD) {
3471 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3472 mapping);
3473 skb = tp->rx_std_buffers[desc_idx].skb;
3474 post_ptr = &tp->rx_std_ptr;
f92905de 3475 rx_std_posted++;
1da177e4
LT
3476 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3477 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3478 mapping);
3479 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3480 post_ptr = &tp->rx_jumbo_ptr;
3481 }
3482 else {
3483 goto next_pkt_nopost;
3484 }
3485
3486 work_mask |= opaque_key;
3487
3488 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3489 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3490 drop_it:
3491 tg3_recycle_rx(tp, opaque_key,
3492 desc_idx, *post_ptr);
3493 drop_it_no_recycle:
3494 /* Other statistics kept track of by card. */
3495 tp->net_stats.rx_dropped++;
3496 goto next_pkt;
3497 }
3498
3499 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3500
6aa20a22 3501 if (len > RX_COPY_THRESHOLD
1da177e4
LT
3502 && tp->rx_offset == 2
3503 /* rx_offset != 2 iff this is a 5701 card running
3504 * in PCI-X mode [see tg3_get_invariants()] */
3505 ) {
3506 int skb_size;
3507
3508 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3509 desc_idx, *post_ptr);
3510 if (skb_size < 0)
3511 goto drop_it;
3512
3513 pci_unmap_single(tp->pdev, dma_addr,
3514 skb_size - tp->rx_offset,
3515 PCI_DMA_FROMDEVICE);
3516
3517 skb_put(skb, len);
3518 } else {
3519 struct sk_buff *copy_skb;
3520
3521 tg3_recycle_rx(tp, opaque_key,
3522 desc_idx, *post_ptr);
3523
a20e9c62 3524 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
1da177e4
LT
3525 if (copy_skb == NULL)
3526 goto drop_it_no_recycle;
3527
1da177e4
LT
3528 skb_reserve(copy_skb, 2);
3529 skb_put(copy_skb, len);
3530 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
d626f62b 3531 skb_copy_from_linear_data(skb, copy_skb->data, len);
1da177e4
LT
3532 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3533
3534 /* We'll reuse the original ring buffer. */
3535 skb = copy_skb;
3536 }
3537
3538 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3539 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3540 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3541 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3542 skb->ip_summed = CHECKSUM_UNNECESSARY;
3543 else
3544 skb->ip_summed = CHECKSUM_NONE;
3545
3546 skb->protocol = eth_type_trans(skb, tp->dev);
3547#if TG3_VLAN_TAG_USED
3548 if (tp->vlgrp != NULL &&
3549 desc->type_flags & RXD_FLAG_VLAN) {
3550 tg3_vlan_rx(tp, skb,
3551 desc->err_vlan & RXD_VLAN_MASK);
3552 } else
3553#endif
3554 netif_receive_skb(skb);
3555
3556 tp->dev->last_rx = jiffies;
3557 received++;
3558 budget--;
3559
3560next_pkt:
3561 (*post_ptr)++;
f92905de
MC
3562
3563 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3564 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3565
3566 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3567 TG3_64BIT_REG_LOW, idx);
3568 work_mask &= ~RXD_OPAQUE_RING_STD;
3569 rx_std_posted = 0;
3570 }
1da177e4 3571next_pkt_nopost:
483ba50b 3572 sw_idx++;
6b31a515 3573 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
52f6d697
MC
3574
3575 /* Refresh hw_idx to see if there is new work */
3576 if (sw_idx == hw_idx) {
3577 hw_idx = tp->hw_status->idx[0].rx_producer;
3578 rmb();
3579 }
1da177e4
LT
3580 }
3581
3582 /* ACK the status ring. */
483ba50b
MC
3583 tp->rx_rcb_ptr = sw_idx;
3584 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
1da177e4
LT
3585
3586 /* Refill RX ring(s). */
3587 if (work_mask & RXD_OPAQUE_RING_STD) {
3588 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3589 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3590 sw_idx);
3591 }
3592 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3593 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3594 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3595 sw_idx);
3596 }
3597 mmiowb();
3598
3599 return received;
3600}
3601
6f535763 3602static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
1da177e4 3603{
1da177e4 3604 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4 3605
1da177e4
LT
3606 /* handle link change and other phy events */
3607 if (!(tp->tg3_flags &
3608 (TG3_FLAG_USE_LINKCHG_REG |
3609 TG3_FLAG_POLL_SERDES))) {
3610 if (sblk->status & SD_STATUS_LINK_CHG) {
3611 sblk->status = SD_STATUS_UPDATED |
3612 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 3613 spin_lock(&tp->lock);
1da177e4 3614 tg3_setup_phy(tp, 0);
f47c11ee 3615 spin_unlock(&tp->lock);
1da177e4
LT
3616 }
3617 }
3618
3619 /* run TX completion thread */
3620 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
1da177e4 3621 tg3_tx(tp);
6f535763 3622 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4fd7ab59 3623 return work_done;
1da177e4
LT
3624 }
3625
1da177e4
LT
3626 /* run RX thread, within the bounds set by NAPI.
3627 * All RX "locking" is done by ensuring outside
bea3348e 3628 * code synchronizes with tg3->napi.poll()
1da177e4 3629 */
bea3348e 3630 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
6f535763 3631 work_done += tg3_rx(tp, budget - work_done);
1da177e4 3632
6f535763
DM
3633 return work_done;
3634}
3635
3636static int tg3_poll(struct napi_struct *napi, int budget)
3637{
3638 struct tg3 *tp = container_of(napi, struct tg3, napi);
3639 int work_done = 0;
4fd7ab59 3640 struct tg3_hw_status *sblk = tp->hw_status;
6f535763
DM
3641
3642 while (1) {
3643 work_done = tg3_poll_work(tp, work_done, budget);
3644
3645 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3646 goto tx_recovery;
3647
3648 if (unlikely(work_done >= budget))
3649 break;
3650
4fd7ab59
MC
3651 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3652 /* tp->last_tag is used in tg3_restart_ints() below
3653 * to tell the hw how much work has been processed,
3654 * so we must read it before checking for more work.
3655 */
3656 tp->last_tag = sblk->status_tag;
3657 rmb();
3658 } else
3659 sblk->status &= ~SD_STATUS_UPDATED;
6f535763 3660
4fd7ab59 3661 if (likely(!tg3_has_work(tp))) {
6f535763
DM
3662 netif_rx_complete(tp->dev, napi);
3663 tg3_restart_ints(tp);
3664 break;
3665 }
1da177e4
LT
3666 }
3667
bea3348e 3668 return work_done;
6f535763
DM
3669
3670tx_recovery:
4fd7ab59 3671 /* work_done is guaranteed to be less than budget. */
6f535763
DM
3672 netif_rx_complete(tp->dev, napi);
3673 schedule_work(&tp->reset_task);
4fd7ab59 3674 return work_done;
1da177e4
LT
3675}
3676
f47c11ee
DM
3677static void tg3_irq_quiesce(struct tg3 *tp)
3678{
3679 BUG_ON(tp->irq_sync);
3680
3681 tp->irq_sync = 1;
3682 smp_mb();
3683
3684 synchronize_irq(tp->pdev->irq);
3685}
3686
3687static inline int tg3_irq_sync(struct tg3 *tp)
3688{
3689 return tp->irq_sync;
3690}
3691
3692/* Fully shutdown all tg3 driver activity elsewhere in the system.
3693 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3694 * with as well. Most of the time, this is not necessary except when
3695 * shutting down the device.
3696 */
3697static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3698{
46966545 3699 spin_lock_bh(&tp->lock);
f47c11ee
DM
3700 if (irq_sync)
3701 tg3_irq_quiesce(tp);
f47c11ee
DM
3702}
3703
3704static inline void tg3_full_unlock(struct tg3 *tp)
3705{
f47c11ee
DM
3706 spin_unlock_bh(&tp->lock);
3707}
3708
fcfa0a32
MC
3709/* One-shot MSI handler - Chip automatically disables interrupt
3710 * after sending MSI so driver doesn't have to do it.
3711 */
7d12e780 3712static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
fcfa0a32
MC
3713{
3714 struct net_device *dev = dev_id;
3715 struct tg3 *tp = netdev_priv(dev);
3716
3717 prefetch(tp->hw_status);
3718 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3719
3720 if (likely(!tg3_irq_sync(tp)))
bea3348e 3721 netif_rx_schedule(dev, &tp->napi);
fcfa0a32
MC
3722
3723 return IRQ_HANDLED;
3724}
3725
88b06bc2
MC
3726/* MSI ISR - No need to check for interrupt sharing and no need to
3727 * flush status block and interrupt mailbox. PCI ordering rules
3728 * guarantee that MSI will arrive after the status block.
3729 */
7d12e780 3730static irqreturn_t tg3_msi(int irq, void *dev_id)
88b06bc2
MC
3731{
3732 struct net_device *dev = dev_id;
3733 struct tg3 *tp = netdev_priv(dev);
88b06bc2 3734
61487480
MC
3735 prefetch(tp->hw_status);
3736 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
88b06bc2 3737 /*
fac9b83e 3738 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 3739 * chip-internal interrupt pending events.
fac9b83e 3740 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
3741 * NIC to stop sending us irqs, engaging "in-intr-handler"
3742 * event coalescing.
3743 */
3744 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
61487480 3745 if (likely(!tg3_irq_sync(tp)))
bea3348e 3746 netif_rx_schedule(dev, &tp->napi);
61487480 3747
88b06bc2
MC
3748 return IRQ_RETVAL(1);
3749}
3750
7d12e780 3751static irqreturn_t tg3_interrupt(int irq, void *dev_id)
1da177e4
LT
3752{
3753 struct net_device *dev = dev_id;
3754 struct tg3 *tp = netdev_priv(dev);
3755 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3756 unsigned int handled = 1;
3757
1da177e4
LT
3758 /* In INTx mode, it is possible for the interrupt to arrive at
3759 * the CPU before the status block posted prior to the interrupt.
3760 * Reading the PCI State register will confirm whether the
3761 * interrupt is ours and will flush the status block.
3762 */
d18edcb2
MC
3763 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3764 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3765 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3766 handled = 0;
f47c11ee 3767 goto out;
fac9b83e 3768 }
d18edcb2
MC
3769 }
3770
3771 /*
3772 * Writing any value to intr-mbox-0 clears PCI INTA# and
3773 * chip-internal interrupt pending events.
3774 * Writing non-zero to intr-mbox-0 additional tells the
3775 * NIC to stop sending us irqs, engaging "in-intr-handler"
3776 * event coalescing.
c04cb347
MC
3777 *
3778 * Flush the mailbox to de-assert the IRQ immediately to prevent
3779 * spurious interrupts. The flush impacts performance but
3780 * excessive spurious interrupts can be worse in some cases.
d18edcb2 3781 */
c04cb347 3782 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
d18edcb2
MC
3783 if (tg3_irq_sync(tp))
3784 goto out;
3785 sblk->status &= ~SD_STATUS_UPDATED;
3786 if (likely(tg3_has_work(tp))) {
3787 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
bea3348e 3788 netif_rx_schedule(dev, &tp->napi);
d18edcb2
MC
3789 } else {
3790 /* No work, shared interrupt perhaps? re-enable
3791 * interrupts, and flush that PCI write
3792 */
3793 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3794 0x00000000);
fac9b83e 3795 }
f47c11ee 3796out:
fac9b83e
DM
3797 return IRQ_RETVAL(handled);
3798}
3799
7d12e780 3800static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
fac9b83e
DM
3801{
3802 struct net_device *dev = dev_id;
3803 struct tg3 *tp = netdev_priv(dev);
3804 struct tg3_hw_status *sblk = tp->hw_status;
fac9b83e
DM
3805 unsigned int handled = 1;
3806
fac9b83e
DM
3807 /* In INTx mode, it is possible for the interrupt to arrive at
3808 * the CPU before the status block posted prior to the interrupt.
3809 * Reading the PCI State register will confirm whether the
3810 * interrupt is ours and will flush the status block.
3811 */
d18edcb2
MC
3812 if (unlikely(sblk->status_tag == tp->last_tag)) {
3813 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3814 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3815 handled = 0;
f47c11ee 3816 goto out;
1da177e4 3817 }
d18edcb2
MC
3818 }
3819
3820 /*
3821 * writing any value to intr-mbox-0 clears PCI INTA# and
3822 * chip-internal interrupt pending events.
3823 * writing non-zero to intr-mbox-0 additional tells the
3824 * NIC to stop sending us irqs, engaging "in-intr-handler"
3825 * event coalescing.
c04cb347
MC
3826 *
3827 * Flush the mailbox to de-assert the IRQ immediately to prevent
3828 * spurious interrupts. The flush impacts performance but
3829 * excessive spurious interrupts can be worse in some cases.
d18edcb2 3830 */
c04cb347 3831 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
d18edcb2
MC
3832 if (tg3_irq_sync(tp))
3833 goto out;
bea3348e 3834 if (netif_rx_schedule_prep(dev, &tp->napi)) {
d18edcb2
MC
3835 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3836 /* Update last_tag to mark that this status has been
3837 * seen. Because interrupt may be shared, we may be
3838 * racing with tg3_poll(), so only update last_tag
3839 * if tg3_poll() is not scheduled.
3840 */
3841 tp->last_tag = sblk->status_tag;
bea3348e 3842 __netif_rx_schedule(dev, &tp->napi);
1da177e4 3843 }
f47c11ee 3844out:
1da177e4
LT
3845 return IRQ_RETVAL(handled);
3846}
3847
7938109f 3848/* ISR for interrupt test */
7d12e780 3849static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7938109f
MC
3850{
3851 struct net_device *dev = dev_id;
3852 struct tg3 *tp = netdev_priv(dev);
3853 struct tg3_hw_status *sblk = tp->hw_status;
3854
f9804ddb
MC
3855 if ((sblk->status & SD_STATUS_UPDATED) ||
3856 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
b16250e3 3857 tg3_disable_ints(tp);
7938109f
MC
3858 return IRQ_RETVAL(1);
3859 }
3860 return IRQ_RETVAL(0);
3861}
3862
8e7a22e3 3863static int tg3_init_hw(struct tg3 *, int);
944d980e 3864static int tg3_halt(struct tg3 *, int, int);
1da177e4 3865
b9ec6c1b
MC
3866/* Restart hardware after configuration changes, self-test, etc.
3867 * Invoked with tp->lock held.
3868 */
3869static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3870{
3871 int err;
3872
3873 err = tg3_init_hw(tp, reset_phy);
3874 if (err) {
3875 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3876 "aborting.\n", tp->dev->name);
3877 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3878 tg3_full_unlock(tp);
3879 del_timer_sync(&tp->timer);
3880 tp->irq_sync = 0;
bea3348e 3881 napi_enable(&tp->napi);
b9ec6c1b
MC
3882 dev_close(tp->dev);
3883 tg3_full_lock(tp, 0);
3884 }
3885 return err;
3886}
3887
1da177e4
LT
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: drive the device by invoking the INTx handler. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3896
c4028958 3897static void tg3_reset_task(struct work_struct *work)
1da177e4 3898{
c4028958 3899 struct tg3 *tp = container_of(work, struct tg3, reset_task);
1da177e4
LT
3900 unsigned int restart_timer;
3901
7faa006f 3902 tg3_full_lock(tp, 0);
7faa006f
MC
3903
3904 if (!netif_running(tp->dev)) {
7faa006f
MC
3905 tg3_full_unlock(tp);
3906 return;
3907 }
3908
3909 tg3_full_unlock(tp);
3910
1da177e4
LT
3911 tg3_netif_stop(tp);
3912
f47c11ee 3913 tg3_full_lock(tp, 1);
1da177e4
LT
3914
3915 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3916 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3917
df3e6548
MC
3918 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3919 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3920 tp->write32_rx_mbox = tg3_write_flush_reg32;
3921 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3922 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3923 }
3924
944d980e 3925 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
b9ec6c1b
MC
3926 if (tg3_init_hw(tp, 1))
3927 goto out;
1da177e4
LT
3928
3929 tg3_netif_start(tp);
3930
1da177e4
LT
3931 if (restart_timer)
3932 mod_timer(&tp->timer, jiffies + 1);
7faa006f 3933
b9ec6c1b 3934out:
7faa006f 3935 tg3_full_unlock(tp);
1da177e4
LT
3936}
3937
b0408751
MC
3938static void tg3_dump_short_state(struct tg3 *tp)
3939{
3940 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3941 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3942 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3943 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3944}
3945
1da177e4
LT
3946static void tg3_tx_timeout(struct net_device *dev)
3947{
3948 struct tg3 *tp = netdev_priv(dev);
3949
b0408751 3950 if (netif_msg_tx_err(tp)) {
9f88f29f
MC
3951 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3952 dev->name);
b0408751
MC
3953 tg3_dump_short_state(tp);
3954 }
1da177e4
LT
3955
3956 schedule_work(&tp->reset_task);
3957}
3958
c58ec932
MC
3959/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3960static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3961{
3962 u32 base = (u32) mapping & 0xffffffff;
3963
3964 return ((base > 0xffffdcc0) &&
3965 (base + len + 8 < base));
3966}
3967
72f2afb8
MC
3968/* Test for DMA addresses > 40-bit */
3969static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3970 int len)
3971{
3972#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6728a8e2 3973 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
72f2afb8
MC
3974 return (((u64) mapping + len) > DMA_40BIT_MASK);
3975 return 0;
3976#else
3977 return 0;
3978#endif
3979}
3980
1da177e4
LT
3981static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3982
72f2afb8
MC
3983/* Workaround 4GB and 40-bit hardware DMA bugs. */
3984static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
c58ec932
MC
3985 u32 last_plus_one, u32 *start,
3986 u32 base_flags, u32 mss)
1da177e4
LT
3987{
3988 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
c58ec932 3989 dma_addr_t new_addr = 0;
1da177e4 3990 u32 entry = *start;
c58ec932 3991 int i, ret = 0;
1da177e4
LT
3992
3993 if (!new_skb) {
c58ec932
MC
3994 ret = -1;
3995 } else {
3996 /* New SKB is guaranteed to be linear. */
3997 entry = *start;
3998 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3999 PCI_DMA_TODEVICE);
4000 /* Make sure new skb does not cross any 4G boundaries.
4001 * Drop the packet if it does.
4002 */
4003 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4004 ret = -1;
4005 dev_kfree_skb(new_skb);
4006 new_skb = NULL;
4007 } else {
4008 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4009 base_flags, 1 | (mss << 1));
4010 *start = NEXT_TX(entry);
4011 }
1da177e4
LT
4012 }
4013
1da177e4
LT
4014 /* Now clean up the sw ring entries. */
4015 i = 0;
4016 while (entry != last_plus_one) {
4017 int len;
4018
4019 if (i == 0)
4020 len = skb_headlen(skb);
4021 else
4022 len = skb_shinfo(skb)->frags[i-1].size;
4023 pci_unmap_single(tp->pdev,
4024 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4025 len, PCI_DMA_TODEVICE);
4026 if (i == 0) {
4027 tp->tx_buffers[entry].skb = new_skb;
4028 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4029 } else {
4030 tp->tx_buffers[entry].skb = NULL;
4031 }
4032 entry = NEXT_TX(entry);
4033 i++;
4034 }
4035
4036 dev_kfree_skb(skb);
4037
c58ec932 4038 return ret;
1da177e4
LT
4039}
4040
4041static void tg3_set_txd(struct tg3 *tp, int entry,
4042 dma_addr_t mapping, int len, u32 flags,
4043 u32 mss_and_is_end)
4044{
4045 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4046 int is_end = (mss_and_is_end & 0x1);
4047 u32 mss = (mss_and_is_end >> 1);
4048 u32 vlan_tag = 0;
4049
4050 if (is_end)
4051 flags |= TXD_FLAG_END;
4052 if (flags & TXD_FLAG_VLAN) {
4053 vlan_tag = flags >> 16;
4054 flags &= 0xffff;
4055 }
4056 vlan_tag |= (mss << TXD_MSS_SHIFT);
4057
4058 txd->addr_hi = ((u64) mapping >> 32);
4059 txd->addr_lo = ((u64) mapping & 0xffffffff);
4060 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4061 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4062}
4063
5a6f3074
MC
4064/* hard_start_xmit for devices that don't have any bugs and
4065 * support TG3_FLG2_HW_TSO_2 only.
4066 */
1da177e4 4067static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5a6f3074
MC
4068{
4069 struct tg3 *tp = netdev_priv(dev);
4070 dma_addr_t mapping;
4071 u32 len, entry, base_flags, mss;
4072
4073 len = skb_headlen(skb);
4074
00b70504 4075 /* We are running in BH disabled context with netif_tx_lock
bea3348e 4076 * and TX reclaim runs via tp->napi.poll inside of a software
5a6f3074
MC
4077 * interrupt. Furthermore, IRQ processing runs lockless so we have
4078 * no IRQ context deadlocks to worry about either. Rejoice!
4079 */
1b2a7205 4080 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5a6f3074
MC
4081 if (!netif_queue_stopped(dev)) {
4082 netif_stop_queue(dev);
4083
4084 /* This is a hard error, log it. */
4085 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4086 "queue awake!\n", dev->name);
4087 }
5a6f3074
MC
4088 return NETDEV_TX_BUSY;
4089 }
4090
4091 entry = tp->tx_prod;
4092 base_flags = 0;
5a6f3074 4093 mss = 0;
c13e3713 4094 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5a6f3074
MC
4095 int tcp_opt_len, ip_tcp_len;
4096
4097 if (skb_header_cloned(skb) &&
4098 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4099 dev_kfree_skb(skb);
4100 goto out_unlock;
4101 }
4102
b0026624
MC
4103 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4104 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4105 else {
eddc9ec5
ACM
4106 struct iphdr *iph = ip_hdr(skb);
4107
ab6a5bb6 4108 tcp_opt_len = tcp_optlen(skb);
c9bdd4b5 4109 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
b0026624 4110
eddc9ec5
ACM
4111 iph->check = 0;
4112 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
b0026624
MC
4113 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4114 }
5a6f3074
MC
4115
4116 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4117 TXD_FLAG_CPU_POST_DMA);
4118
aa8223c7 4119 tcp_hdr(skb)->check = 0;
5a6f3074 4120
5a6f3074 4121 }
84fa7933 4122 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5a6f3074 4123 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5a6f3074
MC
4124#if TG3_VLAN_TAG_USED
4125 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4126 base_flags |= (TXD_FLAG_VLAN |
4127 (vlan_tx_tag_get(skb) << 16));
4128#endif
4129
4130 /* Queue skb data, a.k.a. the main skb fragment. */
4131 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4132
4133 tp->tx_buffers[entry].skb = skb;
4134 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4135
4136 tg3_set_txd(tp, entry, mapping, len, base_flags,
4137 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4138
4139 entry = NEXT_TX(entry);
4140
4141 /* Now loop through additional data fragments, and queue them. */
4142 if (skb_shinfo(skb)->nr_frags > 0) {
4143 unsigned int i, last;
4144
4145 last = skb_shinfo(skb)->nr_frags - 1;
4146 for (i = 0; i <= last; i++) {
4147 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4148
4149 len = frag->size;
4150 mapping = pci_map_page(tp->pdev,
4151 frag->page,
4152 frag->page_offset,
4153 len, PCI_DMA_TODEVICE);
4154
4155 tp->tx_buffers[entry].skb = NULL;
4156 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4157
4158 tg3_set_txd(tp, entry, mapping, len,
4159 base_flags, (i == last) | (mss << 1));
4160
4161 entry = NEXT_TX(entry);
4162 }
4163 }
4164
4165 /* Packets are ready, update Tx producer idx local and on card. */
4166 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4167
4168 tp->tx_prod = entry;
1b2a7205 4169 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5a6f3074 4170 netif_stop_queue(dev);
42952231 4171 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5a6f3074
MC
4172 netif_wake_queue(tp->dev);
4173 }
4174
4175out_unlock:
4176 mmiowb();
5a6f3074
MC
4177
4178 dev->trans_start = jiffies;
4179
4180 return NETDEV_TX_OK;
4181}
4182
52c0fd83
MC
4183static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4184
4185/* Use GSO to workaround a rare TSO bug that may be triggered when the
4186 * TSO header is greater than 80 bytes.
4187 */
4188static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4189{
4190 struct sk_buff *segs, *nskb;
4191
4192 /* Estimate the number of fragments in the worst case */
1b2a7205 4193 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
52c0fd83 4194 netif_stop_queue(tp->dev);
7f62ad5d
MC
4195 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4196 return NETDEV_TX_BUSY;
4197
4198 netif_wake_queue(tp->dev);
52c0fd83
MC
4199 }
4200
4201 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4202 if (unlikely(IS_ERR(segs)))
4203 goto tg3_tso_bug_end;
4204
4205 do {
4206 nskb = segs;
4207 segs = segs->next;
4208 nskb->next = NULL;
4209 tg3_start_xmit_dma_bug(nskb, tp->dev);
4210 } while (segs);
4211
4212tg3_tso_bug_end:
4213 dev_kfree_skb(skb);
4214
4215 return NETDEV_TX_OK;
4216}
52c0fd83 4217
5a6f3074
MC
4218/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4219 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4220 */
4221static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
4222{
4223 struct tg3 *tp = netdev_priv(dev);
4224 dma_addr_t mapping;
1da177e4
LT
4225 u32 len, entry, base_flags, mss;
4226 int would_hit_hwbug;
1da177e4
LT
4227
4228 len = skb_headlen(skb);
4229
00b70504 4230 /* We are running in BH disabled context with netif_tx_lock
bea3348e 4231 * and TX reclaim runs via tp->napi.poll inside of a software
f47c11ee
DM
4232 * interrupt. Furthermore, IRQ processing runs lockless so we have
4233 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 4234 */
1b2a7205 4235 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1f064a87
SH
4236 if (!netif_queue_stopped(dev)) {
4237 netif_stop_queue(dev);
4238
4239 /* This is a hard error, log it. */
4240 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4241 "queue awake!\n", dev->name);
4242 }
1da177e4
LT
4243 return NETDEV_TX_BUSY;
4244 }
4245
4246 entry = tp->tx_prod;
4247 base_flags = 0;
84fa7933 4248 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4 4249 base_flags |= TXD_FLAG_TCPUDP_CSUM;
1da177e4 4250 mss = 0;
c13e3713 4251 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
eddc9ec5 4252 struct iphdr *iph;
52c0fd83 4253 int tcp_opt_len, ip_tcp_len, hdr_len;
1da177e4
LT
4254
4255 if (skb_header_cloned(skb) &&
4256 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4257 dev_kfree_skb(skb);
4258 goto out_unlock;
4259 }
4260
ab6a5bb6 4261 tcp_opt_len = tcp_optlen(skb);
c9bdd4b5 4262 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
1da177e4 4263
52c0fd83
MC
4264 hdr_len = ip_tcp_len + tcp_opt_len;
4265 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7f62ad5d 4266 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
52c0fd83
MC
4267 return (tg3_tso_bug(tp, skb));
4268
1da177e4
LT
4269 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4270 TXD_FLAG_CPU_POST_DMA);
4271
eddc9ec5
ACM
4272 iph = ip_hdr(skb);
4273 iph->check = 0;
4274 iph->tot_len = htons(mss + hdr_len);
1da177e4 4275 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
aa8223c7 4276 tcp_hdr(skb)->check = 0;
1da177e4 4277 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
aa8223c7
ACM
4278 } else
4279 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4280 iph->daddr, 0,
4281 IPPROTO_TCP,
4282 0);
1da177e4
LT
4283
4284 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4285 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
eddc9ec5 4286 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
4287 int tsflags;
4288
eddc9ec5 4289 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
4290 mss |= (tsflags << 11);
4291 }
4292 } else {
eddc9ec5 4293 if (tcp_opt_len || iph->ihl > 5) {
1da177e4
LT
4294 int tsflags;
4295
eddc9ec5 4296 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
1da177e4
LT
4297 base_flags |= tsflags << 12;
4298 }
4299 }
4300 }
1da177e4
LT
4301#if TG3_VLAN_TAG_USED
4302 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4303 base_flags |= (TXD_FLAG_VLAN |
4304 (vlan_tx_tag_get(skb) << 16));
4305#endif
4306
4307 /* Queue skb data, a.k.a. the main skb fragment. */
4308 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4309
4310 tp->tx_buffers[entry].skb = skb;
4311 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4312
4313 would_hit_hwbug = 0;
4314
4315 if (tg3_4g_overflow_test(mapping, len))
c58ec932 4316 would_hit_hwbug = 1;
1da177e4
LT
4317
4318 tg3_set_txd(tp, entry, mapping, len, base_flags,
4319 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4320
4321 entry = NEXT_TX(entry);
4322
4323 /* Now loop through additional data fragments, and queue them. */
4324 if (skb_shinfo(skb)->nr_frags > 0) {
4325 unsigned int i, last;
4326
4327 last = skb_shinfo(skb)->nr_frags - 1;
4328 for (i = 0; i <= last; i++) {
4329 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4330
4331 len = frag->size;
4332 mapping = pci_map_page(tp->pdev,
4333 frag->page,
4334 frag->page_offset,
4335 len, PCI_DMA_TODEVICE);
4336
4337 tp->tx_buffers[entry].skb = NULL;
4338 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4339
c58ec932
MC
4340 if (tg3_4g_overflow_test(mapping, len))
4341 would_hit_hwbug = 1;
1da177e4 4342
72f2afb8
MC
4343 if (tg3_40bit_overflow_test(tp, mapping, len))
4344 would_hit_hwbug = 1;
4345
1da177e4
LT
4346 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4347 tg3_set_txd(tp, entry, mapping, len,
4348 base_flags, (i == last)|(mss << 1));
4349 else
4350 tg3_set_txd(tp, entry, mapping, len,
4351 base_flags, (i == last));
4352
4353 entry = NEXT_TX(entry);
4354 }
4355 }
4356
4357 if (would_hit_hwbug) {
4358 u32 last_plus_one = entry;
4359 u32 start;
1da177e4 4360
c58ec932
MC
4361 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4362 start &= (TG3_TX_RING_SIZE - 1);
1da177e4
LT
4363
4364 /* If the workaround fails due to memory/mapping
4365 * failure, silently drop this packet.
4366 */
72f2afb8 4367 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
c58ec932 4368 &start, base_flags, mss))
1da177e4
LT
4369 goto out_unlock;
4370
4371 entry = start;
4372 }
4373
4374 /* Packets are ready, update Tx producer idx local and on card. */
4375 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4376
4377 tp->tx_prod = entry;
1b2a7205 4378 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
1da177e4 4379 netif_stop_queue(dev);
42952231 4380 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
51b91468
MC
4381 netif_wake_queue(tp->dev);
4382 }
1da177e4
LT
4383
4384out_unlock:
4385 mmiowb();
1da177e4
LT
4386
4387 dev->trans_start = jiffies;
4388
4389 return NETDEV_TX_OK;
4390}
4391
4392static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4393 int new_mtu)
4394{
4395 dev->mtu = new_mtu;
4396
ef7f5ec0 4397 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4398 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4399 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4400 ethtool_op_set_tso(dev, 0);
4401 }
4402 else
4403 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4404 } else {
a4e2b347 4405 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4406 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4407 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4408 }
1da177e4
LT
4409}
4410
4411static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4412{
4413 struct tg3 *tp = netdev_priv(dev);
b9ec6c1b 4414 int err;
1da177e4
LT
4415
4416 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4417 return -EINVAL;
4418
4419 if (!netif_running(dev)) {
4420 /* We'll just catch it later when the
4421 * device is up'd.
4422 */
4423 tg3_set_mtu(dev, tp, new_mtu);
4424 return 0;
4425 }
4426
4427 tg3_netif_stop(tp);
f47c11ee
DM
4428
4429 tg3_full_lock(tp, 1);
1da177e4 4430
944d980e 4431 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
4432
4433 tg3_set_mtu(dev, tp, new_mtu);
4434
b9ec6c1b 4435 err = tg3_restart_hw(tp, 0);
1da177e4 4436
b9ec6c1b
MC
4437 if (!err)
4438 tg3_netif_start(tp);
1da177e4 4439
f47c11ee 4440 tg3_full_unlock(tp);
1da177e4 4441
b9ec6c1b 4442 return err;
1da177e4
LT
4443}
4444
4445/* Free up pending packets in all rx/tx rings.
4446 *
4447 * The chip has been shut down and the driver detached from
4448 * the networking, so no interrupts or new tx packets will
4449 * end up in the driver. tp->{tx,}lock is not held and we are not
4450 * in an interrupt context and thus may sleep.
4451 */
4452static void tg3_free_rings(struct tg3 *tp)
4453{
4454 struct ring_info *rxp;
4455 int i;
4456
4457 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4458 rxp = &tp->rx_std_buffers[i];
4459
4460 if (rxp->skb == NULL)
4461 continue;
4462 pci_unmap_single(tp->pdev,
4463 pci_unmap_addr(rxp, mapping),
7e72aad4 4464 tp->rx_pkt_buf_sz - tp->rx_offset,
1da177e4
LT
4465 PCI_DMA_FROMDEVICE);
4466 dev_kfree_skb_any(rxp->skb);
4467 rxp->skb = NULL;
4468 }
4469
4470 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4471 rxp = &tp->rx_jumbo_buffers[i];
4472
4473 if (rxp->skb == NULL)
4474 continue;
4475 pci_unmap_single(tp->pdev,
4476 pci_unmap_addr(rxp, mapping),
4477 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4478 PCI_DMA_FROMDEVICE);
4479 dev_kfree_skb_any(rxp->skb);
4480 rxp->skb = NULL;
4481 }
4482
4483 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4484 struct tx_ring_info *txp;
4485 struct sk_buff *skb;
4486 int j;
4487
4488 txp = &tp->tx_buffers[i];
4489 skb = txp->skb;
4490
4491 if (skb == NULL) {
4492 i++;
4493 continue;
4494 }
4495
4496 pci_unmap_single(tp->pdev,
4497 pci_unmap_addr(txp, mapping),
4498 skb_headlen(skb),
4499 PCI_DMA_TODEVICE);
4500 txp->skb = NULL;
4501
4502 i++;
4503
4504 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4505 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4506 pci_unmap_page(tp->pdev,
4507 pci_unmap_addr(txp, mapping),
4508 skb_shinfo(skb)->frags[j].size,
4509 PCI_DMA_TODEVICE);
4510 i++;
4511 }
4512
4513 dev_kfree_skb_any(skb);
4514 }
4515}
4516
4517/* Initialize tx/rx rings for packet processing.
4518 *
4519 * The chip has been shut down and the driver detached from
4520 * the networking, so no interrupts or new tx packets will
4521 * end up in the driver. tp->{tx,}lock are held and thus
4522 * we may not sleep.
4523 */
32d8c572 4524static int tg3_init_rings(struct tg3 *tp)
1da177e4
LT
4525{
4526 u32 i;
4527
4528 /* Free up all the SKBs. */
4529 tg3_free_rings(tp);
4530
4531 /* Zero out all descriptors. */
4532 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4533 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4534 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4535 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4536
7e72aad4 4537 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
a4e2b347 4538 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
7e72aad4
MC
4539 (tp->dev->mtu > ETH_DATA_LEN))
4540 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4541
1da177e4
LT
4542 /* Initialize invariants of the rings, we only set this
4543 * stuff once. This works because the card does not
4544 * write into the rx buffer posting rings.
4545 */
4546 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4547 struct tg3_rx_buffer_desc *rxd;
4548
4549 rxd = &tp->rx_std[i];
7e72aad4 4550 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
1da177e4
LT
4551 << RXD_LEN_SHIFT;
4552 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4553 rxd->opaque = (RXD_OPAQUE_RING_STD |
4554 (i << RXD_OPAQUE_INDEX_SHIFT));
4555 }
4556
0f893dc6 4557 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
4558 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4559 struct tg3_rx_buffer_desc *rxd;
4560
4561 rxd = &tp->rx_jumbo[i];
4562 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4563 << RXD_LEN_SHIFT;
4564 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4565 RXD_FLAG_JUMBO;
4566 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4567 (i << RXD_OPAQUE_INDEX_SHIFT));
4568 }
4569 }
4570
4571 /* Now allocate fresh SKBs for each rx ring. */
4572 for (i = 0; i < tp->rx_pending; i++) {
32d8c572
MC
4573 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4574 printk(KERN_WARNING PFX
4575 "%s: Using a smaller RX standard ring, "
4576 "only %d out of %d buffers were allocated "
4577 "successfully.\n",
4578 tp->dev->name, i, tp->rx_pending);
4579 if (i == 0)
4580 return -ENOMEM;
4581 tp->rx_pending = i;
1da177e4 4582 break;
32d8c572 4583 }
1da177e4
LT
4584 }
4585
0f893dc6 4586 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
4587 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4588 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
32d8c572
MC
4589 -1, i) < 0) {
4590 printk(KERN_WARNING PFX
4591 "%s: Using a smaller RX jumbo ring, "
4592 "only %d out of %d buffers were "
4593 "allocated successfully.\n",
4594 tp->dev->name, i, tp->rx_jumbo_pending);
4595 if (i == 0) {
4596 tg3_free_rings(tp);
4597 return -ENOMEM;
4598 }
4599 tp->rx_jumbo_pending = i;
1da177e4 4600 break;
32d8c572 4601 }
1da177e4
LT
4602 }
4603 }
32d8c572 4604 return 0;
1da177e4
LT
4605}
4606
4607/*
4608 * Must not be invoked with interrupt sources disabled and
4609 * the hardware shutdown down.
4610 */
4611static void tg3_free_consistent(struct tg3 *tp)
4612{
b4558ea9
JJ
4613 kfree(tp->rx_std_buffers);
4614 tp->rx_std_buffers = NULL;
1da177e4
LT
4615 if (tp->rx_std) {
4616 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4617 tp->rx_std, tp->rx_std_mapping);
4618 tp->rx_std = NULL;
4619 }
4620 if (tp->rx_jumbo) {
4621 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4622 tp->rx_jumbo, tp->rx_jumbo_mapping);
4623 tp->rx_jumbo = NULL;
4624 }
4625 if (tp->rx_rcb) {
4626 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4627 tp->rx_rcb, tp->rx_rcb_mapping);
4628 tp->rx_rcb = NULL;
4629 }
4630 if (tp->tx_ring) {
4631 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4632 tp->tx_ring, tp->tx_desc_mapping);
4633 tp->tx_ring = NULL;
4634 }
4635 if (tp->hw_status) {
4636 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4637 tp->hw_status, tp->status_mapping);
4638 tp->hw_status = NULL;
4639 }
4640 if (tp->hw_stats) {
4641 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4642 tp->hw_stats, tp->stats_mapping);
4643 tp->hw_stats = NULL;
4644 }
4645}
4646
4647/*
4648 * Must not be invoked with interrupt sources disabled and
4649 * the hardware shutdown down. Can sleep.
4650 */
4651static int tg3_alloc_consistent(struct tg3 *tp)
4652{
bd2b3343 4653 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
1da177e4
LT
4654 (TG3_RX_RING_SIZE +
4655 TG3_RX_JUMBO_RING_SIZE)) +
4656 (sizeof(struct tx_ring_info) *
4657 TG3_TX_RING_SIZE),
4658 GFP_KERNEL);
4659 if (!tp->rx_std_buffers)
4660 return -ENOMEM;
4661
1da177e4
LT
4662 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4663 tp->tx_buffers = (struct tx_ring_info *)
4664 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4665
4666 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4667 &tp->rx_std_mapping);
4668 if (!tp->rx_std)
4669 goto err_out;
4670
4671 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4672 &tp->rx_jumbo_mapping);
4673
4674 if (!tp->rx_jumbo)
4675 goto err_out;
4676
4677 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4678 &tp->rx_rcb_mapping);
4679 if (!tp->rx_rcb)
4680 goto err_out;
4681
4682 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4683 &tp->tx_desc_mapping);
4684 if (!tp->tx_ring)
4685 goto err_out;
4686
4687 tp->hw_status = pci_alloc_consistent(tp->pdev,
4688 TG3_HW_STATUS_SIZE,
4689 &tp->status_mapping);
4690 if (!tp->hw_status)
4691 goto err_out;
4692
4693 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4694 sizeof(struct tg3_hw_stats),
4695 &tp->stats_mapping);
4696 if (!tp->hw_stats)
4697 goto err_out;
4698
4699 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4700 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4701
4702 return 0;
4703
4704err_out:
4705 tg3_free_consistent(tp);
4706 return -ENOMEM;
4707}
4708
4709#define MAX_WAIT_CNT 1000
4710
4711/* To stop a block, clear the enable bit and poll till it
4712 * clears. tp->lock is held.
4713 */
b3b7d6be 4714static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4715{
4716 unsigned int i;
4717 u32 val;
4718
4719 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4720 switch (ofs) {
4721 case RCVLSC_MODE:
4722 case DMAC_MODE:
4723 case MBFREE_MODE:
4724 case BUFMGR_MODE:
4725 case MEMARB_MODE:
4726 /* We can't enable/disable these bits of the
4727 * 5705/5750, just say success.
4728 */
4729 return 0;
4730
4731 default:
4732 break;
4733 };
4734 }
4735
4736 val = tr32(ofs);
4737 val &= ~enable_bit;
4738 tw32_f(ofs, val);
4739
4740 for (i = 0; i < MAX_WAIT_CNT; i++) {
4741 udelay(100);
4742 val = tr32(ofs);
4743 if ((val & enable_bit) == 0)
4744 break;
4745 }
4746
b3b7d6be 4747 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4748 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4749 "ofs=%lx enable_bit=%x\n",
4750 ofs, enable_bit);
4751 return -ENODEV;
4752 }
4753
4754 return 0;
4755}
4756
4757/* tp->lock is held. */
b3b7d6be 4758static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
4759{
4760 int i, err;
4761
4762 tg3_disable_ints(tp);
4763
4764 tp->rx_mode &= ~RX_MODE_ENABLE;
4765 tw32_f(MAC_RX_MODE, tp->rx_mode);
4766 udelay(10);
4767
b3b7d6be
DM
4768 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4769 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4770 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4771 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4772 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4773 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4774
4775 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4776 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4777 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4778 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4779 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4780 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4781 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
4782
4783 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4784 tw32_f(MAC_MODE, tp->mac_mode);
4785 udelay(40);
4786
4787 tp->tx_mode &= ~TX_MODE_ENABLE;
4788 tw32_f(MAC_TX_MODE, tp->tx_mode);
4789
4790 for (i = 0; i < MAX_WAIT_CNT; i++) {
4791 udelay(100);
4792 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4793 break;
4794 }
4795 if (i >= MAX_WAIT_CNT) {
4796 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4797 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4798 tp->dev->name, tr32(MAC_TX_MODE));
e6de8ad1 4799 err |= -ENODEV;
1da177e4
LT
4800 }
4801
e6de8ad1 4802 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
4803 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4804 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
4805
4806 tw32(FTQ_RESET, 0xffffffff);
4807 tw32(FTQ_RESET, 0x00000000);
4808
b3b7d6be
DM
4809 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4810 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4
LT
4811
4812 if (tp->hw_status)
4813 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4814 if (tp->hw_stats)
4815 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4816
1da177e4
LT
4817 return err;
4818}
4819
4820/* tp->lock is held. */
4821static int tg3_nvram_lock(struct tg3 *tp)
4822{
4823 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4824 int i;
4825
ec41c7df
MC
4826 if (tp->nvram_lock_cnt == 0) {
4827 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4828 for (i = 0; i < 8000; i++) {
4829 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4830 break;
4831 udelay(20);
4832 }
4833 if (i == 8000) {
4834 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4835 return -ENODEV;
4836 }
1da177e4 4837 }
ec41c7df 4838 tp->nvram_lock_cnt++;
1da177e4
LT
4839 }
4840 return 0;
4841}
4842
4843/* tp->lock is held. */
4844static void tg3_nvram_unlock(struct tg3 *tp)
4845{
ec41c7df
MC
4846 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4847 if (tp->nvram_lock_cnt > 0)
4848 tp->nvram_lock_cnt--;
4849 if (tp->nvram_lock_cnt == 0)
4850 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4851 }
1da177e4
LT
4852}
4853
e6af301b
MC
4854/* tp->lock is held. */
4855static void tg3_enable_nvram_access(struct tg3 *tp)
4856{
4857 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4858 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4859 u32 nvaccess = tr32(NVRAM_ACCESS);
4860
4861 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4862 }
4863}
4864
4865/* tp->lock is held. */
4866static void tg3_disable_nvram_access(struct tg3 *tp)
4867{
4868 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4869 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4870 u32 nvaccess = tr32(NVRAM_ACCESS);
4871
4872 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4873 }
4874}
4875
0d3031d9
MC
4876static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4877{
4878 int i;
4879 u32 apedata;
4880
4881 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4882 if (apedata != APE_SEG_SIG_MAGIC)
4883 return;
4884
4885 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4886 if (apedata != APE_FW_STATUS_READY)
4887 return;
4888
4889 /* Wait for up to 1 millisecond for APE to service previous event. */
4890 for (i = 0; i < 10; i++) {
4891 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4892 return;
4893
4894 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4895
4896 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4897 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4898 event | APE_EVENT_STATUS_EVENT_PENDING);
4899
4900 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4901
4902 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4903 break;
4904
4905 udelay(100);
4906 }
4907
4908 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4909 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4910}
4911
4912static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4913{
4914 u32 event;
4915 u32 apedata;
4916
4917 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4918 return;
4919
4920 switch (kind) {
4921 case RESET_KIND_INIT:
4922 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4923 APE_HOST_SEG_SIG_MAGIC);
4924 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4925 APE_HOST_SEG_LEN_MAGIC);
4926 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4927 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4928 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4929 APE_HOST_DRIVER_ID_MAGIC);
4930 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4931 APE_HOST_BEHAV_NO_PHYLOCK);
4932
4933 event = APE_EVENT_STATUS_STATE_START;
4934 break;
4935 case RESET_KIND_SHUTDOWN:
4936 event = APE_EVENT_STATUS_STATE_UNLOAD;
4937 break;
4938 case RESET_KIND_SUSPEND:
4939 event = APE_EVENT_STATUS_STATE_SUSPEND;
4940 break;
4941 default:
4942 return;
4943 }
4944
4945 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4946
4947 tg3_ape_send_event(tp, event);
4948}
4949
1da177e4
LT
4950/* tp->lock is held. */
4951static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4952{
f49639e6
DM
4953 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4954 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4
LT
4955
4956 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4957 switch (kind) {
4958 case RESET_KIND_INIT:
4959 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4960 DRV_STATE_START);
4961 break;
4962
4963 case RESET_KIND_SHUTDOWN:
4964 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4965 DRV_STATE_UNLOAD);
4966 break;
4967
4968 case RESET_KIND_SUSPEND:
4969 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4970 DRV_STATE_SUSPEND);
4971 break;
4972
4973 default:
4974 break;
4975 };
4976 }
0d3031d9
MC
4977
4978 if (kind == RESET_KIND_INIT ||
4979 kind == RESET_KIND_SUSPEND)
4980 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
4981}
4982
4983/* tp->lock is held. */
4984static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4985{
4986 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4987 switch (kind) {
4988 case RESET_KIND_INIT:
4989 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4990 DRV_STATE_START_DONE);
4991 break;
4992
4993 case RESET_KIND_SHUTDOWN:
4994 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4995 DRV_STATE_UNLOAD_DONE);
4996 break;
4997
4998 default:
4999 break;
5000 };
5001 }
0d3031d9
MC
5002
5003 if (kind == RESET_KIND_SHUTDOWN)
5004 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
5005}
5006
5007/* tp->lock is held. */
5008static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5009{
5010 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5011 switch (kind) {
5012 case RESET_KIND_INIT:
5013 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5014 DRV_STATE_START);
5015 break;
5016
5017 case RESET_KIND_SHUTDOWN:
5018 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5019 DRV_STATE_UNLOAD);
5020 break;
5021
5022 case RESET_KIND_SUSPEND:
5023 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5024 DRV_STATE_SUSPEND);
5025 break;
5026
5027 default:
5028 break;
5029 };
5030 }
5031}
5032
7a6f4369
MC
5033static int tg3_poll_fw(struct tg3 *tp)
5034{
5035 int i;
5036 u32 val;
5037
b5d3772c 5038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
0ccead18
GZ
5039 /* Wait up to 20ms for init done. */
5040 for (i = 0; i < 200; i++) {
b5d3772c
MC
5041 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5042 return 0;
0ccead18 5043 udelay(100);
b5d3772c
MC
5044 }
5045 return -ENODEV;
5046 }
5047
7a6f4369
MC
5048 /* Wait for firmware initialization to complete. */
5049 for (i = 0; i < 100000; i++) {
5050 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5051 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5052 break;
5053 udelay(10);
5054 }
5055
5056 /* Chip might not be fitted with firmware. Some Sun onboard
5057 * parts are configured like that. So don't signal the timeout
5058 * of the above loop as an error, but do report the lack of
5059 * running firmware once.
5060 */
5061 if (i >= 100000 &&
5062 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5063 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5064
5065 printk(KERN_INFO PFX "%s: No firmware running.\n",
5066 tp->dev->name);
5067 }
5068
5069 return 0;
5070}
5071
ee6a99b5
MC
5072/* Save PCI command register before chip reset */
5073static void tg3_save_pci_state(struct tg3 *tp)
5074{
8a6eac90 5075 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
ee6a99b5
MC
5076}
5077
5078/* Restore PCI state after chip reset */
5079static void tg3_restore_pci_state(struct tg3 *tp)
5080{
5081 u32 val;
5082
5083 /* Re-enable indirect register accesses. */
5084 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5085 tp->misc_host_ctrl);
5086
5087 /* Set MAX PCI retry to zero. */
5088 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5089 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5090 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5091 val |= PCISTATE_RETRY_SAME_DMA;
0d3031d9
MC
5092 /* Allow reads and writes to the APE register and memory space. */
5093 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5094 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5095 PCISTATE_ALLOW_APE_SHMEM_WR;
ee6a99b5
MC
5096 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5097
8a6eac90 5098 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
ee6a99b5 5099
5f5c51e3
MC
5100 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5101 pcie_set_readrq(tp->pdev, 4096);
5102 else {
114342f2
MC
5103 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5104 tp->pci_cacheline_sz);
5105 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5106 tp->pci_lat_timer);
5107 }
5f5c51e3 5108
ee6a99b5 5109 /* Make sure PCI-X relaxed ordering bit is clear. */
9974a356
MC
5110 if (tp->pcix_cap) {
5111 u16 pcix_cmd;
5112
5113 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5114 &pcix_cmd);
5115 pcix_cmd &= ~PCI_X_CMD_ERO;
5116 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5117 pcix_cmd);
5118 }
ee6a99b5
MC
5119
5120 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ee6a99b5
MC
5121
5122 /* Chip reset on 5780 will reset MSI enable bit,
5123 * so need to restore it.
5124 */
5125 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5126 u16 ctrl;
5127
5128 pci_read_config_word(tp->pdev,
5129 tp->msi_cap + PCI_MSI_FLAGS,
5130 &ctrl);
5131 pci_write_config_word(tp->pdev,
5132 tp->msi_cap + PCI_MSI_FLAGS,
5133 ctrl | PCI_MSI_FLAGS_ENABLE);
5134 val = tr32(MSGINT_MODE);
5135 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5136 }
5137 }
5138}
5139
1da177e4
LT
5140static void tg3_stop_fw(struct tg3 *);
5141
5142/* tp->lock is held. */
5143static int tg3_chip_reset(struct tg3 *tp)
5144{
5145 u32 val;
1ee582d8 5146 void (*write_op)(struct tg3 *, u32, u32);
7a6f4369 5147 int err;
1da177e4 5148
f49639e6
DM
5149 tg3_nvram_lock(tp);
5150
5151 /* No matching tg3_nvram_unlock() after this because
5152 * chip reset below will undo the nvram lock.
5153 */
5154 tp->nvram_lock_cnt = 0;
1da177e4 5155
ee6a99b5
MC
5156 /* GRC_MISC_CFG core clock reset will clear the memory
5157 * enable bit in PCI register 4 and the MSI enable bit
5158 * on some chips, so we save relevant registers here.
5159 */
5160 tg3_save_pci_state(tp);
5161
d9ab5ad1 5162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 5163 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 5164 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
5165 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d9ab5ad1
MC
5167 tw32(GRC_FASTBOOT_PC, 0);
5168
1da177e4
LT
5169 /*
5170 * We must avoid the readl() that normally takes place.
5171 * It locks machines, causes machine checks, and other
5172 * fun things. So, temporarily disable the 5701
5173 * hardware workaround, while we do the reset.
5174 */
1ee582d8
MC
5175 write_op = tp->write32;
5176 if (write_op == tg3_write_flush_reg32)
5177 tp->write32 = tg3_write32;
1da177e4 5178
d18edcb2
MC
5179 /* Prevent the irq handler from reading or writing PCI registers
5180 * during chip reset when the memory enable bit in the PCI command
5181 * register may be cleared. The chip does not generate interrupt
5182 * at this time, but the irq handler may still be called due to irq
5183 * sharing or irqpoll.
5184 */
5185 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
b8fa2f3a
MC
5186 if (tp->hw_status) {
5187 tp->hw_status->status = 0;
5188 tp->hw_status->status_tag = 0;
5189 }
d18edcb2
MC
5190 tp->last_tag = 0;
5191 smp_mb();
5192 synchronize_irq(tp->pdev->irq);
5193
1da177e4
LT
5194 /* do the reset */
5195 val = GRC_MISC_CFG_CORECLK_RESET;
5196
5197 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5198 if (tr32(0x7e2c) == 0x60) {
5199 tw32(0x7e2c, 0x20);
5200 }
5201 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5202 tw32(GRC_MISC_CFG, (1 << 29));
5203 val |= (1 << 29);
5204 }
5205 }
5206
b5d3772c
MC
5207 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5208 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5209 tw32(GRC_VCPU_EXT_CTRL,
5210 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5211 }
5212
1da177e4
LT
5213 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5214 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5215 tw32(GRC_MISC_CFG, val);
5216
1ee582d8
MC
5217 /* restore 5701 hardware bug workaround write method */
5218 tp->write32 = write_op;
1da177e4
LT
5219
5220 /* Unfortunately, we have to delay before the PCI read back.
5221 * Some 575X chips even will not respond to a PCI cfg access
5222 * when the reset command is given to the chip.
5223 *
5224 * How do these hardware designers expect things to work
5225 * properly if the PCI write is posted for a long period
5226 * of time? It is always necessary to have some method by
5227 * which a register read back can occur to push the write
5228 * out which does the reset.
5229 *
5230 * For most tg3 variants the trick below was working.
5231 * Ho hum...
5232 */
5233 udelay(120);
5234
5235 /* Flush PCI posted writes. The normal MMIO registers
5236 * are inaccessible at this time so this is the only
5237 * way to make this reliably (actually, this is no longer
5238 * the case, see above). I tried to use indirect
5239 * register read/write but this upset some 5701 variants.
5240 */
5241 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5242
5243 udelay(120);
5244
5245 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5246 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5247 int i;
5248 u32 cfg_val;
5249
5250 /* Wait for link training to complete. */
5251 for (i = 0; i < 5000; i++)
5252 udelay(100);
5253
5254 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5255 pci_write_config_dword(tp->pdev, 0xc4,
5256 cfg_val | (1 << 15));
5257 }
5258 /* Set PCIE max payload size and clear error status. */
5259 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5260 }
5261
ee6a99b5 5262 tg3_restore_pci_state(tp);
1da177e4 5263
d18edcb2
MC
5264 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5265
ee6a99b5
MC
5266 val = 0;
5267 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4cf78e4f 5268 val = tr32(MEMARB_MODE);
ee6a99b5 5269 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1da177e4
LT
5270
5271 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5272 tg3_stop_fw(tp);
5273 tw32(0x5000, 0x400);
5274 }
5275
5276 tw32(GRC_MODE, tp->grc_mode);
5277
5278 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
ab0049b4 5279 val = tr32(0xc4);
1da177e4
LT
5280
5281 tw32(0xc4, val | (1 << 15));
5282 }
5283
5284 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5286 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5287 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5288 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5289 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5290 }
5291
5292 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5293 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5294 tw32_f(MAC_MODE, tp->mac_mode);
747e8f8b
MC
5295 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5296 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5297 tw32_f(MAC_MODE, tp->mac_mode);
1da177e4
LT
5298 } else
5299 tw32_f(MAC_MODE, 0);
5300 udelay(40);
5301
7a6f4369
MC
5302 err = tg3_poll_fw(tp);
5303 if (err)
5304 return err;
1da177e4
LT
5305
5306 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5307 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
ab0049b4 5308 val = tr32(0x7c00);
1da177e4
LT
5309
5310 tw32(0x7c00, val | (1 << 25));
5311 }
5312
5313 /* Reprobe ASF enable state. */
5314 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5315 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5316 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5317 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5318 u32 nic_cfg;
5319
5320 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5321 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5322 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 5323 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
5324 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5325 }
5326 }
5327
5328 return 0;
5329}
5330
5331/* tp->lock is held. */
5332static void tg3_stop_fw(struct tg3 *tp)
5333{
0d3031d9
MC
5334 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5335 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
1da177e4
LT
5336 u32 val;
5337 int i;
5338
5339 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5340 val = tr32(GRC_RX_CPU_EVENT);
5341 val |= (1 << 14);
5342 tw32(GRC_RX_CPU_EVENT, val);
5343
5344 /* Wait for RX cpu to ACK the event. */
5345 for (i = 0; i < 100; i++) {
5346 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5347 break;
5348 udelay(1);
5349 }
5350 }
5351}
5352
5353/* tp->lock is held. */
944d980e 5354static int tg3_halt(struct tg3 *tp, int kind, int silent)
1da177e4
LT
5355{
5356 int err;
5357
5358 tg3_stop_fw(tp);
5359
944d980e 5360 tg3_write_sig_pre_reset(tp, kind);
1da177e4 5361
b3b7d6be 5362 tg3_abort_hw(tp, silent);
1da177e4
LT
5363 err = tg3_chip_reset(tp);
5364
944d980e
MC
5365 tg3_write_sig_legacy(tp, kind);
5366 tg3_write_sig_post_reset(tp, kind);
1da177e4
LT
5367
5368 if (err)
5369 return err;
5370
5371 return 0;
5372}
5373
5374#define TG3_FW_RELEASE_MAJOR 0x0
5375#define TG3_FW_RELASE_MINOR 0x0
5376#define TG3_FW_RELEASE_FIX 0x0
5377#define TG3_FW_START_ADDR 0x08000000
5378#define TG3_FW_TEXT_ADDR 0x08000000
5379#define TG3_FW_TEXT_LEN 0x9c0
5380#define TG3_FW_RODATA_ADDR 0x080009c0
5381#define TG3_FW_RODATA_LEN 0x60
5382#define TG3_FW_DATA_ADDR 0x08000a40
5383#define TG3_FW_DATA_LEN 0x20
5384#define TG3_FW_SBSS_ADDR 0x08000a60
5385#define TG3_FW_SBSS_LEN 0xc
5386#define TG3_FW_BSS_ADDR 0x08000a70
5387#define TG3_FW_BSS_LEN 0x10
5388
50da859d 5389static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5390 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5391 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5392 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5393 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5394 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5395 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5396 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5397 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5398 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5399 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5400 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5401 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5402 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5403 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5404 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5405 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5406 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5407 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5408 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5409 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5410 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5411 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5412 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5413 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5414 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5415 0, 0, 0, 0, 0, 0,
5416 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5417 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5418 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5419 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5420 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5421 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5422 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5423 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5424 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5425 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5426 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5427 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5428 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5429 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5430 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5431 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5432 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5433 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5434 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5435 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5436 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5437 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5438 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5439 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5440 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5441 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5442 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5443 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5444 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5445 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5446 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5447 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5448 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5449 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5450 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5451 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5452 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5453 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5454 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5455 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5456 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5457 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5458 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5459 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5460 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5461 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5462 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5463 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5464 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5465 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5466 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5467 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5468 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5469 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5470 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5471 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5472 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5473 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5474 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5475 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5476 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5477 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5478 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5479 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5480 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5481};
5482
50da859d 5483static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5484 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5485 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5486 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5487 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5488 0x00000000
5489};
5490
5491#if 0 /* All zeros, don't eat up space with it. */
5492u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5493 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5494 0x00000000, 0x00000000, 0x00000000, 0x00000000
5495};
5496#endif
5497
5498#define RX_CPU_SCRATCH_BASE 0x30000
5499#define RX_CPU_SCRATCH_SIZE 0x04000
5500#define TX_CPU_SCRATCH_BASE 0x34000
5501#define TX_CPU_SCRATCH_SIZE 0x04000
5502
5503/* tp->lock is held. */
5504static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5505{
5506 int i;
5507
5d9428de
ES
5508 BUG_ON(offset == TX_CPU_BASE &&
5509 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
1da177e4 5510
b5d3772c
MC
5511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5512 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5513
5514 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5515 return 0;
5516 }
1da177e4
LT
5517 if (offset == RX_CPU_BASE) {
5518 for (i = 0; i < 10000; i++) {
5519 tw32(offset + CPU_STATE, 0xffffffff);
5520 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5521 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5522 break;
5523 }
5524
5525 tw32(offset + CPU_STATE, 0xffffffff);
5526 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5527 udelay(10);
5528 } else {
5529 for (i = 0; i < 10000; i++) {
5530 tw32(offset + CPU_STATE, 0xffffffff);
5531 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5532 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5533 break;
5534 }
5535 }
5536
5537 if (i >= 10000) {
5538 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5539 "and %s CPU\n",
5540 tp->dev->name,
5541 (offset == RX_CPU_BASE ? "RX" : "TX"));
5542 return -ENODEV;
5543 }
ec41c7df
MC
5544
5545 /* Clear firmware's nvram arbitration. */
5546 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5547 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
1da177e4
LT
5548 return 0;
5549}
5550
5551struct fw_info {
5552 unsigned int text_base;
5553 unsigned int text_len;
50da859d 5554 const u32 *text_data;
1da177e4
LT
5555 unsigned int rodata_base;
5556 unsigned int rodata_len;
50da859d 5557 const u32 *rodata_data;
1da177e4
LT
5558 unsigned int data_base;
5559 unsigned int data_len;
50da859d 5560 const u32 *data_data;
1da177e4
LT
5561};
5562
5563/* tp->lock is held. */
5564static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5565 int cpu_scratch_size, struct fw_info *info)
5566{
ec41c7df 5567 int err, lock_err, i;
1da177e4
LT
5568 void (*write_op)(struct tg3 *, u32, u32);
5569
5570 if (cpu_base == TX_CPU_BASE &&
5571 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5572 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5573 "TX cpu firmware on %s which is 5705.\n",
5574 tp->dev->name);
5575 return -EINVAL;
5576 }
5577
5578 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5579 write_op = tg3_write_mem;
5580 else
5581 write_op = tg3_write_indirect_reg32;
5582
1b628151
MC
5583 /* It is possible that bootcode is still loading at this point.
5584 * Get the nvram lock first before halting the cpu.
5585 */
ec41c7df 5586 lock_err = tg3_nvram_lock(tp);
1da177e4 5587 err = tg3_halt_cpu(tp, cpu_base);
ec41c7df
MC
5588 if (!lock_err)
5589 tg3_nvram_unlock(tp);
1da177e4
LT
5590 if (err)
5591 goto out;
5592
5593 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5594 write_op(tp, cpu_scratch_base + i, 0);
5595 tw32(cpu_base + CPU_STATE, 0xffffffff);
5596 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5597 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5598 write_op(tp, (cpu_scratch_base +
5599 (info->text_base & 0xffff) +
5600 (i * sizeof(u32))),
5601 (info->text_data ?
5602 info->text_data[i] : 0));
5603 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5604 write_op(tp, (cpu_scratch_base +
5605 (info->rodata_base & 0xffff) +
5606 (i * sizeof(u32))),
5607 (info->rodata_data ?
5608 info->rodata_data[i] : 0));
5609 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5610 write_op(tp, (cpu_scratch_base +
5611 (info->data_base & 0xffff) +
5612 (i * sizeof(u32))),
5613 (info->data_data ?
5614 info->data_data[i] : 0));
5615
5616 err = 0;
5617
5618out:
1da177e4
LT
5619 return err;
5620}
5621
5622/* tp->lock is held. */
5623static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5624{
5625 struct fw_info info;
5626 int err, i;
5627
5628 info.text_base = TG3_FW_TEXT_ADDR;
5629 info.text_len = TG3_FW_TEXT_LEN;
5630 info.text_data = &tg3FwText[0];
5631 info.rodata_base = TG3_FW_RODATA_ADDR;
5632 info.rodata_len = TG3_FW_RODATA_LEN;
5633 info.rodata_data = &tg3FwRodata[0];
5634 info.data_base = TG3_FW_DATA_ADDR;
5635 info.data_len = TG3_FW_DATA_LEN;
5636 info.data_data = NULL;
5637
5638 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5639 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5640 &info);
5641 if (err)
5642 return err;
5643
5644 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5645 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5646 &info);
5647 if (err)
5648 return err;
5649
5650 /* Now startup only the RX cpu. */
5651 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5652 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5653
5654 for (i = 0; i < 5; i++) {
5655 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5656 break;
5657 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5658 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5659 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5660 udelay(1000);
5661 }
5662 if (i >= 5) {
5663 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5664 "to set RX CPU PC, is %08x should be %08x\n",
5665 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5666 TG3_FW_TEXT_ADDR);
5667 return -ENODEV;
5668 }
5669 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5670 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5671
5672 return 0;
5673}
5674
1da177e4
LT
5675
5676#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5677#define TG3_TSO_FW_RELASE_MINOR 0x6
5678#define TG3_TSO_FW_RELEASE_FIX 0x0
5679#define TG3_TSO_FW_START_ADDR 0x08000000
5680#define TG3_TSO_FW_TEXT_ADDR 0x08000000
5681#define TG3_TSO_FW_TEXT_LEN 0x1aa0
5682#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5683#define TG3_TSO_FW_RODATA_LEN 0x60
5684#define TG3_TSO_FW_DATA_ADDR 0x08001b20
5685#define TG3_TSO_FW_DATA_LEN 0x30
5686#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5687#define TG3_TSO_FW_SBSS_LEN 0x2c
5688#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5689#define TG3_TSO_FW_BSS_LEN 0x894
5690
50da859d 5691static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5692 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5693 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5694 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5695 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5696 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5697 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5698 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5699 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5700 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5701 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5702 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5703 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5704 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5705 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5706 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5707 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5708 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5709 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5710 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5711 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5712 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5713 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5714 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5715 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5716 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5717 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5718 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5719 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5720 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5721 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5722 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5723 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5724 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5725 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5726 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5727 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5728 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5729 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5730 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5731 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5732 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5733 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5734 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5735 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5736 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5737 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5738 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5739 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5740 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5741 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5742 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5743 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5744 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5745 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5746 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5747 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5748 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5749 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5750 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5751 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5752 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5753 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5754 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5755 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5756 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5757 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5758 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5759 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5760 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5761 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5762 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5763 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5764 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5765 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5766 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5767 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5768 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5769 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5770 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5771 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5772 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5773 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5774 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5775 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5776 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5777 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5778 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5779 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5780 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5781 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5782 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5783 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5784 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5785 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5786 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5787 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5788 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5789 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5790 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5791 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5792 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5793 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5794 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5795 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5796 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5797 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5798 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5799 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5800 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5801 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5802 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5803 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5804 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5805 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5806 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5807 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5808 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5809 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5810 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5811 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5812 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5813 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5814 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5815 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5816 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5817 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5818 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5819 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5820 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5821 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5822 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5823 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5824 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5825 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5826 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5827 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5828 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5829 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5830 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5831 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5832 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5833 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5834 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5835 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5836 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5837 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5838 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5839 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5840 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5841 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5842 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5843 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5844 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5845 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5846 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5847 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5848 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5849 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5850 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5851 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5852 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5853 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5854 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5855 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5856 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5857 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5858 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5859 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5860 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5861 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5862 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5863 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5864 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5865 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5866 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5867 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5868 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5869 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5870 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5871 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5872 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5873 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5874 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5875 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5876 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5877 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5878 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5879 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5880 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5881 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5882 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5883 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5884 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5885 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5886 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5887 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5888 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5889 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5890 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5891 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5892 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5893 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5894 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5895 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5896 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5897 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5898 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5899 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5900 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5901 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5902 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5903 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5904 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5905 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5906 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5907 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5908 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5909 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5910 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5911 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5912 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5913 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5914 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5915 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5916 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5917 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5918 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5919 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5920 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5921 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5922 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5923 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5924 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5925 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5926 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5927 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5928 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5929 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5930 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5931 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5932 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5933 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5934 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5935 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5936 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5937 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5938 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5939 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5940 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5941 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5942 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5943 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5944 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5945 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5946 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5947 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5948 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5949 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5950 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5951 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5952 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5953 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5954 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5955 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5956 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5957 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5958 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5959 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5960 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5961 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5962 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5963 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5964 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5965 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5966 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5967 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5968 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5969 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5970 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5971 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5972 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5973 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5974 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5975 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5976};
5977
50da859d 5978static const u32 tg3TsoFwRodata[] = {
1da177e4
LT
5979 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5980 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5981 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5982 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5983 0x00000000,
5984};
5985
50da859d 5986static const u32 tg3TsoFwData[] = {
1da177e4
LT
5987 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5988 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5989 0x00000000,
5990};
5991
/* 5705 needs a special version of the TSO firmware.  Layout of the
 * image in NIC-local memory:
 *
 *   text   @ 0x00010000, 0xe90 bytes
 *   rodata @ 0x00010e90, 0x50  bytes
 *   data   @ 0x00010f00, 0x20  bytes
 *   sbss   @ 0x00010f20, 0x28  bytes
 *   bss    @ 0x00010f50, 0x88  bytes
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2	/* sic: historical misspelling, kept for compatibility */
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
6007
50da859d 6008static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
6009 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6010 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6011 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6012 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6013 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6014 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6015 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6016 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6017 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6018 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6019 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6020 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6021 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6022 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6023 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6024 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6025 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6026 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6027 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6028 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6029 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6030 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6031 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6032 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6033 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6034 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6035 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6036 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6037 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6038 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6039 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6040 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6041 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6042 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6043 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6044 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6045 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6046 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6047 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6048 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6049 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6050 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6051 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6052 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6053 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6054 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6055 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6056 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6057 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6058 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6059 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6060 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6061 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6062 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6063 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6064 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6065 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6066 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6067 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6068 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6069 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6070 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6071 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6072 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6073 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6074 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6075 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6076 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6077 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6078 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6079 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6080 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6081 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6082 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6083 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6084 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6085 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6086 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6087 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6088 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6089 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6090 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6091 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6092 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6093 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6094 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6095 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6096 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6097 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6098 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6099 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6100 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6101 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6102 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6103 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6104 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6105 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6106 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6107 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6108 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6109 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6110 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6111 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6112 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6113 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6114 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6115 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6116 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6117 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6118 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6119 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6120 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6121 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6122 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6123 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6124 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6125 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6126 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6127 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6128 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6129 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6130 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6131 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6132 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6133 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6134 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6135 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6136 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6137 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6138 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6139 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6140 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6141 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6142 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6143 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6144 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6145 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6146 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6147 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6148 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6149 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6150 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6151 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6152 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6153 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6154 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6155 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6156 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6157 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6158 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6159 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6160 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6161 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6162 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6163 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6164 0x00000000, 0x00000000, 0x00000000,
6165};
6166
50da859d 6167static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
1da177e4
LT
6168 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6169 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6170 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6171 0x00000000, 0x00000000, 0x00000000,
6172};
6173
50da859d 6174static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
1da177e4
LT
6175 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6176 0x00000000, 0x00000000, 0x00000000,
6177};
6178
6179/* tp->lock is held. */
6180static int tg3_load_tso_firmware(struct tg3 *tp)
6181{
6182 struct fw_info info;
6183 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6184 int err, i;
6185
6186 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6187 return 0;
6188
6189 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6190 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6191 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6192 info.text_data = &tg3Tso5FwText[0];
6193 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6194 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6195 info.rodata_data = &tg3Tso5FwRodata[0];
6196 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6197 info.data_len = TG3_TSO5_FW_DATA_LEN;
6198 info.data_data = &tg3Tso5FwData[0];
6199 cpu_base = RX_CPU_BASE;
6200 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6201 cpu_scratch_size = (info.text_len +
6202 info.rodata_len +
6203 info.data_len +
6204 TG3_TSO5_FW_SBSS_LEN +
6205 TG3_TSO5_FW_BSS_LEN);
6206 } else {
6207 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6208 info.text_len = TG3_TSO_FW_TEXT_LEN;
6209 info.text_data = &tg3TsoFwText[0];
6210 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6211 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6212 info.rodata_data = &tg3TsoFwRodata[0];
6213 info.data_base = TG3_TSO_FW_DATA_ADDR;
6214 info.data_len = TG3_TSO_FW_DATA_LEN;
6215 info.data_data = &tg3TsoFwData[0];
6216 cpu_base = TX_CPU_BASE;
6217 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6218 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6219 }
6220
6221 err = tg3_load_firmware_cpu(tp, cpu_base,
6222 cpu_scratch_base, cpu_scratch_size,
6223 &info);
6224 if (err)
6225 return err;
6226
6227 /* Now startup the cpu. */
6228 tw32(cpu_base + CPU_STATE, 0xffffffff);
6229 tw32_f(cpu_base + CPU_PC, info.text_base);
6230
6231 for (i = 0; i < 5; i++) {
6232 if (tr32(cpu_base + CPU_PC) == info.text_base)
6233 break;
6234 tw32(cpu_base + CPU_STATE, 0xffffffff);
6235 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6236 tw32_f(cpu_base + CPU_PC, info.text_base);
6237 udelay(1000);
6238 }
6239 if (i >= 5) {
6240 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6241 "to set CPU PC, is %08x should be %08x\n",
6242 tp->dev->name, tr32(cpu_base + CPU_PC),
6243 info.text_base);
6244 return -ENODEV;
6245 }
6246 tw32(cpu_base + CPU_STATE, 0xffffffff);
6247 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6248 return 0;
6249}
6250
1da177e4
LT
6251
6252/* tp->lock is held. */
986e0aeb 6253static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1da177e4
LT
6254{
6255 u32 addr_high, addr_low;
6256 int i;
6257
6258 addr_high = ((tp->dev->dev_addr[0] << 8) |
6259 tp->dev->dev_addr[1]);
6260 addr_low = ((tp->dev->dev_addr[2] << 24) |
6261 (tp->dev->dev_addr[3] << 16) |
6262 (tp->dev->dev_addr[4] << 8) |
6263 (tp->dev->dev_addr[5] << 0));
6264 for (i = 0; i < 4; i++) {
986e0aeb
MC
6265 if (i == 1 && skip_mac_1)
6266 continue;
1da177e4
LT
6267 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6268 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6269 }
6270
6271 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6273 for (i = 0; i < 12; i++) {
6274 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6275 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6276 }
6277 }
6278
6279 addr_high = (tp->dev->dev_addr[0] +
6280 tp->dev->dev_addr[1] +
6281 tp->dev->dev_addr[2] +
6282 tp->dev->dev_addr[3] +
6283 tp->dev->dev_addr[4] +
6284 tp->dev->dev_addr[5]) &
6285 TX_BACKOFF_SEED_MASK;
6286 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6287}
6288
6289static int tg3_set_mac_addr(struct net_device *dev, void *p)
6290{
6291 struct tg3 *tp = netdev_priv(dev);
6292 struct sockaddr *addr = p;
986e0aeb 6293 int err = 0, skip_mac_1 = 0;
1da177e4 6294
f9804ddb
MC
6295 if (!is_valid_ether_addr(addr->sa_data))
6296 return -EINVAL;
6297
1da177e4
LT
6298 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6299
e75f7c90
MC
6300 if (!netif_running(dev))
6301 return 0;
6302
58712ef9 6303 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
986e0aeb 6304 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 6305
986e0aeb
MC
6306 addr0_high = tr32(MAC_ADDR_0_HIGH);
6307 addr0_low = tr32(MAC_ADDR_0_LOW);
6308 addr1_high = tr32(MAC_ADDR_1_HIGH);
6309 addr1_low = tr32(MAC_ADDR_1_LOW);
6310
6311 /* Skip MAC addr 1 if ASF is using it. */
6312 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6313 !(addr1_high == 0 && addr1_low == 0))
6314 skip_mac_1 = 1;
58712ef9 6315 }
986e0aeb
MC
6316 spin_lock_bh(&tp->lock);
6317 __tg3_set_mac_addr(tp, skip_mac_1);
6318 spin_unlock_bh(&tp->lock);
1da177e4 6319
b9ec6c1b 6320 return err;
1da177e4
LT
6321}
6322
6323/* tp->lock is held. */
6324static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6325 dma_addr_t mapping, u32 maxlen_flags,
6326 u32 nic_addr)
6327{
6328 tg3_write_mem(tp,
6329 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6330 ((u64) mapping >> 32));
6331 tg3_write_mem(tp,
6332 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6333 ((u64) mapping & 0xffffffff));
6334 tg3_write_mem(tp,
6335 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6336 maxlen_flags);
6337
6338 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6339 tg3_write_mem(tp,
6340 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6341 nic_addr);
6342}
6343
6344static void __tg3_set_rx_mode(struct net_device *);
d244c892 6345static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
6346{
6347 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6348 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6349 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6350 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6351 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6352 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6353 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6354 }
6355 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6356 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6357 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6358 u32 val = ec->stats_block_coalesce_usecs;
6359
6360 if (!netif_carrier_ok(tp->dev))
6361 val = 0;
6362
6363 tw32(HOSTCC_STAT_COAL_TICKS, val);
6364 }
6365}
1da177e4
LT
6366
6367/* tp->lock is held. */
8e7a22e3 6368static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6369{
6370 u32 val, rdmac_mode;
6371 int i, err, limit;
6372
6373 tg3_disable_ints(tp);
6374
6375 tg3_stop_fw(tp);
6376
6377 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6378
6379 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 6380 tg3_abort_hw(tp, 1);
1da177e4
LT
6381 }
6382
36da4d86 6383 if (reset_phy)
d4d2c558
MC
6384 tg3_phy_reset(tp);
6385
1da177e4
LT
6386 err = tg3_chip_reset(tp);
6387 if (err)
6388 return err;
6389
6390 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6391
b5af7126
MC
6392 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6393 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
d30cdd28
MC
6394 val = tr32(TG3_CPMU_CTRL);
6395 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6396 tw32(TG3_CPMU_CTRL, val);
9acb961e
MC
6397
6398 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6399 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6400 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6401 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6402
6403 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6404 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6405 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6406 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6407
6408 val = tr32(TG3_CPMU_HST_ACC);
6409 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6410 val |= CPMU_HST_ACC_MACCLK_6_25;
6411 tw32(TG3_CPMU_HST_ACC, val);
d30cdd28
MC
6412 }
6413
1da177e4
LT
6414 /* This works around an issue with Athlon chipsets on
6415 * B3 tigon3 silicon. This bit has no effect on any
6416 * other revision. But do not set this on PCI Express
795d01c5 6417 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 6418 */
795d01c5
MC
6419 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6420 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6421 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6422 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6423 }
1da177e4
LT
6424
6425 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6426 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6427 val = tr32(TG3PCI_PCISTATE);
6428 val |= PCISTATE_RETRY_SAME_DMA;
6429 tw32(TG3PCI_PCISTATE, val);
6430 }
6431
0d3031d9
MC
6432 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6433 /* Allow reads and writes to the
6434 * APE register and memory space.
6435 */
6436 val = tr32(TG3PCI_PCISTATE);
6437 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6438 PCISTATE_ALLOW_APE_SHMEM_WR;
6439 tw32(TG3PCI_PCISTATE, val);
6440 }
6441
1da177e4
LT
6442 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6443 /* Enable some hw fixes. */
6444 val = tr32(TG3PCI_MSI_DATA);
6445 val |= (1 << 26) | (1 << 28) | (1 << 29);
6446 tw32(TG3PCI_MSI_DATA, val);
6447 }
6448
6449 /* Descriptor ring init may make accesses to the
6450 * NIC SRAM area to setup the TX descriptors, so we
6451 * can only do this after the hardware has been
6452 * successfully reset.
6453 */
32d8c572
MC
6454 err = tg3_init_rings(tp);
6455 if (err)
6456 return err;
1da177e4 6457
9936bcf6
MC
6458 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6459 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
6460 /* This value is determined during the probe time DMA
6461 * engine test, tg3_test_dma.
6462 */
6463 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6464 }
1da177e4
LT
6465
6466 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6467 GRC_MODE_4X_NIC_SEND_RINGS |
6468 GRC_MODE_NO_TX_PHDR_CSUM |
6469 GRC_MODE_NO_RX_PHDR_CSUM);
6470 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
6471
6472 /* Pseudo-header checksum is done by hardware logic and not
6473 * the offload processers, so make the chip do the pseudo-
6474 * header checksums on receive. For transmit it is more
6475 * convenient to do the pseudo-header checksum in software
6476 * as Linux does that on transmit for us in all cases.
6477 */
6478 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
6479
6480 tw32(GRC_MODE,
6481 tp->grc_mode |
6482 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6483
6484 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6485 val = tr32(GRC_MISC_CFG);
6486 val &= ~0xff;
6487 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6488 tw32(GRC_MISC_CFG, val);
6489
6490 /* Initialize MBUF/DESC pool. */
cbf46853 6491 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
6492 /* Do nothing. */
6493 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6494 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6496 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6497 else
6498 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6499 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6500 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6501 }
1da177e4
LT
6502 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6503 int fw_len;
6504
6505 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6506 TG3_TSO5_FW_RODATA_LEN +
6507 TG3_TSO5_FW_DATA_LEN +
6508 TG3_TSO5_FW_SBSS_LEN +
6509 TG3_TSO5_FW_BSS_LEN);
6510 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6511 tw32(BUFMGR_MB_POOL_ADDR,
6512 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6513 tw32(BUFMGR_MB_POOL_SIZE,
6514 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6515 }
1da177e4 6516
0f893dc6 6517 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
6518 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6519 tp->bufmgr_config.mbuf_read_dma_low_water);
6520 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6521 tp->bufmgr_config.mbuf_mac_rx_low_water);
6522 tw32(BUFMGR_MB_HIGH_WATER,
6523 tp->bufmgr_config.mbuf_high_water);
6524 } else {
6525 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6526 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6527 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6528 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6529 tw32(BUFMGR_MB_HIGH_WATER,
6530 tp->bufmgr_config.mbuf_high_water_jumbo);
6531 }
6532 tw32(BUFMGR_DMA_LOW_WATER,
6533 tp->bufmgr_config.dma_low_water);
6534 tw32(BUFMGR_DMA_HIGH_WATER,
6535 tp->bufmgr_config.dma_high_water);
6536
6537 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6538 for (i = 0; i < 2000; i++) {
6539 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6540 break;
6541 udelay(10);
6542 }
6543 if (i >= 2000) {
6544 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6545 tp->dev->name);
6546 return -ENODEV;
6547 }
6548
6549 /* Setup replenish threshold. */
f92905de
MC
6550 val = tp->rx_pending / 8;
6551 if (val == 0)
6552 val = 1;
6553 else if (val > tp->rx_std_max_post)
6554 val = tp->rx_std_max_post;
b5d3772c
MC
6555 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6556 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6557 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6558
6559 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6560 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6561 }
f92905de
MC
6562
6563 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6564
6565 /* Initialize TG3_BDINFO's at:
6566 * RCVDBDI_STD_BD: standard eth size rx ring
6567 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6568 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6569 *
6570 * like so:
6571 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6572 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6573 * ring attribute flags
6574 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6575 *
6576 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6577 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6578 *
6579 * The size of each ring is fixed in the firmware, but the location is
6580 * configurable.
6581 */
6582 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6583 ((u64) tp->rx_std_mapping >> 32));
6584 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6585 ((u64) tp->rx_std_mapping & 0xffffffff));
6586 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6587 NIC_SRAM_RX_BUFFER_DESC);
6588
6589 /* Don't even try to program the JUMBO/MINI buffer descriptor
6590 * configs on 5705.
6591 */
6592 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6593 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6594 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6595 } else {
6596 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6597 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6598
6599 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6600 BDINFO_FLAGS_DISABLED);
6601
6602 /* Setup replenish threshold. */
6603 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6604
0f893dc6 6605 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6606 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6607 ((u64) tp->rx_jumbo_mapping >> 32));
6608 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6609 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6610 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6611 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6612 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6613 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6614 } else {
6615 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6616 BDINFO_FLAGS_DISABLED);
6617 }
6618
6619 }
6620
6621 /* There is only one send ring on 5705/5750, no need to explicitly
6622 * disable the others.
6623 */
6624 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6625 /* Clear out send RCB ring in SRAM. */
6626 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6627 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6628 BDINFO_FLAGS_DISABLED);
6629 }
6630
6631 tp->tx_prod = 0;
6632 tp->tx_cons = 0;
6633 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6634 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6635
6636 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6637 tp->tx_desc_mapping,
6638 (TG3_TX_RING_SIZE <<
6639 BDINFO_FLAGS_MAXLEN_SHIFT),
6640 NIC_SRAM_TX_BUFFER_DESC);
6641
6642 /* There is only one receive return ring on 5705/5750, no need
6643 * to explicitly disable the others.
6644 */
6645 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6646 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6647 i += TG3_BDINFO_SIZE) {
6648 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6649 BDINFO_FLAGS_DISABLED);
6650 }
6651 }
6652
6653 tp->rx_rcb_ptr = 0;
6654 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6655
6656 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6657 tp->rx_rcb_mapping,
6658 (TG3_RX_RCB_RING_SIZE(tp) <<
6659 BDINFO_FLAGS_MAXLEN_SHIFT),
6660 0);
6661
6662 tp->rx_std_ptr = tp->rx_pending;
6663 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6664 tp->rx_std_ptr);
6665
0f893dc6 6666 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6667 tp->rx_jumbo_pending : 0;
6668 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6669 tp->rx_jumbo_ptr);
6670
6671 /* Initialize MAC address and backoff seed. */
986e0aeb 6672 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
6673
6674 /* MTU + ethernet header + FCS + optional VLAN tag */
6675 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6676
6677 /* The slot time is changed by tg3_setup_phy if we
6678 * run at gigabit with half duplex.
6679 */
6680 tw32(MAC_TX_LENGTHS,
6681 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6682 (6 << TX_LENGTHS_IPG_SHIFT) |
6683 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6684
6685 /* Receive rules. */
6686 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6687 tw32(RCVLPC_CONFIG, 0x0181);
6688
6689 /* Calculate RDMAC_MODE setting early, we need it to determine
6690 * the RCVLPC_STATE_ENABLE mask.
6691 */
6692 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6693 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6694 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6695 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6696 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 6697
d30cdd28
MC
6698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6699 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6700 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6701 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6702
85e94ced
MC
6703 /* If statement applies to 5705 and 5750 PCI devices only */
6704 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6705 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6706 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4 6707 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
c13e3713 6708 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
6709 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6710 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6711 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6712 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6713 }
6714 }
6715
85e94ced
MC
6716 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6717 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6718
1da177e4
LT
6719 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6720 rdmac_mode |= (1 << 27);
1da177e4
LT
6721
6722 /* Receive/send statistics. */
1661394e
MC
6723 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6724 val = tr32(RCVLPC_STATS_ENABLE);
6725 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6726 tw32(RCVLPC_STATS_ENABLE, val);
6727 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6728 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
1da177e4
LT
6729 val = tr32(RCVLPC_STATS_ENABLE);
6730 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6731 tw32(RCVLPC_STATS_ENABLE, val);
6732 } else {
6733 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6734 }
6735 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6736 tw32(SNDDATAI_STATSENAB, 0xffffff);
6737 tw32(SNDDATAI_STATSCTRL,
6738 (SNDDATAI_SCTRL_ENABLE |
6739 SNDDATAI_SCTRL_FASTUPD));
6740
6741 /* Setup host coalescing engine. */
6742 tw32(HOSTCC_MODE, 0);
6743 for (i = 0; i < 2000; i++) {
6744 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6745 break;
6746 udelay(10);
6747 }
6748
d244c892 6749 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6750
6751 /* set status block DMA address */
6752 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6753 ((u64) tp->status_mapping >> 32));
6754 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6755 ((u64) tp->status_mapping & 0xffffffff));
6756
6757 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6758 /* Status/statistics block address. See tg3_timer,
6759 * the tg3_periodic_fetch_stats call there, and
6760 * tg3_get_stats to see how this works for 5705/5750 chips.
6761 */
1da177e4
LT
6762 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6763 ((u64) tp->stats_mapping >> 32));
6764 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6765 ((u64) tp->stats_mapping & 0xffffffff));
6766 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6767 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6768 }
6769
6770 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6771
6772 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6773 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6774 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6775 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6776
6777 /* Clear statistics/status block in chip, and status block in ram. */
6778 for (i = NIC_SRAM_STATS_BLK;
6779 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6780 i += sizeof(u32)) {
6781 tg3_write_mem(tp, i, 0);
6782 udelay(40);
6783 }
6784 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6785
c94e3941
MC
6786 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6787 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6788 /* reset to prevent losing 1st rx packet intermittently */
6789 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6790 udelay(10);
6791 }
6792
1da177e4
LT
6793 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6794 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
e8f3f6ca
MC
6795 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6796 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6797 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6798 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
6799 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6800 udelay(40);
6801
314fba34 6802 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9d26e213 6803 * If TG3_FLG2_IS_NIC is zero, we should read the
314fba34
MC
6804 * register to preserve the GPIO settings for LOMs. The GPIOs,
6805 * whether used as inputs or outputs, are set by boot code after
6806 * reset.
6807 */
9d26e213 6808 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
314fba34
MC
6809 u32 gpio_mask;
6810
9d26e213
MC
6811 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6812 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6813 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
6814
6815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6816 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6817 GRC_LCLCTRL_GPIO_OUTPUT3;
6818
af36e6b6
MC
6819 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6820 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6821
aaf84465 6822 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
6823 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6824
6825 /* GPIO1 must be driven high for eeprom write protect */
9d26e213
MC
6826 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6827 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6828 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 6829 }
1da177e4
LT
6830 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6831 udelay(100);
6832
09ee929c 6833 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 6834 tp->last_tag = 0;
1da177e4
LT
6835
6836 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6837 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6838 udelay(40);
6839 }
6840
6841 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6842 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6843 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6844 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6845 WDMAC_MODE_LNGREAD_ENAB);
6846
85e94ced
MC
6847 /* If statement applies to 5705 and 5750 PCI devices only */
6848 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6849 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6851 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6852 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6853 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6854 /* nothing */
6855 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6856 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6857 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6858 val |= WDMAC_MODE_RX_ACCEL;
6859 }
6860 }
6861
d9ab5ad1 6862 /* Enable host coalescing bug fix */
af36e6b6 6863 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
d30cdd28 6864 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
9936bcf6
MC
6865 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6866 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
d9ab5ad1
MC
6867 val |= (1 << 29);
6868
1da177e4
LT
6869 tw32_f(WDMAC_MODE, val);
6870 udelay(40);
6871
9974a356
MC
6872 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6873 u16 pcix_cmd;
6874
6875 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6876 &pcix_cmd);
1da177e4 6877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
6878 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6879 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6880 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
6881 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6882 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 6883 }
9974a356
MC
6884 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6885 pcix_cmd);
1da177e4
LT
6886 }
6887
6888 tw32_f(RDMAC_MODE, rdmac_mode);
6889 udelay(40);
6890
6891 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6892 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6893 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
6894
6895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6896 tw32(SNDDATAC_MODE,
6897 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6898 else
6899 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6900
1da177e4
LT
6901 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6902 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6903 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6904 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1da177e4
LT
6905 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6906 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
1da177e4
LT
6907 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6908 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6909
6910 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6911 err = tg3_load_5701_a0_firmware_fix(tp);
6912 if (err)
6913 return err;
6914 }
6915
1da177e4
LT
6916 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6917 err = tg3_load_tso_firmware(tp);
6918 if (err)
6919 return err;
6920 }
1da177e4
LT
6921
6922 tp->tx_mode = TX_MODE_ENABLE;
6923 tw32_f(MAC_TX_MODE, tp->tx_mode);
6924 udelay(100);
6925
6926 tp->rx_mode = RX_MODE_ENABLE;
9936bcf6
MC
6927 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
af36e6b6
MC
6929 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6930
1da177e4
LT
6931 tw32_f(MAC_RX_MODE, tp->rx_mode);
6932 udelay(10);
6933
6934 if (tp->link_config.phy_is_low_power) {
6935 tp->link_config.phy_is_low_power = 0;
6936 tp->link_config.speed = tp->link_config.orig_speed;
6937 tp->link_config.duplex = tp->link_config.orig_duplex;
6938 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6939 }
6940
6941 tp->mi_mode = MAC_MI_MODE_BASE;
6942 tw32_f(MAC_MI_MODE, tp->mi_mode);
6943 udelay(80);
6944
6945 tw32(MAC_LED_CTRL, tp->led_ctrl);
6946
6947 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6948 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6949 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6950 udelay(10);
6951 }
6952 tw32_f(MAC_RX_MODE, tp->rx_mode);
6953 udelay(10);
6954
6955 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6956 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6957 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6958 /* Set drive transmission level to 1.2V */
6959 /* only if the signal pre-emphasis bit is not set */
6960 val = tr32(MAC_SERDES_CFG);
6961 val &= 0xfffff000;
6962 val |= 0x880;
6963 tw32(MAC_SERDES_CFG, val);
6964 }
6965 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6966 tw32(MAC_SERDES_CFG, 0x616000);
6967 }
6968
6969 /* Prevent chip from dropping frames when flow control
6970 * is enabled.
6971 */
6972 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6973
6974 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6975 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6976 /* Use hardware link auto-negotiation */
6977 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6978 }
6979
d4d2c558
MC
6980 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6981 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6982 u32 tmp;
6983
6984 tmp = tr32(SERDES_RX_CTRL);
6985 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6986 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6987 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6988 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6989 }
6990
36da4d86 6991 err = tg3_setup_phy(tp, 0);
1da177e4
LT
6992 if (err)
6993 return err;
6994
715116a1
MC
6995 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6996 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
1da177e4
LT
6997 u32 tmp;
6998
6999 /* Clear CRC stats. */
569a5df8
MC
7000 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7001 tg3_writephy(tp, MII_TG3_TEST1,
7002 tmp | MII_TG3_TEST1_CRC_EN);
1da177e4
LT
7003 tg3_readphy(tp, 0x14, &tmp);
7004 }
7005 }
7006
7007 __tg3_set_rx_mode(tp->dev);
7008
7009 /* Initialize receive rules. */
7010 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7011 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7012 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7013 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7014
4cf78e4f 7015 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 7016 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
7017 limit = 8;
7018 else
7019 limit = 16;
7020 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7021 limit -= 4;
7022 switch (limit) {
7023 case 16:
7024 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7025 case 15:
7026 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7027 case 14:
7028 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7029 case 13:
7030 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7031 case 12:
7032 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7033 case 11:
7034 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7035 case 10:
7036 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7037 case 9:
7038 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7039 case 8:
7040 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7041 case 7:
7042 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7043 case 6:
7044 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7045 case 5:
7046 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7047 case 4:
7048 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7049 case 3:
7050 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7051 case 2:
7052 case 1:
7053
7054 default:
7055 break;
7056 };
7057
9ce768ea
MC
7058 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7059 /* Write our heartbeat update interval to APE. */
7060 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7061 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 7062
1da177e4
LT
7063 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7064
1da177e4
LT
7065 return 0;
7066}
7067
7068/* Called at device open time to get the chip ready for
7069 * packet processing. Invoked with tp->lock held.
7070 */
8e7a22e3 7071static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
7072{
7073 int err;
7074
7075 /* Force the chip into D0. */
bc1c7567 7076 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
7077 if (err)
7078 goto out;
7079
7080 tg3_switch_clocks(tp);
7081
7082 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7083
8e7a22e3 7084 err = tg3_reset_hw(tp, reset_phy);
1da177e4
LT
7085
7086out:
7087 return err;
7088}
7089
/* Read the 32-bit statistics register REG and add its value into the
 * 64-bit accumulator PSTAT ({high, low} word pair), propagating a carry
 * into .high when the .low word wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7096
/* Fold the chip's 32-bit TX/RX MAC and RCVLPC statistics counters into
 * the 64-bit accumulators in tp->hw_stats.  Called from the once-per-second
 * path of tg3_timer (see tg3_timer below) with tp->lock held.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Counters are only accumulated while the link is up. */
	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7137
/* Periodic driver timer.  Handles the non-tagged-status interrupt
 * workaround, once-per-second link polling and statistics collection,
 * and the every-2-seconds ASF heartbeat.  Always re-arms itself at the
 * end (or at restart_timer when skipped due to irq_sync).
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* An irq-synchronization is in progress; skip all work but keep
	 * the timer running.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine dropped its enable bit: schedule a full
		 * chip reset from process context and bail out.  The lock
		 * must be released before scheduling the reset task.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but the link state changed, or link is
			 * down but the PCS reports sync/signal: re-setup.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode bits to
					 * resynchronize the SERDES.
					 */
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the firmware's doorbell to deliver the
			 * command.
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7257
81789ef5 7258static int tg3_request_irq(struct tg3 *tp)
fcfa0a32 7259{
7d12e780 7260 irq_handler_t fn;
fcfa0a32
MC
7261 unsigned long flags;
7262 struct net_device *dev = tp->dev;
7263
7264 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7265 fn = tg3_msi;
7266 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7267 fn = tg3_msi_1shot;
1fb9df5d 7268 flags = IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7269 } else {
7270 fn = tg3_interrupt;
7271 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7272 fn = tg3_interrupt_tagged;
1fb9df5d 7273 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7274 }
7275 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7276}
7277
7938109f
MC
7278static int tg3_test_interrupt(struct tg3 *tp)
7279{
7280 struct net_device *dev = tp->dev;
b16250e3 7281 int err, i, intr_ok = 0;
7938109f 7282
d4bc3927
MC
7283 if (!netif_running(dev))
7284 return -ENODEV;
7285
7938109f
MC
7286 tg3_disable_ints(tp);
7287
7288 free_irq(tp->pdev->irq, dev);
7289
7290 err = request_irq(tp->pdev->irq, tg3_test_isr,
1fb9df5d 7291 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7938109f
MC
7292 if (err)
7293 return err;
7294
38f3843e 7295 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
7296 tg3_enable_ints(tp);
7297
7298 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7299 HOSTCC_MODE_NOW);
7300
7301 for (i = 0; i < 5; i++) {
b16250e3
MC
7302 u32 int_mbox, misc_host_ctrl;
7303
09ee929c
MC
7304 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7305 TG3_64BIT_REG_LOW);
b16250e3
MC
7306 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7307
7308 if ((int_mbox != 0) ||
7309 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7310 intr_ok = 1;
7938109f 7311 break;
b16250e3
MC
7312 }
7313
7938109f
MC
7314 msleep(10);
7315 }
7316
7317 tg3_disable_ints(tp);
7318
7319 free_irq(tp->pdev->irq, dev);
6aa20a22 7320
fcfa0a32 7321 err = tg3_request_irq(tp);
7938109f
MC
7322
7323 if (err)
7324 return err;
7325
b16250e3 7326 if (intr_ok)
7938109f
MC
7327 return 0;
7328
7329 return -EIO;
7330}
7331
7332/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7333 * successfully restored
7334 */
7335static int tg3_test_msi(struct tg3 *tp)
7336{
7337 struct net_device *dev = tp->dev;
7338 int err;
7339 u16 pci_cmd;
7340
7341 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7342 return 0;
7343
7344 /* Turn off SERR reporting in case MSI terminates with Master
7345 * Abort.
7346 */
7347 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7348 pci_write_config_word(tp->pdev, PCI_COMMAND,
7349 pci_cmd & ~PCI_COMMAND_SERR);
7350
7351 err = tg3_test_interrupt(tp);
7352
7353 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7354
7355 if (!err)
7356 return 0;
7357
7358 /* other failures */
7359 if (err != -EIO)
7360 return err;
7361
7362 /* MSI test failed, go back to INTx mode */
7363 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7364 "switching to INTx mode. Please report this failure to "
7365 "the PCI maintainer and include system chipset information.\n",
7366 tp->dev->name);
7367
7368 free_irq(tp->pdev->irq, dev);
7369 pci_disable_msi(tp->pdev);
7370
7371 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7372
fcfa0a32 7373 err = tg3_request_irq(tp);
7938109f
MC
7374 if (err)
7375 return err;
7376
7377 /* Need to reset the chip because the MSI cycle may have terminated
7378 * with Master Abort.
7379 */
f47c11ee 7380 tg3_full_lock(tp, 1);
7938109f 7381
944d980e 7382 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8e7a22e3 7383 err = tg3_init_hw(tp, 1);
7938109f 7384
f47c11ee 7385 tg3_full_unlock(tp);
7938109f
MC
7386
7387 if (err)
7388 free_irq(tp->pdev->irq, dev);
7389
7390 return err;
7391}
7392
1da177e4
LT
7393static int tg3_open(struct net_device *dev)
7394{
7395 struct tg3 *tp = netdev_priv(dev);
7396 int err;
7397
c49a1561
MC
7398 netif_carrier_off(tp->dev);
7399
f47c11ee 7400 tg3_full_lock(tp, 0);
1da177e4 7401
bc1c7567 7402 err = tg3_set_power_state(tp, PCI_D0);
12862086
IS
7403 if (err) {
7404 tg3_full_unlock(tp);
bc1c7567 7405 return err;
12862086 7406 }
bc1c7567 7407
1da177e4
LT
7408 tg3_disable_ints(tp);
7409 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7410
f47c11ee 7411 tg3_full_unlock(tp);
1da177e4
LT
7412
7413 /* The placement of this call is tied
7414 * to the setup and use of Host TX descriptors.
7415 */
7416 err = tg3_alloc_consistent(tp);
7417 if (err)
7418 return err;
7419
7544b097 7420 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
fac9b83e
DM
7421 /* All MSI supporting chips should support tagged
7422 * status. Assert that this is the case.
7423 */
7424 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7425 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7426 "Not using MSI.\n", tp->dev->name);
7427 } else if (pci_enable_msi(tp->pdev) == 0) {
88b06bc2
MC
7428 u32 msi_mode;
7429
7430 msi_mode = tr32(MSGINT_MODE);
7431 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7432 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7433 }
7434 }
fcfa0a32 7435 err = tg3_request_irq(tp);
1da177e4
LT
7436
7437 if (err) {
88b06bc2
MC
7438 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7439 pci_disable_msi(tp->pdev);
7440 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7441 }
1da177e4
LT
7442 tg3_free_consistent(tp);
7443 return err;
7444 }
7445
bea3348e
SH
7446 napi_enable(&tp->napi);
7447
f47c11ee 7448 tg3_full_lock(tp, 0);
1da177e4 7449
8e7a22e3 7450 err = tg3_init_hw(tp, 1);
1da177e4 7451 if (err) {
944d980e 7452 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7453 tg3_free_rings(tp);
7454 } else {
fac9b83e
DM
7455 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7456 tp->timer_offset = HZ;
7457 else
7458 tp->timer_offset = HZ / 10;
7459
7460 BUG_ON(tp->timer_offset > HZ);
7461 tp->timer_counter = tp->timer_multiplier =
7462 (HZ / tp->timer_offset);
7463 tp->asf_counter = tp->asf_multiplier =
28fbef78 7464 ((HZ / tp->timer_offset) * 2);
1da177e4
LT
7465
7466 init_timer(&tp->timer);
7467 tp->timer.expires = jiffies + tp->timer_offset;
7468 tp->timer.data = (unsigned long) tp;
7469 tp->timer.function = tg3_timer;
1da177e4
LT
7470 }
7471
f47c11ee 7472 tg3_full_unlock(tp);
1da177e4
LT
7473
7474 if (err) {
bea3348e 7475 napi_disable(&tp->napi);
88b06bc2
MC
7476 free_irq(tp->pdev->irq, dev);
7477 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7478 pci_disable_msi(tp->pdev);
7479 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7480 }
1da177e4
LT
7481 tg3_free_consistent(tp);
7482 return err;
7483 }
7484
7938109f
MC
7485 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7486 err = tg3_test_msi(tp);
fac9b83e 7487
7938109f 7488 if (err) {
f47c11ee 7489 tg3_full_lock(tp, 0);
7938109f
MC
7490
7491 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7492 pci_disable_msi(tp->pdev);
7493 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7494 }
944d980e 7495 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f
MC
7496 tg3_free_rings(tp);
7497 tg3_free_consistent(tp);
7498
f47c11ee 7499 tg3_full_unlock(tp);
7938109f 7500
bea3348e
SH
7501 napi_disable(&tp->napi);
7502
7938109f
MC
7503 return err;
7504 }
fcfa0a32
MC
7505
7506 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7507 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
b5d3772c 7508 u32 val = tr32(PCIE_TRANSACTION_CFG);
fcfa0a32 7509
b5d3772c
MC
7510 tw32(PCIE_TRANSACTION_CFG,
7511 val | PCIE_TRANS_CFG_1SHOT_MSI);
fcfa0a32
MC
7512 }
7513 }
7938109f
MC
7514 }
7515
f47c11ee 7516 tg3_full_lock(tp, 0);
1da177e4 7517
7938109f
MC
7518 add_timer(&tp->timer);
7519 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
1da177e4
LT
7520 tg3_enable_ints(tp);
7521
f47c11ee 7522 tg3_full_unlock(tp);
1da177e4
LT
7523
7524 netif_start_queue(dev);
7525
7526 return 0;
7527}
7528
#if 0
/* Debug-only register/state dump (compiled out).  Prints PCI status,
 * every major hardware block's MODE/STATUS registers, the BDINFO
 * structures, SRAM control blocks, the host status/statistics blocks,
 * mailboxes, and the NIC-side TX/RX descriptors.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7756
7757static struct net_device_stats *tg3_get_stats(struct net_device *);
7758static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7759
7760static int tg3_close(struct net_device *dev)
7761{
7762 struct tg3 *tp = netdev_priv(dev);
7763
bea3348e 7764 napi_disable(&tp->napi);
28e53bdd 7765 cancel_work_sync(&tp->reset_task);
7faa006f 7766
1da177e4
LT
7767 netif_stop_queue(dev);
7768
7769 del_timer_sync(&tp->timer);
7770
f47c11ee 7771 tg3_full_lock(tp, 1);
1da177e4
LT
7772#if 0
7773 tg3_dump_state(tp);
7774#endif
7775
7776 tg3_disable_ints(tp);
7777
944d980e 7778 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4 7779 tg3_free_rings(tp);
5cf64b8a 7780 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
1da177e4 7781
f47c11ee 7782 tg3_full_unlock(tp);
1da177e4 7783
88b06bc2
MC
7784 free_irq(tp->pdev->irq, dev);
7785 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7786 pci_disable_msi(tp->pdev);
7787 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7788 }
1da177e4
LT
7789
7790 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7791 sizeof(tp->net_stats_prev));
7792 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7793 sizeof(tp->estats_prev));
7794
7795 tg3_free_consistent(tp);
7796
bc1c7567
MC
7797 tg3_set_power_state(tp, PCI_D3hot);
7798
7799 netif_carrier_off(tp->dev);
7800
1da177e4
LT
7801 return 0;
7802}
7803
7804static inline unsigned long get_stat64(tg3_stat64_t *val)
7805{
7806 unsigned long ret;
7807
7808#if (BITS_PER_LONG == 32)
7809 ret = val->low;
7810#else
7811 ret = ((u64)val->high << 32) | ((u64)val->low);
7812#endif
7813 return ret;
7814}
7815
7816static unsigned long calc_crc_errors(struct tg3 *tp)
7817{
7818 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7819
7820 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7821 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7822 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1da177e4
LT
7823 u32 val;
7824
f47c11ee 7825 spin_lock_bh(&tp->lock);
569a5df8
MC
7826 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
7827 tg3_writephy(tp, MII_TG3_TEST1,
7828 val | MII_TG3_TEST1_CRC_EN);
1da177e4
LT
7829 tg3_readphy(tp, 0x14, &val);
7830 } else
7831 val = 0;
f47c11ee 7832 spin_unlock_bh(&tp->lock);
1da177e4
LT
7833
7834 tp->phy_crc_errors += val;
7835
7836 return tp->phy_crc_errors;
7837 }
7838
7839 return get_stat64(&hw_stats->rx_fcs_errors);
7840}
7841
7842#define ESTAT_ADD(member) \
7843 estats->member = old_estats->member + \
7844 get_stat64(&hw_stats->member)
7845
7846static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7847{
7848 struct tg3_ethtool_stats *estats = &tp->estats;
7849 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7850 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7851
7852 if (!hw_stats)
7853 return old_estats;
7854
7855 ESTAT_ADD(rx_octets);
7856 ESTAT_ADD(rx_fragments);
7857 ESTAT_ADD(rx_ucast_packets);
7858 ESTAT_ADD(rx_mcast_packets);
7859 ESTAT_ADD(rx_bcast_packets);
7860 ESTAT_ADD(rx_fcs_errors);
7861 ESTAT_ADD(rx_align_errors);
7862 ESTAT_ADD(rx_xon_pause_rcvd);
7863 ESTAT_ADD(rx_xoff_pause_rcvd);
7864 ESTAT_ADD(rx_mac_ctrl_rcvd);
7865 ESTAT_ADD(rx_xoff_entered);
7866 ESTAT_ADD(rx_frame_too_long_errors);
7867 ESTAT_ADD(rx_jabbers);
7868 ESTAT_ADD(rx_undersize_packets);
7869 ESTAT_ADD(rx_in_length_errors);
7870 ESTAT_ADD(rx_out_length_errors);
7871 ESTAT_ADD(rx_64_or_less_octet_packets);
7872 ESTAT_ADD(rx_65_to_127_octet_packets);
7873 ESTAT_ADD(rx_128_to_255_octet_packets);
7874 ESTAT_ADD(rx_256_to_511_octet_packets);
7875 ESTAT_ADD(rx_512_to_1023_octet_packets);
7876 ESTAT_ADD(rx_1024_to_1522_octet_packets);
7877 ESTAT_ADD(rx_1523_to_2047_octet_packets);
7878 ESTAT_ADD(rx_2048_to_4095_octet_packets);
7879 ESTAT_ADD(rx_4096_to_8191_octet_packets);
7880 ESTAT_ADD(rx_8192_to_9022_octet_packets);
7881
7882 ESTAT_ADD(tx_octets);
7883 ESTAT_ADD(tx_collisions);
7884 ESTAT_ADD(tx_xon_sent);
7885 ESTAT_ADD(tx_xoff_sent);
7886 ESTAT_ADD(tx_flow_control);
7887 ESTAT_ADD(tx_mac_errors);
7888 ESTAT_ADD(tx_single_collisions);
7889 ESTAT_ADD(tx_mult_collisions);
7890 ESTAT_ADD(tx_deferred);
7891 ESTAT_ADD(tx_excessive_collisions);
7892 ESTAT_ADD(tx_late_collisions);
7893 ESTAT_ADD(tx_collide_2times);
7894 ESTAT_ADD(tx_collide_3times);
7895 ESTAT_ADD(tx_collide_4times);
7896 ESTAT_ADD(tx_collide_5times);
7897 ESTAT_ADD(tx_collide_6times);
7898 ESTAT_ADD(tx_collide_7times);
7899 ESTAT_ADD(tx_collide_8times);
7900 ESTAT_ADD(tx_collide_9times);
7901 ESTAT_ADD(tx_collide_10times);
7902 ESTAT_ADD(tx_collide_11times);
7903 ESTAT_ADD(tx_collide_12times);
7904 ESTAT_ADD(tx_collide_13times);
7905 ESTAT_ADD(tx_collide_14times);
7906 ESTAT_ADD(tx_collide_15times);
7907 ESTAT_ADD(tx_ucast_packets);
7908 ESTAT_ADD(tx_mcast_packets);
7909 ESTAT_ADD(tx_bcast_packets);
7910 ESTAT_ADD(tx_carrier_sense_errors);
7911 ESTAT_ADD(tx_discards);
7912 ESTAT_ADD(tx_errors);
7913
7914 ESTAT_ADD(dma_writeq_full);
7915 ESTAT_ADD(dma_write_prioq_full);
7916 ESTAT_ADD(rxbds_empty);
7917 ESTAT_ADD(rx_discards);
7918 ESTAT_ADD(rx_errors);
7919 ESTAT_ADD(rx_threshold_hit);
7920
7921 ESTAT_ADD(dma_readq_full);
7922 ESTAT_ADD(dma_read_prioq_full);
7923 ESTAT_ADD(tx_comp_queue_full);
7924
7925 ESTAT_ADD(ring_set_send_prod_index);
7926 ESTAT_ADD(ring_status_update);
7927 ESTAT_ADD(nic_irqs);
7928 ESTAT_ADD(nic_avoided_irqs);
7929 ESTAT_ADD(nic_tx_threshold_hit);
7930
7931 return estats;
7932}
7933
7934static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7935{
7936 struct tg3 *tp = netdev_priv(dev);
7937 struct net_device_stats *stats = &tp->net_stats;
7938 struct net_device_stats *old_stats = &tp->net_stats_prev;
7939 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7940
7941 if (!hw_stats)
7942 return old_stats;
7943
7944 stats->rx_packets = old_stats->rx_packets +
7945 get_stat64(&hw_stats->rx_ucast_packets) +
7946 get_stat64(&hw_stats->rx_mcast_packets) +
7947 get_stat64(&hw_stats->rx_bcast_packets);
6aa20a22 7948
1da177e4
LT
7949 stats->tx_packets = old_stats->tx_packets +
7950 get_stat64(&hw_stats->tx_ucast_packets) +
7951 get_stat64(&hw_stats->tx_mcast_packets) +
7952 get_stat64(&hw_stats->tx_bcast_packets);
7953
7954 stats->rx_bytes = old_stats->rx_bytes +
7955 get_stat64(&hw_stats->rx_octets);
7956 stats->tx_bytes = old_stats->tx_bytes +
7957 get_stat64(&hw_stats->tx_octets);
7958
7959 stats->rx_errors = old_stats->rx_errors +
4f63b877 7960 get_stat64(&hw_stats->rx_errors);
1da177e4
LT
7961 stats->tx_errors = old_stats->tx_errors +
7962 get_stat64(&hw_stats->tx_errors) +
7963 get_stat64(&hw_stats->tx_mac_errors) +
7964 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7965 get_stat64(&hw_stats->tx_discards);
7966
7967 stats->multicast = old_stats->multicast +
7968 get_stat64(&hw_stats->rx_mcast_packets);
7969 stats->collisions = old_stats->collisions +
7970 get_stat64(&hw_stats->tx_collisions);
7971
7972 stats->rx_length_errors = old_stats->rx_length_errors +
7973 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7974 get_stat64(&hw_stats->rx_undersize_packets);
7975
7976 stats->rx_over_errors = old_stats->rx_over_errors +
7977 get_stat64(&hw_stats->rxbds_empty);
7978 stats->rx_frame_errors = old_stats->rx_frame_errors +
7979 get_stat64(&hw_stats->rx_align_errors);
7980 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7981 get_stat64(&hw_stats->tx_discards);
7982 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7983 get_stat64(&hw_stats->tx_carrier_sense_errors);
7984
7985 stats->rx_crc_errors = old_stats->rx_crc_errors +
7986 calc_crc_errors(tp);
7987
4f63b877
JL
7988 stats->rx_missed_errors = old_stats->rx_missed_errors +
7989 get_stat64(&hw_stats->rx_discards);
7990
1da177e4
LT
7991 return stats;
7992}
7993
7994static inline u32 calc_crc(unsigned char *buf, int len)
7995{
7996 u32 reg;
7997 u32 tmp;
7998 int j, k;
7999
8000 reg = 0xffffffff;
8001
8002 for (j = 0; j < len; j++) {
8003 reg ^= buf[j];
8004
8005 for (k = 0; k < 8; k++) {
8006 tmp = reg & 0x01;
8007
8008 reg >>= 1;
8009
8010 if (tmp) {
8011 reg ^= 0xedb88320;
8012 }
8013 }
8014 }
8015
8016 return ~reg;
8017}
8018
8019static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8020{
8021 /* accept or reject all multicast frames */
8022 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8023 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8024 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8025 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8026}
8027
8028static void __tg3_set_rx_mode(struct net_device *dev)
8029{
8030 struct tg3 *tp = netdev_priv(dev);
8031 u32 rx_mode;
8032
8033 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8034 RX_MODE_KEEP_VLAN_TAG);
8035
8036 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8037 * flag clear.
8038 */
8039#if TG3_VLAN_TAG_USED
8040 if (!tp->vlgrp &&
8041 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8042 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8043#else
8044 /* By definition, VLAN is disabled always in this
8045 * case.
8046 */
8047 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8048 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8049#endif
8050
8051 if (dev->flags & IFF_PROMISC) {
8052 /* Promiscuous mode. */
8053 rx_mode |= RX_MODE_PROMISC;
8054 } else if (dev->flags & IFF_ALLMULTI) {
8055 /* Accept all multicast. */
8056 tg3_set_multi (tp, 1);
8057 } else if (dev->mc_count < 1) {
8058 /* Reject all multicast. */
8059 tg3_set_multi (tp, 0);
8060 } else {
8061 /* Accept one or more multicast(s). */
8062 struct dev_mc_list *mclist;
8063 unsigned int i;
8064 u32 mc_filter[4] = { 0, };
8065 u32 regidx;
8066 u32 bit;
8067 u32 crc;
8068
8069 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8070 i++, mclist = mclist->next) {
8071
8072 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8073 bit = ~crc & 0x7f;
8074 regidx = (bit & 0x60) >> 5;
8075 bit &= 0x1f;
8076 mc_filter[regidx] |= (1 << bit);
8077 }
8078
8079 tw32(MAC_HASH_REG_0, mc_filter[0]);
8080 tw32(MAC_HASH_REG_1, mc_filter[1]);
8081 tw32(MAC_HASH_REG_2, mc_filter[2]);
8082 tw32(MAC_HASH_REG_3, mc_filter[3]);
8083 }
8084
8085 if (rx_mode != tp->rx_mode) {
8086 tp->rx_mode = rx_mode;
8087 tw32_f(MAC_RX_MODE, rx_mode);
8088 udelay(10);
8089 }
8090}
8091
/* net_device_ops .ndo_set_rx_mode entry point: apply the RX filter
 * under the full lock; a no-op while the interface is down (the
 * filter is reprogrammed on open).
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
8103
/* Size of the buffer ethtool must supply for a register dump. */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool_ops .get_regs_len: fixed-size register dump. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8110
8111static void tg3_get_regs(struct net_device *dev,
8112 struct ethtool_regs *regs, void *_p)
8113{
8114 u32 *p = _p;
8115 struct tg3 *tp = netdev_priv(dev);
8116 u8 *orig_p = _p;
8117 int i;
8118
8119 regs->version = 0;
8120
8121 memset(p, 0, TG3_REGDUMP_LEN);
8122
bc1c7567
MC
8123 if (tp->link_config.phy_is_low_power)
8124 return;
8125
f47c11ee 8126 tg3_full_lock(tp, 0);
1da177e4
LT
8127
8128#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8129#define GET_REG32_LOOP(base,len) \
8130do { p = (u32 *)(orig_p + (base)); \
8131 for (i = 0; i < len; i += 4) \
8132 __GET_REG32((base) + i); \
8133} while (0)
8134#define GET_REG32_1(reg) \
8135do { p = (u32 *)(orig_p + (reg)); \
8136 __GET_REG32((reg)); \
8137} while (0)
8138
8139 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8140 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8141 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8142 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8143 GET_REG32_1(SNDDATAC_MODE);
8144 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8145 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8146 GET_REG32_1(SNDBDC_MODE);
8147 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8148 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8149 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8150 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8151 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8152 GET_REG32_1(RCVDCC_MODE);
8153 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8154 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8155 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8156 GET_REG32_1(MBFREE_MODE);
8157 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8158 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8159 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8160 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8161 GET_REG32_LOOP(WDMAC_MODE, 0x08);
091465d7
CE
8162 GET_REG32_1(RX_CPU_MODE);
8163 GET_REG32_1(RX_CPU_STATE);
8164 GET_REG32_1(RX_CPU_PGMCTR);
8165 GET_REG32_1(RX_CPU_HWBKPT);
8166 GET_REG32_1(TX_CPU_MODE);
8167 GET_REG32_1(TX_CPU_STATE);
8168 GET_REG32_1(TX_CPU_PGMCTR);
1da177e4
LT
8169 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8170 GET_REG32_LOOP(FTQ_RESET, 0x120);
8171 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8172 GET_REG32_1(DMAC_MODE);
8173 GET_REG32_LOOP(GRC_MODE, 0x4c);
8174 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8175 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8176
8177#undef __GET_REG32
8178#undef GET_REG32_LOOP
8179#undef GET_REG32_1
8180
f47c11ee 8181 tg3_full_unlock(tp);
1da177e4
LT
8182}
8183
8184static int tg3_get_eeprom_len(struct net_device *dev)
8185{
8186 struct tg3 *tp = netdev_priv(dev);
8187
8188 return tp->nvram_size;
8189}
8190
8191static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
1820180b 8192static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
8193
8194static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8195{
8196 struct tg3 *tp = netdev_priv(dev);
8197 int ret;
8198 u8 *pd;
8199 u32 i, offset, len, val, b_offset, b_count;
8200
bc1c7567
MC
8201 if (tp->link_config.phy_is_low_power)
8202 return -EAGAIN;
8203
1da177e4
LT
8204 offset = eeprom->offset;
8205 len = eeprom->len;
8206 eeprom->len = 0;
8207
8208 eeprom->magic = TG3_EEPROM_MAGIC;
8209
8210 if (offset & 3) {
8211 /* adjustments to start on required 4 byte boundary */
8212 b_offset = offset & 3;
8213 b_count = 4 - b_offset;
8214 if (b_count > len) {
8215 /* i.e. offset=1 len=2 */
8216 b_count = len;
8217 }
8218 ret = tg3_nvram_read(tp, offset-b_offset, &val);
8219 if (ret)
8220 return ret;
8221 val = cpu_to_le32(val);
8222 memcpy(data, ((char*)&val) + b_offset, b_count);
8223 len -= b_count;
8224 offset += b_count;
8225 eeprom->len += b_count;
8226 }
8227
8228 /* read bytes upto the last 4 byte boundary */
8229 pd = &data[eeprom->len];
8230 for (i = 0; i < (len - (len & 3)); i += 4) {
8231 ret = tg3_nvram_read(tp, offset + i, &val);
8232 if (ret) {
8233 eeprom->len += i;
8234 return ret;
8235 }
8236 val = cpu_to_le32(val);
8237 memcpy(pd + i, &val, 4);
8238 }
8239 eeprom->len += i;
8240
8241 if (len & 3) {
8242 /* read last bytes not ending on 4 byte boundary */
8243 pd = &data[eeprom->len];
8244 b_count = len & 3;
8245 b_offset = offset + len - b_count;
8246 ret = tg3_nvram_read(tp, b_offset, &val);
8247 if (ret)
8248 return ret;
8249 val = cpu_to_le32(val);
8250 memcpy(pd, ((char*)&val), b_count);
8251 eeprom->len += b_count;
8252 }
8253 return 0;
8254}
8255
6aa20a22 8256static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
1da177e4
LT
8257
8258static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8259{
8260 struct tg3 *tp = netdev_priv(dev);
8261 int ret;
8262 u32 offset, len, b_offset, odd_len, start, end;
8263 u8 *buf;
8264
bc1c7567
MC
8265 if (tp->link_config.phy_is_low_power)
8266 return -EAGAIN;
8267
1da177e4
LT
8268 if (eeprom->magic != TG3_EEPROM_MAGIC)
8269 return -EINVAL;
8270
8271 offset = eeprom->offset;
8272 len = eeprom->len;
8273
8274 if ((b_offset = (offset & 3))) {
8275 /* adjustments to start on required 4 byte boundary */
8276 ret = tg3_nvram_read(tp, offset-b_offset, &start);
8277 if (ret)
8278 return ret;
8279 start = cpu_to_le32(start);
8280 len += b_offset;
8281 offset &= ~3;
1c8594b4
MC
8282 if (len < 4)
8283 len = 4;
1da177e4
LT
8284 }
8285
8286 odd_len = 0;
1c8594b4 8287 if (len & 3) {
1da177e4
LT
8288 /* adjustments to end on required 4 byte boundary */
8289 odd_len = 1;
8290 len = (len + 3) & ~3;
8291 ret = tg3_nvram_read(tp, offset+len-4, &end);
8292 if (ret)
8293 return ret;
8294 end = cpu_to_le32(end);
8295 }
8296
8297 buf = data;
8298 if (b_offset || odd_len) {
8299 buf = kmalloc(len, GFP_KERNEL);
ab0049b4 8300 if (!buf)
1da177e4
LT
8301 return -ENOMEM;
8302 if (b_offset)
8303 memcpy(buf, &start, 4);
8304 if (odd_len)
8305 memcpy(buf+len-4, &end, 4);
8306 memcpy(buf + b_offset, data, eeprom->len);
8307 }
8308
8309 ret = tg3_nvram_write_block(tp, offset, len, buf);
8310
8311 if (buf != data)
8312 kfree(buf);
8313
8314 return ret;
8315}
8316
8317static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8318{
8319 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8320
1da177e4
LT
8321 cmd->supported = (SUPPORTED_Autoneg);
8322
8323 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8324 cmd->supported |= (SUPPORTED_1000baseT_Half |
8325 SUPPORTED_1000baseT_Full);
8326
ef348144 8327 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
8328 cmd->supported |= (SUPPORTED_100baseT_Half |
8329 SUPPORTED_100baseT_Full |
8330 SUPPORTED_10baseT_Half |
8331 SUPPORTED_10baseT_Full |
8332 SUPPORTED_MII);
ef348144
KK
8333 cmd->port = PORT_TP;
8334 } else {
1da177e4 8335 cmd->supported |= SUPPORTED_FIBRE;
ef348144
KK
8336 cmd->port = PORT_FIBRE;
8337 }
6aa20a22 8338
1da177e4
LT
8339 cmd->advertising = tp->link_config.advertising;
8340 if (netif_running(dev)) {
8341 cmd->speed = tp->link_config.active_speed;
8342 cmd->duplex = tp->link_config.active_duplex;
8343 }
1da177e4
LT
8344 cmd->phy_address = PHY_ADDR;
8345 cmd->transceiver = 0;
8346 cmd->autoneg = tp->link_config.autoneg;
8347 cmd->maxtxpkt = 0;
8348 cmd->maxrxpkt = 0;
8349 return 0;
8350}
6aa20a22 8351
1da177e4
LT
8352static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8353{
8354 struct tg3 *tp = netdev_priv(dev);
6aa20a22
JG
8355
8356 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
8357 /* These are the only valid advertisement bits allowed. */
8358 if (cmd->autoneg == AUTONEG_ENABLE &&
8359 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8360 ADVERTISED_1000baseT_Full |
8361 ADVERTISED_Autoneg |
8362 ADVERTISED_FIBRE)))
8363 return -EINVAL;
37ff238d
MC
8364 /* Fiber can only do SPEED_1000. */
8365 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8366 (cmd->speed != SPEED_1000))
8367 return -EINVAL;
8368 /* Copper cannot force SPEED_1000. */
8369 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8370 (cmd->speed == SPEED_1000))
8371 return -EINVAL;
8372 else if ((cmd->speed == SPEED_1000) &&
8373 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8374 return -EINVAL;
1da177e4 8375
f47c11ee 8376 tg3_full_lock(tp, 0);
1da177e4
LT
8377
8378 tp->link_config.autoneg = cmd->autoneg;
8379 if (cmd->autoneg == AUTONEG_ENABLE) {
405d8e5c
AG
8380 tp->link_config.advertising = (cmd->advertising |
8381 ADVERTISED_Autoneg);
1da177e4
LT
8382 tp->link_config.speed = SPEED_INVALID;
8383 tp->link_config.duplex = DUPLEX_INVALID;
8384 } else {
8385 tp->link_config.advertising = 0;
8386 tp->link_config.speed = cmd->speed;
8387 tp->link_config.duplex = cmd->duplex;
8388 }
6aa20a22 8389
24fcad6b
MC
8390 tp->link_config.orig_speed = tp->link_config.speed;
8391 tp->link_config.orig_duplex = tp->link_config.duplex;
8392 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8393
1da177e4
LT
8394 if (netif_running(dev))
8395 tg3_setup_phy(tp, 1);
8396
f47c11ee 8397 tg3_full_unlock(tp);
6aa20a22 8398
1da177e4
LT
8399 return 0;
8400}
6aa20a22 8401
1da177e4
LT
8402static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8403{
8404 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8405
1da177e4
LT
8406 strcpy(info->driver, DRV_MODULE_NAME);
8407 strcpy(info->version, DRV_MODULE_VERSION);
c4e6575c 8408 strcpy(info->fw_version, tp->fw_ver);
1da177e4
LT
8409 strcpy(info->bus_info, pci_name(tp->pdev));
8410}
6aa20a22 8411
1da177e4
LT
8412static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8413{
8414 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8415
a85feb8c
GZ
8416 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8417 wol->supported = WAKE_MAGIC;
8418 else
8419 wol->supported = 0;
1da177e4
LT
8420 wol->wolopts = 0;
8421 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8422 wol->wolopts = WAKE_MAGIC;
8423 memset(&wol->sopass, 0, sizeof(wol->sopass));
8424}
6aa20a22 8425
1da177e4
LT
8426static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8427{
8428 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8429
1da177e4
LT
8430 if (wol->wolopts & ~WAKE_MAGIC)
8431 return -EINVAL;
8432 if ((wol->wolopts & WAKE_MAGIC) &&
a85feb8c 8433 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
1da177e4 8434 return -EINVAL;
6aa20a22 8435
f47c11ee 8436 spin_lock_bh(&tp->lock);
1da177e4
LT
8437 if (wol->wolopts & WAKE_MAGIC)
8438 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8439 else
8440 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 8441 spin_unlock_bh(&tp->lock);
6aa20a22 8442
1da177e4
LT
8443 return 0;
8444}
6aa20a22 8445
1da177e4
LT
8446static u32 tg3_get_msglevel(struct net_device *dev)
8447{
8448 struct tg3 *tp = netdev_priv(dev);
8449 return tp->msg_enable;
8450}
6aa20a22 8451
1da177e4
LT
8452static void tg3_set_msglevel(struct net_device *dev, u32 value)
8453{
8454 struct tg3 *tp = netdev_priv(dev);
8455 tp->msg_enable = value;
8456}
6aa20a22 8457
1da177e4
LT
8458static int tg3_set_tso(struct net_device *dev, u32 value)
8459{
8460 struct tg3 *tp = netdev_priv(dev);
8461
8462 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8463 if (value)
8464 return -EINVAL;
8465 return 0;
8466 }
b5d3772c
MC
8467 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8468 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9936bcf6 8469 if (value) {
b0026624 8470 dev->features |= NETIF_F_TSO6;
9936bcf6
MC
8471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8472 dev->features |= NETIF_F_TSO_ECN;
8473 } else
8474 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
b0026624 8475 }
1da177e4
LT
8476 return ethtool_op_set_tso(dev, value);
8477}
6aa20a22 8478
1da177e4
LT
/* ethtool nway_reset: restart autonegotiation on the copper PHY.
 *
 * Returns -EAGAIN if the interface is down, -EINVAL for SERDES links
 * or when the PHY state does not permit a renegotiation, 0 on success.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Autoneg restart does not apply to SERDES-based links. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is deliberately read twice and only the
	 * second result is used — presumably the first read clears
	 * latched PHY state; confirm against the PHY datasheet before
	 * "simplifying" this.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Restart autoneg, forcing it on in case the link was
		 * established by parallel detection.
		 */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
6aa20a22 8505
1da177e4
LT
8506static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8507{
8508 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8509
1da177e4
LT
8510 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8511 ering->rx_mini_max_pending = 0;
4f81c32b
MC
8512 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8513 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8514 else
8515 ering->rx_jumbo_max_pending = 0;
8516
8517 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
8518
8519 ering->rx_pending = tp->rx_pending;
8520 ering->rx_mini_pending = 0;
4f81c32b
MC
8521 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8522 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8523 else
8524 ering->rx_jumbo_pending = 0;
8525
1da177e4
LT
8526 ering->tx_pending = tp->tx_pending;
8527}
6aa20a22 8528
1da177e4
LT
/* ethtool set_ringparam: validate and apply new ring sizes.  If the
 * interface is running, traffic is stopped, the chip is halted and
 * re-initialized with the new sizes, then traffic is restarted.
 *
 * Returns 0 on success, -EINVAL for out-of-range sizes, or the error
 * from tg3_restart_hw().
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* TX must be big enough for a maximally fragmented skb (3x on
	 * chips with the TSO bug workaround); RX/TX cannot exceed the
	 * hardware ring sizes.
	 */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce NAPI/IRQ activity before touching the ring sizes. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot post more than 64 standard RX BDs. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	/* Re-initialize the chip so the new sizes take effect. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8568
1da177e4
LT
8569static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8570{
8571 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8572
1da177e4
LT
8573 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8574 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8575 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8576}
6aa20a22 8577
1da177e4
LT
/* ethtool set_pauseparam: update the flow-control flags and, if the
 * interface is running, halt and re-initialize the chip so the new
 * settings are programmed into the hardware.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Quiesce NAPI/IRQ activity before reconfiguring. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	/* Apply by re-initializing the chip. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8614
1da177e4
LT
8615static u32 tg3_get_rx_csum(struct net_device *dev)
8616{
8617 struct tg3 *tp = netdev_priv(dev);
8618 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8619}
6aa20a22 8620
1da177e4
LT
8621static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8622{
8623 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8624
1da177e4
LT
8625 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8626 if (data != 0)
8627 return -EINVAL;
8628 return 0;
8629 }
6aa20a22 8630
f47c11ee 8631 spin_lock_bh(&tp->lock);
1da177e4
LT
8632 if (data)
8633 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8634 else
8635 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 8636 spin_unlock_bh(&tp->lock);
6aa20a22 8637
1da177e4
LT
8638 return 0;
8639}
6aa20a22 8640
1da177e4
LT
8641static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8642{
8643 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8644
1da177e4
LT
8645 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8646 if (data != 0)
8647 return -EINVAL;
8648 return 0;
8649 }
6aa20a22 8650
af36e6b6 8651 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 8652 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
8653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6460d948 8655 ethtool_op_set_tx_ipv6_csum(dev, data);
1da177e4 8656 else
9c27dbdf 8657 ethtool_op_set_tx_csum(dev, data);
1da177e4
LT
8658
8659 return 0;
8660}
8661
b9f2c044 8662static int tg3_get_sset_count (struct net_device *dev, int sset)
1da177e4 8663{
b9f2c044
JG
8664 switch (sset) {
8665 case ETH_SS_TEST:
8666 return TG3_NUM_TEST;
8667 case ETH_SS_STATS:
8668 return TG3_NUM_STATS;
8669 default:
8670 return -EOPNOTSUPP;
8671 }
4cafd3f5
MC
8672}
8673
1da177e4
LT
8674static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8675{
8676 switch (stringset) {
8677 case ETH_SS_STATS:
8678 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8679 break;
4cafd3f5
MC
8680 case ETH_SS_TEST:
8681 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8682 break;
1da177e4
LT
8683 default:
8684 WARN_ON(1); /* we need a WARN() */
8685 break;
8686 }
8687}
8688
4009a93d
MC
8689static int tg3_phys_id(struct net_device *dev, u32 data)
8690{
8691 struct tg3 *tp = netdev_priv(dev);
8692 int i;
8693
8694 if (!netif_running(tp->dev))
8695 return -EAGAIN;
8696
8697 if (data == 0)
8698 data = 2;
8699
8700 for (i = 0; i < (data * 2); i++) {
8701 if ((i % 2) == 0)
8702 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8703 LED_CTRL_1000MBPS_ON |
8704 LED_CTRL_100MBPS_ON |
8705 LED_CTRL_10MBPS_ON |
8706 LED_CTRL_TRAFFIC_OVERRIDE |
8707 LED_CTRL_TRAFFIC_BLINK |
8708 LED_CTRL_TRAFFIC_LED);
6aa20a22 8709
4009a93d
MC
8710 else
8711 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8712 LED_CTRL_TRAFFIC_OVERRIDE);
8713
8714 if (msleep_interruptible(500))
8715 break;
8716 }
8717 tw32(MAC_LED_CTRL, tp->led_ctrl);
8718 return 0;
8719}
8720
1da177e4
LT
/* ethtool get_ethtool_stats: copy the driver-maintained statistics
 * block into the caller-supplied u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8727
566f86ad 8728#define NVRAM_TEST_SIZE 0x100
a5767dec
MC
8729#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8730#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8731#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
b16250e3
MC
8732#define NVRAM_SELFBOOT_HW_SIZE 0x20
8733#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
566f86ad
MC
8734
/* NVRAM self test: read the image and validate its checksum.
 *
 * Three layouts are distinguished by the magic word:
 *  - standard EEPROM image: CRC fields at offsets 0x10 and 0xfc;
 *  - selfboot format 1: 8-bit sum over the image must be zero
 *    (revision 2 excludes the 4-byte MBA field from the sum);
 *  - hardware selfboot: per-byte parity bits packed at fixed offsets.
 *
 * Unknown selfboot revisions are skipped (return 0).  Returns 0 on
 * success, -EIO on read failure or checksum mismatch, -ENOMEM if the
 * scratch buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes to read from the magic word. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: nothing to check. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((cpu_to_be32(buf[0]) & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums to zero modulo 256. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 carry 7 parity bits each. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits, byte 17
				 * another 8.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte's population-count parity must agree
		 * with its stored parity bit.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8869
ca43007a
MC
8870#define TG3_SERDES_TIMEOUT_SEC 2
8871#define TG3_COPPER_TIMEOUT_SEC 6
8872
8873static int tg3_test_link(struct tg3 *tp)
8874{
8875 int i, max;
8876
8877 if (!netif_running(tp->dev))
8878 return -ENODEV;
8879
4c987487 8880 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
8881 max = TG3_SERDES_TIMEOUT_SEC;
8882 else
8883 max = TG3_COPPER_TIMEOUT_SEC;
8884
8885 for (i = 0; i < max; i++) {
8886 if (netif_carrier_ok(tp->dev))
8887 return 0;
8888
8889 if (msleep_interruptible(1000))
8890 break;
8891 }
8892
8893 return -EIO;
8894}
8895
/* Only test the commonly used registers */
/* Register self test.  For each table entry: save the register, write
 * all-zeros then all-ones (within the declared masks), and verify the
 * read-only bits are preserved while the read/write bits stick.  The
 * original value is restored after each register.  Returns 0 on
 * success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
	/* Applicability flags: which chip families each entry covers. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9116
7942e1db
MC
9117static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9118{
f71e1309 9119 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
9120 int i;
9121 u32 j;
9122
e9edda69 9123 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7942e1db
MC
9124 for (j = 0; j < len; j += 4) {
9125 u32 val;
9126
9127 tg3_write_mem(tp, offset + j, test_pattern[i]);
9128 tg3_read_mem(tp, offset + j, &val);
9129 if (val != test_pattern[i])
9130 return -EIO;
9131 }
9132 }
9133 return 0;
9134}
9135
9136static int tg3_test_memory(struct tg3 *tp)
9137{
9138 static struct mem_entry {
9139 u32 offset;
9140 u32 len;
9141 } mem_tbl_570x[] = {
38690194 9142 { 0x00000000, 0x00b50},
7942e1db
MC
9143 { 0x00002000, 0x1c000},
9144 { 0xffffffff, 0x00000}
9145 }, mem_tbl_5705[] = {
9146 { 0x00000100, 0x0000c},
9147 { 0x00000200, 0x00008},
7942e1db
MC
9148 { 0x00004000, 0x00800},
9149 { 0x00006000, 0x01000},
9150 { 0x00008000, 0x02000},
9151 { 0x00010000, 0x0e000},
9152 { 0xffffffff, 0x00000}
79f4d13a
MC
9153 }, mem_tbl_5755[] = {
9154 { 0x00000200, 0x00008},
9155 { 0x00004000, 0x00800},
9156 { 0x00006000, 0x00800},
9157 { 0x00008000, 0x02000},
9158 { 0x00010000, 0x0c000},
9159 { 0xffffffff, 0x00000}
b16250e3
MC
9160 }, mem_tbl_5906[] = {
9161 { 0x00000200, 0x00008},
9162 { 0x00004000, 0x00400},
9163 { 0x00006000, 0x00400},
9164 { 0x00008000, 0x01000},
9165 { 0x00010000, 0x01000},
9166 { 0xffffffff, 0x00000}
7942e1db
MC
9167 };
9168 struct mem_entry *mem_tbl;
9169 int err = 0;
9170 int i;
9171
79f4d13a 9172 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
af36e6b6 9173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 9174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
9175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
79f4d13a 9177 mem_tbl = mem_tbl_5755;
b16250e3
MC
9178 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9179 mem_tbl = mem_tbl_5906;
79f4d13a
MC
9180 else
9181 mem_tbl = mem_tbl_5705;
9182 } else
7942e1db
MC
9183 mem_tbl = mem_tbl_570x;
9184
9185 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9186 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9187 mem_tbl[i].len)) != 0)
9188 break;
9189 }
6aa20a22 9190
7942e1db
MC
9191 return err;
9192}
9193
9f40dead
MC
9194#define TG3_MAC_LOOPBACK 0
9195#define TG3_PHY_LOOPBACK 1
9196
/* Loopback self test worker: configure either internal MAC loopback or
 * PHY loopback, transmit one 1514-byte frame addressed to the device's
 * own MAC address, and verify that exactly that frame is returned on
 * the standard receive ring with an intact payload.
 *
 * Returns 0 on success (or on 5780 MAC loopback, which is skipped due
 * to a hardware erratum), -EIO on timeout/mismatch, -ENOMEM if the
 * test skb cannot be allocated, -EINVAL for an unknown mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: tweak a shadow register via the EPHY
			 * test register before enabling loopback.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5700 link polarity depends on the external PHY. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: dst = own MAC, zeroed src/type area,
	 * then a deterministic byte pattern for the payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Post the frame and ring the TX doorbell. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Both the TX completion and exactly one RX packet must have
	 * been seen within the polling window.
	 */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the receive descriptor: standard ring, no errors,
	 * correct length (minus 4-byte FCS).
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9364
9f40dead
MC
9365#define TG3_MAC_LOOPBACK_FAILED 1
9366#define TG3_PHY_LOOPBACK_FAILED 2
9367#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9368 TG3_PHY_LOOPBACK_FAILED)
9369
9370static int tg3_test_loopback(struct tg3 *tp)
9371{
9372 int err = 0;
9936bcf6 9373 u32 cpmuctrl = 0;
9f40dead
MC
9374
9375 if (!netif_running(tp->dev))
9376 return TG3_LOOPBACK_FAILED;
9377
b9ec6c1b
MC
9378 err = tg3_reset_hw(tp, 1);
9379 if (err)
9380 return TG3_LOOPBACK_FAILED;
9f40dead 9381
b5af7126 9382 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
9936bcf6
MC
9383 int i;
9384 u32 status;
9385
9386 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9387
9388 /* Wait for up to 40 microseconds to acquire lock. */
9389 for (i = 0; i < 4; i++) {
9390 status = tr32(TG3_CPMU_MUTEX_GNT);
9391 if (status == CPMU_MUTEX_GNT_DRIVER)
9392 break;
9393 udelay(10);
9394 }
9395
9396 if (status != CPMU_MUTEX_GNT_DRIVER)
9397 return TG3_LOOPBACK_FAILED;
9398
9936bcf6 9399 /* Turn off power management based on link speed. */
e875093c 9400 cpmuctrl = tr32(TG3_CPMU_CTRL);
9936bcf6 9401 tw32(TG3_CPMU_CTRL,
e875093c
MC
9402 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9403 CPMU_CTRL_LINK_AWARE_MODE));
9936bcf6
MC
9404 }
9405
9f40dead
MC
9406 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9407 err |= TG3_MAC_LOOPBACK_FAILED;
9936bcf6 9408
b5af7126 9409 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
9936bcf6
MC
9410 tw32(TG3_CPMU_CTRL, cpmuctrl);
9411
9412 /* Release the mutex */
9413 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9414 }
9415
9f40dead
MC
9416 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9417 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9418 err |= TG3_PHY_LOOPBACK_FAILED;
9419 }
9420
9421 return err;
9422}
9423
4cafd3f5
MC
9424static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9425 u64 *data)
9426{
566f86ad
MC
9427 struct tg3 *tp = netdev_priv(dev);
9428
bc1c7567
MC
9429 if (tp->link_config.phy_is_low_power)
9430 tg3_set_power_state(tp, PCI_D0);
9431
566f86ad
MC
9432 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9433
9434 if (tg3_test_nvram(tp) != 0) {
9435 etest->flags |= ETH_TEST_FL_FAILED;
9436 data[0] = 1;
9437 }
ca43007a
MC
9438 if (tg3_test_link(tp) != 0) {
9439 etest->flags |= ETH_TEST_FL_FAILED;
9440 data[1] = 1;
9441 }
a71116d1 9442 if (etest->flags & ETH_TEST_FL_OFFLINE) {
ec41c7df 9443 int err, irq_sync = 0;
bbe832c0
MC
9444
9445 if (netif_running(dev)) {
a71116d1 9446 tg3_netif_stop(tp);
bbe832c0
MC
9447 irq_sync = 1;
9448 }
a71116d1 9449
bbe832c0 9450 tg3_full_lock(tp, irq_sync);
a71116d1
MC
9451
9452 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
ec41c7df 9453 err = tg3_nvram_lock(tp);
a71116d1
MC
9454 tg3_halt_cpu(tp, RX_CPU_BASE);
9455 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9456 tg3_halt_cpu(tp, TX_CPU_BASE);
ec41c7df
MC
9457 if (!err)
9458 tg3_nvram_unlock(tp);
a71116d1 9459
d9ab5ad1
MC
9460 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9461 tg3_phy_reset(tp);
9462
a71116d1
MC
9463 if (tg3_test_registers(tp) != 0) {
9464 etest->flags |= ETH_TEST_FL_FAILED;
9465 data[2] = 1;
9466 }
7942e1db
MC
9467 if (tg3_test_memory(tp) != 0) {
9468 etest->flags |= ETH_TEST_FL_FAILED;
9469 data[3] = 1;
9470 }
9f40dead 9471 if ((data[4] = tg3_test_loopback(tp)) != 0)
c76949a6 9472 etest->flags |= ETH_TEST_FL_FAILED;
a71116d1 9473
f47c11ee
DM
9474 tg3_full_unlock(tp);
9475
d4bc3927
MC
9476 if (tg3_test_interrupt(tp) != 0) {
9477 etest->flags |= ETH_TEST_FL_FAILED;
9478 data[5] = 1;
9479 }
f47c11ee
DM
9480
9481 tg3_full_lock(tp, 0);
d4bc3927 9482
a71116d1
MC
9483 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9484 if (netif_running(dev)) {
9485 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
b9ec6c1b
MC
9486 if (!tg3_restart_hw(tp, 1))
9487 tg3_netif_start(tp);
a71116d1 9488 }
f47c11ee
DM
9489
9490 tg3_full_unlock(tp);
a71116d1 9491 }
bc1c7567
MC
9492 if (tp->link_config.phy_is_low_power)
9493 tg3_set_power_state(tp, PCI_D3hot);
9494
4cafd3f5
MC
9495}
9496
1da177e4
LT
9497static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9498{
9499 struct mii_ioctl_data *data = if_mii(ifr);
9500 struct tg3 *tp = netdev_priv(dev);
9501 int err;
9502
9503 switch(cmd) {
9504 case SIOCGMIIPHY:
9505 data->phy_id = PHY_ADDR;
9506
9507 /* fallthru */
9508 case SIOCGMIIREG: {
9509 u32 mii_regval;
9510
9511 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9512 break; /* We have no PHY */
9513
bc1c7567
MC
9514 if (tp->link_config.phy_is_low_power)
9515 return -EAGAIN;
9516
f47c11ee 9517 spin_lock_bh(&tp->lock);
1da177e4 9518 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 9519 spin_unlock_bh(&tp->lock);
1da177e4
LT
9520
9521 data->val_out = mii_regval;
9522
9523 return err;
9524 }
9525
9526 case SIOCSMIIREG:
9527 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9528 break; /* We have no PHY */
9529
9530 if (!capable(CAP_NET_ADMIN))
9531 return -EPERM;
9532
bc1c7567
MC
9533 if (tp->link_config.phy_is_low_power)
9534 return -EAGAIN;
9535
f47c11ee 9536 spin_lock_bh(&tp->lock);
1da177e4 9537 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 9538 spin_unlock_bh(&tp->lock);
1da177e4
LT
9539
9540 return err;
9541
9542 default:
9543 /* do nothing */
9544 break;
9545 }
9546 return -EOPNOTSUPP;
9547}
9548
9549#if TG3_VLAN_TAG_USED
9550static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9551{
9552 struct tg3 *tp = netdev_priv(dev);
9553
29315e87
MC
9554 if (netif_running(dev))
9555 tg3_netif_stop(tp);
9556
f47c11ee 9557 tg3_full_lock(tp, 0);
1da177e4
LT
9558
9559 tp->vlgrp = grp;
9560
9561 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9562 __tg3_set_rx_mode(dev);
9563
29315e87
MC
9564 if (netif_running(dev))
9565 tg3_netif_start(tp);
46966545
MC
9566
9567 tg3_full_unlock(tp);
1da177e4 9568}
1da177e4
LT
9569#endif
9570
15f9850d
DM
9571static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9572{
9573 struct tg3 *tp = netdev_priv(dev);
9574
9575 memcpy(ec, &tp->coal, sizeof(*ec));
9576 return 0;
9577}
9578
d244c892
MC
9579static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9580{
9581 struct tg3 *tp = netdev_priv(dev);
9582 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9583 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9584
9585 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9586 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9587 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9588 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9589 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9590 }
9591
9592 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9593 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9594 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9595 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9596 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9597 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9598 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9599 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9600 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9601 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9602 return -EINVAL;
9603
9604 /* No rx interrupts will be generated if both are zero */
9605 if ((ec->rx_coalesce_usecs == 0) &&
9606 (ec->rx_max_coalesced_frames == 0))
9607 return -EINVAL;
9608
9609 /* No tx interrupts will be generated if both are zero */
9610 if ((ec->tx_coalesce_usecs == 0) &&
9611 (ec->tx_max_coalesced_frames == 0))
9612 return -EINVAL;
9613
9614 /* Only copy relevant parameters, ignore all others. */
9615 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9616 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9617 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9618 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9619 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9620 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9621 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9622 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9623 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9624
9625 if (netif_running(dev)) {
9626 tg3_full_lock(tp, 0);
9627 __tg3_set_coalesce(tp, &tp->coal);
9628 tg3_full_unlock(tp);
9629 }
9630 return 0;
9631}
9632
7282d491 9633static const struct ethtool_ops tg3_ethtool_ops = {
1da177e4
LT
9634 .get_settings = tg3_get_settings,
9635 .set_settings = tg3_set_settings,
9636 .get_drvinfo = tg3_get_drvinfo,
9637 .get_regs_len = tg3_get_regs_len,
9638 .get_regs = tg3_get_regs,
9639 .get_wol = tg3_get_wol,
9640 .set_wol = tg3_set_wol,
9641 .get_msglevel = tg3_get_msglevel,
9642 .set_msglevel = tg3_set_msglevel,
9643 .nway_reset = tg3_nway_reset,
9644 .get_link = ethtool_op_get_link,
9645 .get_eeprom_len = tg3_get_eeprom_len,
9646 .get_eeprom = tg3_get_eeprom,
9647 .set_eeprom = tg3_set_eeprom,
9648 .get_ringparam = tg3_get_ringparam,
9649 .set_ringparam = tg3_set_ringparam,
9650 .get_pauseparam = tg3_get_pauseparam,
9651 .set_pauseparam = tg3_set_pauseparam,
9652 .get_rx_csum = tg3_get_rx_csum,
9653 .set_rx_csum = tg3_set_rx_csum,
1da177e4 9654 .set_tx_csum = tg3_set_tx_csum,
1da177e4 9655 .set_sg = ethtool_op_set_sg,
1da177e4 9656 .set_tso = tg3_set_tso,
4cafd3f5 9657 .self_test = tg3_self_test,
1da177e4 9658 .get_strings = tg3_get_strings,
4009a93d 9659 .phys_id = tg3_phys_id,
1da177e4 9660 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 9661 .get_coalesce = tg3_get_coalesce,
d244c892 9662 .set_coalesce = tg3_set_coalesce,
b9f2c044 9663 .get_sset_count = tg3_get_sset_count,
1da177e4
LT
9664};
9665
9666static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9667{
1b27777a 9668 u32 cursize, val, magic;
1da177e4
LT
9669
9670 tp->nvram_size = EEPROM_CHIP_SIZE;
9671
1820180b 9672 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
1da177e4
LT
9673 return;
9674
b16250e3
MC
9675 if ((magic != TG3_EEPROM_MAGIC) &&
9676 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9677 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
1da177e4
LT
9678 return;
9679
9680 /*
9681 * Size the chip by reading offsets at increasing powers of two.
9682 * When we encounter our validation signature, we know the addressing
9683 * has wrapped around, and thus have our chip size.
9684 */
1b27777a 9685 cursize = 0x10;
1da177e4
LT
9686
9687 while (cursize < tp->nvram_size) {
1820180b 9688 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
1da177e4
LT
9689 return;
9690
1820180b 9691 if (val == magic)
1da177e4
LT
9692 break;
9693
9694 cursize <<= 1;
9695 }
9696
9697 tp->nvram_size = cursize;
9698}
6aa20a22 9699
1da177e4
LT
9700static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9701{
9702 u32 val;
9703
1820180b 9704 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
1b27777a
MC
9705 return;
9706
9707 /* Selfboot format */
1820180b 9708 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
9709 tg3_get_eeprom_size(tp);
9710 return;
9711 }
9712
1da177e4
LT
9713 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9714 if (val != 0) {
9715 tp->nvram_size = (val >> 16) * 1024;
9716 return;
9717 }
9718 }
989a9d23 9719 tp->nvram_size = 0x80000;
1da177e4
LT
9720}
9721
9722static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9723{
9724 u32 nvcfg1;
9725
9726 nvcfg1 = tr32(NVRAM_CFG1);
9727 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9728 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9729 }
9730 else {
9731 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9732 tw32(NVRAM_CFG1, nvcfg1);
9733 }
9734
4c987487 9735 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
a4e2b347 9736 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
9737 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9738 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9739 tp->nvram_jedecnum = JEDEC_ATMEL;
9740 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9741 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9742 break;
9743 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9744 tp->nvram_jedecnum = JEDEC_ATMEL;
9745 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9746 break;
9747 case FLASH_VENDOR_ATMEL_EEPROM:
9748 tp->nvram_jedecnum = JEDEC_ATMEL;
9749 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9750 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9751 break;
9752 case FLASH_VENDOR_ST:
9753 tp->nvram_jedecnum = JEDEC_ST;
9754 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9755 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9756 break;
9757 case FLASH_VENDOR_SAIFUN:
9758 tp->nvram_jedecnum = JEDEC_SAIFUN;
9759 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9760 break;
9761 case FLASH_VENDOR_SST_SMALL:
9762 case FLASH_VENDOR_SST_LARGE:
9763 tp->nvram_jedecnum = JEDEC_SST;
9764 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9765 break;
9766 }
9767 }
9768 else {
9769 tp->nvram_jedecnum = JEDEC_ATMEL;
9770 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9771 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9772 }
9773}
9774
361b4ac2
MC
9775static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9776{
9777 u32 nvcfg1;
9778
9779 nvcfg1 = tr32(NVRAM_CFG1);
9780
e6af301b
MC
9781 /* NVRAM protection for TPM */
9782 if (nvcfg1 & (1 << 27))
9783 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9784
361b4ac2
MC
9785 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9786 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9787 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9788 tp->nvram_jedecnum = JEDEC_ATMEL;
9789 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9790 break;
9791 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9792 tp->nvram_jedecnum = JEDEC_ATMEL;
9793 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9794 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9795 break;
9796 case FLASH_5752VENDOR_ST_M45PE10:
9797 case FLASH_5752VENDOR_ST_M45PE20:
9798 case FLASH_5752VENDOR_ST_M45PE40:
9799 tp->nvram_jedecnum = JEDEC_ST;
9800 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9801 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9802 break;
9803 }
9804
9805 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9806 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9807 case FLASH_5752PAGE_SIZE_256:
9808 tp->nvram_pagesize = 256;
9809 break;
9810 case FLASH_5752PAGE_SIZE_512:
9811 tp->nvram_pagesize = 512;
9812 break;
9813 case FLASH_5752PAGE_SIZE_1K:
9814 tp->nvram_pagesize = 1024;
9815 break;
9816 case FLASH_5752PAGE_SIZE_2K:
9817 tp->nvram_pagesize = 2048;
9818 break;
9819 case FLASH_5752PAGE_SIZE_4K:
9820 tp->nvram_pagesize = 4096;
9821 break;
9822 case FLASH_5752PAGE_SIZE_264:
9823 tp->nvram_pagesize = 264;
9824 break;
9825 }
9826 }
9827 else {
9828 /* For eeprom, set pagesize to maximum eeprom size */
9829 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9830
9831 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9832 tw32(NVRAM_CFG1, nvcfg1);
9833 }
9834}
9835
d3c7b886
MC
9836static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9837{
989a9d23 9838 u32 nvcfg1, protect = 0;
d3c7b886
MC
9839
9840 nvcfg1 = tr32(NVRAM_CFG1);
9841
9842 /* NVRAM protection for TPM */
989a9d23 9843 if (nvcfg1 & (1 << 27)) {
d3c7b886 9844 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
989a9d23
MC
9845 protect = 1;
9846 }
d3c7b886 9847
989a9d23
MC
9848 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9849 switch (nvcfg1) {
d3c7b886
MC
9850 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9851 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9852 case FLASH_5755VENDOR_ATMEL_FLASH_3:
70b65a2d 9853 case FLASH_5755VENDOR_ATMEL_FLASH_5:
d3c7b886
MC
9854 tp->nvram_jedecnum = JEDEC_ATMEL;
9855 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9856 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9857 tp->nvram_pagesize = 264;
70b65a2d
MC
9858 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9859 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
989a9d23
MC
9860 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9861 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9862 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9863 else
9864 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
d3c7b886
MC
9865 break;
9866 case FLASH_5752VENDOR_ST_M45PE10:
9867 case FLASH_5752VENDOR_ST_M45PE20:
9868 case FLASH_5752VENDOR_ST_M45PE40:
9869 tp->nvram_jedecnum = JEDEC_ST;
9870 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9871 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9872 tp->nvram_pagesize = 256;
989a9d23
MC
9873 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9874 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9875 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9876 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9877 else
9878 tp->nvram_size = (protect ? 0x20000 : 0x80000);
d3c7b886
MC
9879 break;
9880 }
9881}
9882
1b27777a
MC
9883static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9884{
9885 u32 nvcfg1;
9886
9887 nvcfg1 = tr32(NVRAM_CFG1);
9888
9889 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9890 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9891 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9892 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9893 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9894 tp->nvram_jedecnum = JEDEC_ATMEL;
9895 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9896 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9897
9898 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9899 tw32(NVRAM_CFG1, nvcfg1);
9900 break;
9901 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9902 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9903 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9904 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9905 tp->nvram_jedecnum = JEDEC_ATMEL;
9906 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9907 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9908 tp->nvram_pagesize = 264;
9909 break;
9910 case FLASH_5752VENDOR_ST_M45PE10:
9911 case FLASH_5752VENDOR_ST_M45PE20:
9912 case FLASH_5752VENDOR_ST_M45PE40:
9913 tp->nvram_jedecnum = JEDEC_ST;
9914 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9915 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9916 tp->nvram_pagesize = 256;
9917 break;
9918 }
9919}
9920
6b91fa02
MC
9921static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9922{
9923 u32 nvcfg1, protect = 0;
9924
9925 nvcfg1 = tr32(NVRAM_CFG1);
9926
9927 /* NVRAM protection for TPM */
9928 if (nvcfg1 & (1 << 27)) {
9929 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9930 protect = 1;
9931 }
9932
9933 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9934 switch (nvcfg1) {
9935 case FLASH_5761VENDOR_ATMEL_ADB021D:
9936 case FLASH_5761VENDOR_ATMEL_ADB041D:
9937 case FLASH_5761VENDOR_ATMEL_ADB081D:
9938 case FLASH_5761VENDOR_ATMEL_ADB161D:
9939 case FLASH_5761VENDOR_ATMEL_MDB021D:
9940 case FLASH_5761VENDOR_ATMEL_MDB041D:
9941 case FLASH_5761VENDOR_ATMEL_MDB081D:
9942 case FLASH_5761VENDOR_ATMEL_MDB161D:
9943 tp->nvram_jedecnum = JEDEC_ATMEL;
9944 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9945 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9946 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9947 tp->nvram_pagesize = 256;
9948 break;
9949 case FLASH_5761VENDOR_ST_A_M45PE20:
9950 case FLASH_5761VENDOR_ST_A_M45PE40:
9951 case FLASH_5761VENDOR_ST_A_M45PE80:
9952 case FLASH_5761VENDOR_ST_A_M45PE16:
9953 case FLASH_5761VENDOR_ST_M_M45PE20:
9954 case FLASH_5761VENDOR_ST_M_M45PE40:
9955 case FLASH_5761VENDOR_ST_M_M45PE80:
9956 case FLASH_5761VENDOR_ST_M_M45PE16:
9957 tp->nvram_jedecnum = JEDEC_ST;
9958 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9959 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9960 tp->nvram_pagesize = 256;
9961 break;
9962 }
9963
9964 if (protect) {
9965 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9966 } else {
9967 switch (nvcfg1) {
9968 case FLASH_5761VENDOR_ATMEL_ADB161D:
9969 case FLASH_5761VENDOR_ATMEL_MDB161D:
9970 case FLASH_5761VENDOR_ST_A_M45PE16:
9971 case FLASH_5761VENDOR_ST_M_M45PE16:
9972 tp->nvram_size = 0x100000;
9973 break;
9974 case FLASH_5761VENDOR_ATMEL_ADB081D:
9975 case FLASH_5761VENDOR_ATMEL_MDB081D:
9976 case FLASH_5761VENDOR_ST_A_M45PE80:
9977 case FLASH_5761VENDOR_ST_M_M45PE80:
9978 tp->nvram_size = 0x80000;
9979 break;
9980 case FLASH_5761VENDOR_ATMEL_ADB041D:
9981 case FLASH_5761VENDOR_ATMEL_MDB041D:
9982 case FLASH_5761VENDOR_ST_A_M45PE40:
9983 case FLASH_5761VENDOR_ST_M_M45PE40:
9984 tp->nvram_size = 0x40000;
9985 break;
9986 case FLASH_5761VENDOR_ATMEL_ADB021D:
9987 case FLASH_5761VENDOR_ATMEL_MDB021D:
9988 case FLASH_5761VENDOR_ST_A_M45PE20:
9989 case FLASH_5761VENDOR_ST_M_M45PE20:
9990 tp->nvram_size = 0x20000;
9991 break;
9992 }
9993 }
9994}
9995
b5d3772c
MC
9996static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9997{
9998 tp->nvram_jedecnum = JEDEC_ATMEL;
9999 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10000 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10001}
10002
1da177e4
LT
10003/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10004static void __devinit tg3_nvram_init(struct tg3 *tp)
10005{
1da177e4
LT
10006 tw32_f(GRC_EEPROM_ADDR,
10007 (EEPROM_ADDR_FSM_RESET |
10008 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10009 EEPROM_ADDR_CLKPERD_SHIFT)));
10010
9d57f01c 10011 msleep(1);
1da177e4
LT
10012
10013 /* Enable seeprom accesses. */
10014 tw32_f(GRC_LOCAL_CTRL,
10015 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10016 udelay(100);
10017
10018 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10019 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10020 tp->tg3_flags |= TG3_FLAG_NVRAM;
10021
ec41c7df
MC
10022 if (tg3_nvram_lock(tp)) {
10023 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10024 "tg3_nvram_init failed.\n", tp->dev->name);
10025 return;
10026 }
e6af301b 10027 tg3_enable_nvram_access(tp);
1da177e4 10028
989a9d23
MC
10029 tp->nvram_size = 0;
10030
361b4ac2
MC
10031 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10032 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
10033 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10034 tg3_get_5755_nvram_info(tp);
d30cdd28
MC
10035 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
1b27777a 10037 tg3_get_5787_nvram_info(tp);
6b91fa02
MC
10038 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10039 tg3_get_5761_nvram_info(tp);
b5d3772c
MC
10040 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10041 tg3_get_5906_nvram_info(tp);
361b4ac2
MC
10042 else
10043 tg3_get_nvram_info(tp);
10044
989a9d23
MC
10045 if (tp->nvram_size == 0)
10046 tg3_get_nvram_size(tp);
1da177e4 10047
e6af301b 10048 tg3_disable_nvram_access(tp);
381291b7 10049 tg3_nvram_unlock(tp);
1da177e4
LT
10050
10051 } else {
10052 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10053
10054 tg3_get_eeprom_size(tp);
10055 }
10056}
10057
10058static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10059 u32 offset, u32 *val)
10060{
10061 u32 tmp;
10062 int i;
10063
10064 if (offset > EEPROM_ADDR_ADDR_MASK ||
10065 (offset % 4) != 0)
10066 return -EINVAL;
10067
10068 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10069 EEPROM_ADDR_DEVID_MASK |
10070 EEPROM_ADDR_READ);
10071 tw32(GRC_EEPROM_ADDR,
10072 tmp |
10073 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10074 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10075 EEPROM_ADDR_ADDR_MASK) |
10076 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10077
9d57f01c 10078 for (i = 0; i < 1000; i++) {
1da177e4
LT
10079 tmp = tr32(GRC_EEPROM_ADDR);
10080
10081 if (tmp & EEPROM_ADDR_COMPLETE)
10082 break;
9d57f01c 10083 msleep(1);
1da177e4
LT
10084 }
10085 if (!(tmp & EEPROM_ADDR_COMPLETE))
10086 return -EBUSY;
10087
10088 *val = tr32(GRC_EEPROM_DATA);
10089 return 0;
10090}
10091
10092#define NVRAM_CMD_TIMEOUT 10000
10093
10094static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10095{
10096 int i;
10097
10098 tw32(NVRAM_CMD, nvram_cmd);
10099 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10100 udelay(10);
10101 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10102 udelay(10);
10103 break;
10104 }
10105 }
10106 if (i == NVRAM_CMD_TIMEOUT) {
10107 return -EBUSY;
10108 }
10109 return 0;
10110}
10111
1820180b
MC
10112static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10113{
10114 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10115 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10116 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10117 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
1820180b
MC
10118 (tp->nvram_jedecnum == JEDEC_ATMEL))
10119
10120 addr = ((addr / tp->nvram_pagesize) <<
10121 ATMEL_AT45DB0X1B_PAGE_POS) +
10122 (addr % tp->nvram_pagesize);
10123
10124 return addr;
10125}
10126
c4e6575c
MC
10127static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10128{
10129 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10130 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10131 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10132 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
c4e6575c
MC
10133 (tp->nvram_jedecnum == JEDEC_ATMEL))
10134
10135 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10136 tp->nvram_pagesize) +
10137 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10138
10139 return addr;
10140}
10141
1da177e4
LT
10142static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10143{
10144 int ret;
10145
1da177e4
LT
10146 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10147 return tg3_nvram_read_using_eeprom(tp, offset, val);
10148
1820180b 10149 offset = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
10150
10151 if (offset > NVRAM_ADDR_MSK)
10152 return -EINVAL;
10153
ec41c7df
MC
10154 ret = tg3_nvram_lock(tp);
10155 if (ret)
10156 return ret;
1da177e4 10157
e6af301b 10158 tg3_enable_nvram_access(tp);
1da177e4
LT
10159
10160 tw32(NVRAM_ADDR, offset);
10161 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10162 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10163
10164 if (ret == 0)
10165 *val = swab32(tr32(NVRAM_RDDATA));
10166
e6af301b 10167 tg3_disable_nvram_access(tp);
1da177e4 10168
381291b7
MC
10169 tg3_nvram_unlock(tp);
10170
1da177e4
LT
10171 return ret;
10172}
10173
1820180b
MC
10174static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10175{
10176 int err;
10177 u32 tmp;
10178
10179 err = tg3_nvram_read(tp, offset, &tmp);
10180 *val = swab32(tmp);
10181 return err;
10182}
10183
1da177e4
LT
10184static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10185 u32 offset, u32 len, u8 *buf)
10186{
10187 int i, j, rc = 0;
10188 u32 val;
10189
10190 for (i = 0; i < len; i += 4) {
10191 u32 addr, data;
10192
10193 addr = offset + i;
10194
10195 memcpy(&data, buf + i, 4);
10196
10197 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
10198
10199 val = tr32(GRC_EEPROM_ADDR);
10200 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10201
10202 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10203 EEPROM_ADDR_READ);
10204 tw32(GRC_EEPROM_ADDR, val |
10205 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10206 (addr & EEPROM_ADDR_ADDR_MASK) |
10207 EEPROM_ADDR_START |
10208 EEPROM_ADDR_WRITE);
6aa20a22 10209
9d57f01c 10210 for (j = 0; j < 1000; j++) {
1da177e4
LT
10211 val = tr32(GRC_EEPROM_ADDR);
10212
10213 if (val & EEPROM_ADDR_COMPLETE)
10214 break;
9d57f01c 10215 msleep(1);
1da177e4
LT
10216 }
10217 if (!(val & EEPROM_ADDR_COMPLETE)) {
10218 rc = -EBUSY;
10219 break;
10220 }
10221 }
10222
10223 return rc;
10224}
10225
10226/* offset and length are dword aligned */
10227static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10228 u8 *buf)
10229{
10230 int ret = 0;
10231 u32 pagesize = tp->nvram_pagesize;
10232 u32 pagemask = pagesize - 1;
10233 u32 nvram_cmd;
10234 u8 *tmp;
10235
10236 tmp = kmalloc(pagesize, GFP_KERNEL);
10237 if (tmp == NULL)
10238 return -ENOMEM;
10239
10240 while (len) {
10241 int j;
e6af301b 10242 u32 phy_addr, page_off, size;
1da177e4
LT
10243
10244 phy_addr = offset & ~pagemask;
6aa20a22 10245
1da177e4
LT
10246 for (j = 0; j < pagesize; j += 4) {
10247 if ((ret = tg3_nvram_read(tp, phy_addr + j,
10248 (u32 *) (tmp + j))))
10249 break;
10250 }
10251 if (ret)
10252 break;
10253
10254 page_off = offset & pagemask;
10255 size = pagesize;
10256 if (len < size)
10257 size = len;
10258
10259 len -= size;
10260
10261 memcpy(tmp + page_off, buf, size);
10262
10263 offset = offset + (pagesize - page_off);
10264
e6af301b 10265 tg3_enable_nvram_access(tp);
1da177e4
LT
10266
10267 /*
10268 * Before we can erase the flash page, we need
10269 * to issue a special "write enable" command.
10270 */
10271 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10272
10273 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10274 break;
10275
10276 /* Erase the target page */
10277 tw32(NVRAM_ADDR, phy_addr);
10278
10279 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10280 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10281
10282 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10283 break;
10284
10285 /* Issue another write enable to start the write. */
10286 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10287
10288 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10289 break;
10290
10291 for (j = 0; j < pagesize; j += 4) {
10292 u32 data;
10293
10294 data = *((u32 *) (tmp + j));
10295 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10296
10297 tw32(NVRAM_ADDR, phy_addr + j);
10298
10299 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10300 NVRAM_CMD_WR;
10301
10302 if (j == 0)
10303 nvram_cmd |= NVRAM_CMD_FIRST;
10304 else if (j == (pagesize - 4))
10305 nvram_cmd |= NVRAM_CMD_LAST;
10306
10307 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10308 break;
10309 }
10310 if (ret)
10311 break;
10312 }
10313
10314 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10315 tg3_nvram_exec_cmd(tp, nvram_cmd);
10316
10317 kfree(tmp);
10318
10319 return ret;
10320}
10321
10322/* offset and length are dword aligned */
10323static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10324 u8 *buf)
10325{
10326 int i, ret = 0;
10327
10328 for (i = 0; i < len; i += 4, offset += 4) {
10329 u32 data, page_off, phy_addr, nvram_cmd;
10330
10331 memcpy(&data, buf + i, 4);
10332 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10333
10334 page_off = offset % tp->nvram_pagesize;
10335
1820180b 10336 phy_addr = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
10337
10338 tw32(NVRAM_ADDR, phy_addr);
10339
10340 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10341
10342 if ((page_off == 0) || (i == 0))
10343 nvram_cmd |= NVRAM_CMD_FIRST;
f6d9a256 10344 if (page_off == (tp->nvram_pagesize - 4))
1da177e4
LT
10345 nvram_cmd |= NVRAM_CMD_LAST;
10346
10347 if (i == (len - 4))
10348 nvram_cmd |= NVRAM_CMD_LAST;
10349
4c987487 10350 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
af36e6b6 10351 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
1b27777a 10352 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
d30cdd28 10353 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
9936bcf6 10354 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
4c987487
MC
10355 (tp->nvram_jedecnum == JEDEC_ST) &&
10356 (nvram_cmd & NVRAM_CMD_FIRST)) {
1da177e4
LT
10357
10358 if ((ret = tg3_nvram_exec_cmd(tp,
10359 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10360 NVRAM_CMD_DONE)))
10361
10362 break;
10363 }
10364 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10365 /* We always do complete word writes to eeprom. */
10366 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10367 }
10368
10369 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10370 break;
10371 }
10372 return ret;
10373}
10374
10375/* offset and length are dword aligned */
10376static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10377{
10378 int ret;
10379
1da177e4 10380 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
10381 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10382 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
10383 udelay(40);
10384 }
10385
10386 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10387 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10388 }
10389 else {
10390 u32 grc_mode;
10391
ec41c7df
MC
10392 ret = tg3_nvram_lock(tp);
10393 if (ret)
10394 return ret;
1da177e4 10395
e6af301b
MC
10396 tg3_enable_nvram_access(tp);
10397 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10398 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 10399 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
10400
10401 grc_mode = tr32(GRC_MODE);
10402 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10403
10404 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10405 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10406
10407 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10408 buf);
10409 }
10410 else {
10411 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10412 buf);
10413 }
10414
10415 grc_mode = tr32(GRC_MODE);
10416 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10417
e6af301b 10418 tg3_disable_nvram_access(tp);
1da177e4
LT
10419 tg3_nvram_unlock(tp);
10420 }
10421
10422 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 10423 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
10424 udelay(40);
10425 }
10426
10427 return ret;
10428}
10429
10430struct subsys_tbl_ent {
10431 u16 subsys_vendor, subsys_devid;
10432 u32 phy_id;
10433};
10434
10435static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10436 /* Broadcom boards. */
10437 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10438 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10439 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10440 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10441 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10442 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10443 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10444 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10445 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10446 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10447 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10448
10449 /* 3com boards. */
10450 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10451 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10452 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10453 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10454 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10455
10456 /* DELL boards. */
10457 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10458 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10459 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10460 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10461
10462 /* Compaq boards. */
10463 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10464 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10465 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10466 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10467 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10468
10469 /* IBM boards. */
10470 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10471};
10472
10473static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10474{
10475 int i;
10476
10477 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10478 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10479 tp->pdev->subsystem_vendor) &&
10480 (subsys_id_to_phy_id[i].subsys_devid ==
10481 tp->pdev->subsystem_device))
10482 return &subsys_id_to_phy_id[i];
10483 }
10484 return NULL;
10485}
10486
7d0c41ef 10487static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 10488{
1da177e4 10489 u32 val;
caf636c7
MC
10490 u16 pmcsr;
10491
10492 /* On some early chips the SRAM cannot be accessed in D3hot state,
10493 * so need make sure we're in D0.
10494 */
10495 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10496 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10497 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10498 msleep(1);
7d0c41ef
MC
10499
10500 /* Make sure register accesses (indirect or otherwise)
10501 * will function correctly.
10502 */
10503 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10504 tp->misc_host_ctrl);
1da177e4 10505
f49639e6
DM
10506 /* The memory arbiter has to be enabled in order for SRAM accesses
10507 * to succeed. Normally on powerup the tg3 chip firmware will make
10508 * sure it is enabled, but other entities such as system netboot
10509 * code might disable it.
10510 */
10511 val = tr32(MEMARB_MODE);
10512 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10513
1da177e4 10514 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
10515 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10516
a85feb8c
GZ
10517 /* Assume an onboard device and WOL capable by default. */
10518 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
72b845e0 10519
b5d3772c 10520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9d26e213 10521 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
b5d3772c 10522 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9d26e213
MC
10523 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10524 }
0527ba35
MC
10525 val = tr32(VCPU_CFGSHDW);
10526 if (val & VCPU_CFGSHDW_ASPM_DBNC)
8ed5d97e 10527 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
0527ba35
MC
10528 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10529 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10530 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
b5d3772c
MC
10531 return;
10532 }
10533
1da177e4
LT
10534 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10535 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10536 u32 nic_cfg, led_cfg;
7d0c41ef
MC
10537 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10538 int eeprom_phy_serdes = 0;
1da177e4
LT
10539
10540 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10541 tp->nic_sram_data_cfg = nic_cfg;
10542
10543 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10544 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10545 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10546 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10547 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10548 (ver > 0) && (ver < 0x100))
10549 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10550
1da177e4
LT
10551 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10552 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10553 eeprom_phy_serdes = 1;
10554
10555 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10556 if (nic_phy_id != 0) {
10557 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10558 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10559
10560 eeprom_phy_id = (id1 >> 16) << 10;
10561 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10562 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10563 } else
10564 eeprom_phy_id = 0;
10565
7d0c41ef 10566 tp->phy_id = eeprom_phy_id;
747e8f8b 10567 if (eeprom_phy_serdes) {
a4e2b347 10568 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
747e8f8b
MC
10569 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10570 else
10571 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10572 }
7d0c41ef 10573
cbf46853 10574 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
10575 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10576 SHASTA_EXT_LED_MODE_MASK);
cbf46853 10577 else
1da177e4
LT
10578 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10579
10580 switch (led_cfg) {
10581 default:
10582 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10583 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10584 break;
10585
10586 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10587 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10588 break;
10589
10590 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10591 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
10592
10593 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10594 * read on some older 5700/5701 bootcode.
10595 */
10596 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10597 ASIC_REV_5700 ||
10598 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10599 ASIC_REV_5701)
10600 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10601
1da177e4
LT
10602 break;
10603
10604 case SHASTA_EXT_LED_SHARED:
10605 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10606 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10607 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10608 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10609 LED_CTRL_MODE_PHY_2);
10610 break;
10611
10612 case SHASTA_EXT_LED_MAC:
10613 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10614 break;
10615
10616 case SHASTA_EXT_LED_COMBO:
10617 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10618 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10619 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10620 LED_CTRL_MODE_PHY_2);
10621 break;
10622
10623 };
10624
10625 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10627 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10628 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10629
b5af7126
MC
10630 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
10631 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1)
5f60891b
MC
10632 tp->led_ctrl = LED_CTRL_MODE_MAC;
10633
9d26e213 10634 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
1da177e4 10635 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9d26e213
MC
10636 if ((tp->pdev->subsystem_vendor ==
10637 PCI_VENDOR_ID_ARIMA) &&
10638 (tp->pdev->subsystem_device == 0x205a ||
10639 tp->pdev->subsystem_device == 0x2063))
10640 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10641 } else {
f49639e6 10642 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9d26e213
MC
10643 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10644 }
1da177e4
LT
10645
10646 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10647 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 10648 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
10649 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10650 }
0d3031d9
MC
10651 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10652 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
a85feb8c
GZ
10653 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10654 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10655 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
1da177e4 10656
0527ba35
MC
10657 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10658 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10659 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10660
1da177e4
LT
10661 if (cfg2 & (1 << 17))
10662 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10663
10664 /* serdes signal pre-emphasis in register 0x590 set by */
10665 /* bootcode if bit 18 is set */
10666 if (cfg2 & (1 << 18))
10667 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8ed5d97e
MC
10668
10669 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10670 u32 cfg3;
10671
10672 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10673 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10674 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10675 }
1da177e4 10676 }
7d0c41ef
MC
10677}
10678
10679static int __devinit tg3_phy_probe(struct tg3 *tp)
10680{
10681 u32 hw_phy_id_1, hw_phy_id_2;
10682 u32 hw_phy_id, hw_phy_id_masked;
10683 int err;
1da177e4
LT
10684
10685 /* Reading the PHY ID register can conflict with ASF
10686 * firwmare access to the PHY hardware.
10687 */
10688 err = 0;
0d3031d9
MC
10689 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10690 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
1da177e4
LT
10691 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10692 } else {
10693 /* Now read the physical PHY_ID from the chip and verify
10694 * that it is sane. If it doesn't look good, we fall back
10695 * to either the hard-coded table based PHY_ID and failing
10696 * that the value found in the eeprom area.
10697 */
10698 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10699 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10700
10701 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
10702 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10703 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
10704
10705 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10706 }
10707
10708 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10709 tp->phy_id = hw_phy_id;
10710 if (hw_phy_id_masked == PHY_ID_BCM8002)
10711 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
da6b2d01
MC
10712 else
10713 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
1da177e4 10714 } else {
7d0c41ef
MC
10715 if (tp->phy_id != PHY_ID_INVALID) {
10716 /* Do nothing, phy ID already set up in
10717 * tg3_get_eeprom_hw_cfg().
10718 */
1da177e4
LT
10719 } else {
10720 struct subsys_tbl_ent *p;
10721
10722 /* No eeprom signature? Try the hardcoded
10723 * subsys device table.
10724 */
10725 p = lookup_by_subsys(tp);
10726 if (!p)
10727 return -ENODEV;
10728
10729 tp->phy_id = p->phy_id;
10730 if (!tp->phy_id ||
10731 tp->phy_id == PHY_ID_BCM8002)
10732 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10733 }
10734 }
10735
747e8f8b 10736 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
0d3031d9 10737 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
1da177e4 10738 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
3600d918 10739 u32 bmsr, adv_reg, tg3_ctrl, mask;
1da177e4
LT
10740
10741 tg3_readphy(tp, MII_BMSR, &bmsr);
10742 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10743 (bmsr & BMSR_LSTATUS))
10744 goto skip_phy_reset;
6aa20a22 10745
1da177e4
LT
10746 err = tg3_phy_reset(tp);
10747 if (err)
10748 return err;
10749
10750 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10751 ADVERTISE_100HALF | ADVERTISE_100FULL |
10752 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10753 tg3_ctrl = 0;
10754 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10755 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10756 MII_TG3_CTRL_ADV_1000_FULL);
10757 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10758 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10759 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10760 MII_TG3_CTRL_ENABLE_AS_MASTER);
10761 }
10762
3600d918
MC
10763 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10764 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10765 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10766 if (!tg3_copper_is_advertising_all(tp, mask)) {
1da177e4
LT
10767 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10768
10769 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10770 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10771
10772 tg3_writephy(tp, MII_BMCR,
10773 BMCR_ANENABLE | BMCR_ANRESTART);
10774 }
10775 tg3_phy_set_wirespeed(tp);
10776
10777 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10778 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10779 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10780 }
10781
10782skip_phy_reset:
10783 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10784 err = tg3_init_5401phy_dsp(tp);
10785 if (err)
10786 return err;
10787 }
10788
10789 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10790 err = tg3_init_5401phy_dsp(tp);
10791 }
10792
747e8f8b 10793 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1da177e4
LT
10794 tp->link_config.advertising =
10795 (ADVERTISED_1000baseT_Half |
10796 ADVERTISED_1000baseT_Full |
10797 ADVERTISED_Autoneg |
10798 ADVERTISED_FIBRE);
10799 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10800 tp->link_config.advertising &=
10801 ~(ADVERTISED_1000baseT_Half |
10802 ADVERTISED_1000baseT_Full);
10803
10804 return err;
10805}
10806
10807static void __devinit tg3_read_partno(struct tg3 *tp)
10808{
10809 unsigned char vpd_data[256];
af2c6a4a 10810 unsigned int i;
1b27777a 10811 u32 magic;
1da177e4 10812
1820180b 10813 if (tg3_nvram_read_swab(tp, 0x0, &magic))
f49639e6 10814 goto out_not_found;
1da177e4 10815
1820180b 10816 if (magic == TG3_EEPROM_MAGIC) {
1b27777a
MC
10817 for (i = 0; i < 256; i += 4) {
10818 u32 tmp;
1da177e4 10819
1b27777a
MC
10820 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10821 goto out_not_found;
10822
10823 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
10824 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
10825 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10826 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10827 }
10828 } else {
10829 int vpd_cap;
10830
10831 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10832 for (i = 0; i < 256; i += 4) {
10833 u32 tmp, j = 0;
10834 u16 tmp16;
10835
10836 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10837 i);
10838 while (j++ < 100) {
10839 pci_read_config_word(tp->pdev, vpd_cap +
10840 PCI_VPD_ADDR, &tmp16);
10841 if (tmp16 & 0x8000)
10842 break;
10843 msleep(1);
10844 }
f49639e6
DM
10845 if (!(tmp16 & 0x8000))
10846 goto out_not_found;
10847
1b27777a
MC
10848 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10849 &tmp);
10850 tmp = cpu_to_le32(tmp);
10851 memcpy(&vpd_data[i], &tmp, 4);
10852 }
1da177e4
LT
10853 }
10854
10855 /* Now parse and find the part number. */
af2c6a4a 10856 for (i = 0; i < 254; ) {
1da177e4 10857 unsigned char val = vpd_data[i];
af2c6a4a 10858 unsigned int block_end;
1da177e4
LT
10859
10860 if (val == 0x82 || val == 0x91) {
10861 i = (i + 3 +
10862 (vpd_data[i + 1] +
10863 (vpd_data[i + 2] << 8)));
10864 continue;
10865 }
10866
10867 if (val != 0x90)
10868 goto out_not_found;
10869
10870 block_end = (i + 3 +
10871 (vpd_data[i + 1] +
10872 (vpd_data[i + 2] << 8)));
10873 i += 3;
af2c6a4a
MC
10874
10875 if (block_end > 256)
10876 goto out_not_found;
10877
10878 while (i < (block_end - 2)) {
1da177e4
LT
10879 if (vpd_data[i + 0] == 'P' &&
10880 vpd_data[i + 1] == 'N') {
10881 int partno_len = vpd_data[i + 2];
10882
af2c6a4a
MC
10883 i += 3;
10884 if (partno_len > 24 || (partno_len + i) > 256)
1da177e4
LT
10885 goto out_not_found;
10886
10887 memcpy(tp->board_part_number,
af2c6a4a 10888 &vpd_data[i], partno_len);
1da177e4
LT
10889
10890 /* Success. */
10891 return;
10892 }
af2c6a4a 10893 i += 3 + vpd_data[i + 2];
1da177e4
LT
10894 }
10895
10896 /* Part number not found. */
10897 goto out_not_found;
10898 }
10899
10900out_not_found:
b5d3772c
MC
10901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10902 strcpy(tp->board_part_number, "BCM95906");
10903 else
10904 strcpy(tp->board_part_number, "none");
1da177e4
LT
10905}
10906
9c8a620e
MC
10907static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10908{
10909 u32 val;
10910
10911 if (tg3_nvram_read_swab(tp, offset, &val) ||
10912 (val & 0xfc000000) != 0x0c000000 ||
10913 tg3_nvram_read_swab(tp, offset + 4, &val) ||
10914 val != 0)
10915 return 0;
10916
10917 return 1;
10918}
10919
c4e6575c
MC
10920static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10921{
10922 u32 val, offset, start;
9c8a620e
MC
10923 u32 ver_offset;
10924 int i, bcnt;
c4e6575c
MC
10925
10926 if (tg3_nvram_read_swab(tp, 0, &val))
10927 return;
10928
10929 if (val != TG3_EEPROM_MAGIC)
10930 return;
10931
10932 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10933 tg3_nvram_read_swab(tp, 0x4, &start))
10934 return;
10935
10936 offset = tg3_nvram_logical_addr(tp, offset);
9c8a620e
MC
10937
10938 if (!tg3_fw_img_is_valid(tp, offset) ||
10939 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
c4e6575c
MC
10940 return;
10941
9c8a620e
MC
10942 offset = offset + ver_offset - start;
10943 for (i = 0; i < 16; i += 4) {
10944 if (tg3_nvram_read(tp, offset + i, &val))
10945 return;
10946
10947 val = le32_to_cpu(val);
10948 memcpy(tp->fw_ver + i, &val, 4);
10949 }
c4e6575c 10950
9c8a620e 10951 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
84af67fd 10952 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
9c8a620e
MC
10953 return;
10954
10955 for (offset = TG3_NVM_DIR_START;
10956 offset < TG3_NVM_DIR_END;
10957 offset += TG3_NVM_DIRENT_SIZE) {
10958 if (tg3_nvram_read_swab(tp, offset, &val))
c4e6575c
MC
10959 return;
10960
9c8a620e
MC
10961 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10962 break;
10963 }
10964
10965 if (offset == TG3_NVM_DIR_END)
10966 return;
10967
10968 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10969 start = 0x08000000;
10970 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10971 return;
10972
10973 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
10974 !tg3_fw_img_is_valid(tp, offset) ||
10975 tg3_nvram_read_swab(tp, offset + 8, &val))
10976 return;
10977
10978 offset += val - start;
10979
10980 bcnt = strlen(tp->fw_ver);
10981
10982 tp->fw_ver[bcnt++] = ',';
10983 tp->fw_ver[bcnt++] = ' ';
10984
10985 for (i = 0; i < 4; i++) {
10986 if (tg3_nvram_read(tp, offset, &val))
c4e6575c
MC
10987 return;
10988
9c8a620e
MC
10989 val = le32_to_cpu(val);
10990 offset += sizeof(val);
c4e6575c 10991
9c8a620e
MC
10992 if (bcnt > TG3_VER_SIZE - sizeof(val)) {
10993 memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
10994 break;
c4e6575c 10995 }
9c8a620e
MC
10996
10997 memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
10998 bcnt += sizeof(val);
c4e6575c 10999 }
9c8a620e
MC
11000
11001 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
c4e6575c
MC
11002}
11003
7544b097
MC
11004static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11005
1da177e4
LT
11006static int __devinit tg3_get_invariants(struct tg3 *tp)
11007{
11008 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
11009 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11010 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
c165b004
JL
11011 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11012 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
399de50b
MC
11013 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11014 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
11015 { },
11016 };
11017 u32 misc_ctrl_reg;
11018 u32 cacheline_sz_reg;
11019 u32 pci_state_reg, grc_misc_cfg;
11020 u32 val;
11021 u16 pci_cmd;
c7835a77 11022 int err, pcie_cap;
1da177e4 11023
1da177e4
LT
11024 /* Force memory write invalidate off. If we leave it on,
11025 * then on 5700_BX chips we have to enable a workaround.
11026 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11027 * to match the cacheline size. The Broadcom driver have this
11028 * workaround but turns MWI off all the times so never uses
11029 * it. This seems to suggest that the workaround is insufficient.
11030 */
11031 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11032 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11033 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11034
11035 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11036 * has the register indirect write enable bit set before
11037 * we try to access any of the MMIO registers. It is also
11038 * critical that the PCI-X hw workaround situation is decided
11039 * before that as well.
11040 */
11041 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11042 &misc_ctrl_reg);
11043
11044 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11045 MISC_HOST_CTRL_CHIPREV_SHIFT);
795d01c5
MC
11046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11047 u32 prod_id_asic_rev;
11048
11049 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11050 &prod_id_asic_rev);
11051 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11052 }
1da177e4 11053
ff645bec
MC
11054 /* Wrong chip ID in 5752 A0. This code can be removed later
11055 * as A0 is not in production.
11056 */
11057 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11058 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11059
6892914f
MC
11060 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11061 * we need to disable memory and use config. cycles
11062 * only to access all registers. The 5702/03 chips
11063 * can mistakenly decode the special cycles from the
11064 * ICH chipsets as memory write cycles, causing corruption
11065 * of register and memory space. Only certain ICH bridges
11066 * will drive special cycles with non-zero data during the
11067 * address phase which can fall within the 5703's address
11068 * range. This is not an ICH bug as the PCI spec allows
11069 * non-zero address during special cycles. However, only
11070 * these ICH bridges are known to drive non-zero addresses
11071 * during special cycles.
11072 *
11073 * Since special cycles do not cross PCI bridges, we only
11074 * enable this workaround if the 5703 is on the secondary
11075 * bus of these ICH bridges.
11076 */
11077 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11078 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11079 static struct tg3_dev_id {
11080 u32 vendor;
11081 u32 device;
11082 u32 rev;
11083 } ich_chipsets[] = {
11084 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11085 PCI_ANY_ID },
11086 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11087 PCI_ANY_ID },
11088 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11089 0xa },
11090 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11091 PCI_ANY_ID },
11092 { },
11093 };
11094 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11095 struct pci_dev *bridge = NULL;
11096
11097 while (pci_id->vendor != 0) {
11098 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11099 bridge);
11100 if (!bridge) {
11101 pci_id++;
11102 continue;
11103 }
11104 if (pci_id->rev != PCI_ANY_ID) {
44c10138 11105 if (bridge->revision > pci_id->rev)
6892914f
MC
11106 continue;
11107 }
11108 if (bridge->subordinate &&
11109 (bridge->subordinate->number ==
11110 tp->pdev->bus->number)) {
11111
11112 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11113 pci_dev_put(bridge);
11114 break;
11115 }
11116 }
11117 }
11118
4a29cc2e
MC
11119 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11120 * DMA addresses > 40-bit. This bridge may have other additional
11121 * 57xx devices behind it in some 4-port NIC designs for example.
11122 * Any tg3 device found behind the bridge will also need the 40-bit
11123 * DMA workaround.
11124 */
a4e2b347
MC
11125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11127 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 11128 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 11129 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 11130 }
4a29cc2e
MC
11131 else {
11132 struct pci_dev *bridge = NULL;
11133
11134 do {
11135 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11136 PCI_DEVICE_ID_SERVERWORKS_EPB,
11137 bridge);
11138 if (bridge && bridge->subordinate &&
11139 (bridge->subordinate->number <=
11140 tp->pdev->bus->number) &&
11141 (bridge->subordinate->subordinate >=
11142 tp->pdev->bus->number)) {
11143 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11144 pci_dev_put(bridge);
11145 break;
11146 }
11147 } while (bridge);
11148 }
4cf78e4f 11149
1da177e4
LT
11150 /* Initialize misc host control in PCI block. */
11151 tp->misc_host_ctrl |= (misc_ctrl_reg &
11152 MISC_HOST_CTRL_CHIPREV);
11153 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11154 tp->misc_host_ctrl);
11155
11156 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11157 &cacheline_sz_reg);
11158
11159 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11160 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11161 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11162 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11163
7544b097
MC
11164 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11165 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11166 tp->pdev_peer = tg3_find_peer(tp);
11167
6708e5cc 11168 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 11169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 11170 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 11171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
a4e2b347 11175 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
11176 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11177
1b440c56
JL
11178 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11179 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11180 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11181
5a6f3074 11182 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7544b097
MC
11183 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11184 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11185 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11186 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11187 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11188 tp->pdev_peer == tp->pdev))
11189 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11190
af36e6b6 11191 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 11192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11194 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11195 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5a6f3074 11196 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
fcfa0a32 11197 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
52c0fd83 11198 } else {
7f62ad5d 11199 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
52c0fd83
MC
11200 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11201 ASIC_REV_5750 &&
11202 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
7f62ad5d 11203 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
52c0fd83 11204 }
5a6f3074 11205 }
1da177e4 11206
0f893dc6
MC
11207 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11208 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
d9ab5ad1 11209 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
af36e6b6 11210 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
b5d3772c 11211 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
d30cdd28 11212 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9936bcf6 11213 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
b5d3772c 11214 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
0f893dc6
MC
11215 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11216
c7835a77
MC
11217 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11218 if (pcie_cap != 0) {
1da177e4 11219 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
5f5c51e3
MC
11220
11221 pcie_set_readrq(tp->pdev, 4096);
11222
c7835a77
MC
11223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11224 u16 lnkctl;
11225
11226 pci_read_config_word(tp->pdev,
11227 pcie_cap + PCI_EXP_LNKCTL,
11228 &lnkctl);
11229 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11230 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11231 }
11232 }
1da177e4 11233
399de50b
MC
11234 /* If we have an AMD 762 or VIA K8T800 chipset, write
11235 * reordering to the mailbox registers done by the host
11236 * controller can cause major troubles. We read back from
11237 * every mailbox register write to force the writes to be
11238 * posted to the chip in order.
11239 */
11240 if (pci_dev_present(write_reorder_chipsets) &&
11241 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11242 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11243
1da177e4
LT
11244 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11245 tp->pci_lat_timer < 64) {
11246 tp->pci_lat_timer = 64;
11247
11248 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11249 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11250 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11251 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11252
11253 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11254 cacheline_sz_reg);
11255 }
11256
9974a356
MC
11257 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11258 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11259 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11260 if (!tp->pcix_cap) {
11261 printk(KERN_ERR PFX "Cannot find PCI-X "
11262 "capability, aborting.\n");
11263 return -EIO;
11264 }
11265 }
11266
1da177e4
LT
11267 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11268 &pci_state_reg);
11269
9974a356 11270 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
1da177e4
LT
11271 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11272
11273 /* If this is a 5700 BX chipset, and we are in PCI-X
11274 * mode, enable register write workaround.
11275 *
11276 * The workaround is to use indirect register accesses
11277 * for all chip writes not to mailbox registers.
11278 */
11279 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11280 u32 pm_reg;
1da177e4
LT
11281
11282 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11283
11284 /* The chip can have it's power management PCI config
11285 * space registers clobbered due to this bug.
11286 * So explicitly force the chip into D0 here.
11287 */
9974a356
MC
11288 pci_read_config_dword(tp->pdev,
11289 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11290 &pm_reg);
11291 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11292 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
11293 pci_write_config_dword(tp->pdev,
11294 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
11295 pm_reg);
11296
11297 /* Also, force SERR#/PERR# in PCI command. */
11298 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11299 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11300 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11301 }
11302 }
11303
087fe256
MC
11304 /* 5700 BX chips need to have their TX producer index mailboxes
11305 * written twice to workaround a bug.
11306 */
11307 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11308 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11309
1da177e4
LT
11310 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11311 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11312 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11313 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11314
11315 /* Chip-specific fixup from Broadcom driver */
11316 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11317 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11318 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11319 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11320 }
11321
1ee582d8 11322 /* Default fast path register access methods */
20094930 11323 tp->read32 = tg3_read32;
1ee582d8 11324 tp->write32 = tg3_write32;
09ee929c 11325 tp->read32_mbox = tg3_read32;
20094930 11326 tp->write32_mbox = tg3_write32;
1ee582d8
MC
11327 tp->write32_tx_mbox = tg3_write32;
11328 tp->write32_rx_mbox = tg3_write32;
11329
11330 /* Various workaround register access methods */
11331 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11332 tp->write32 = tg3_write_indirect_reg32;
98efd8a6
MC
11333 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11334 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11335 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11336 /*
11337 * Back to back register writes can cause problems on these
11338 * chips, the workaround is to read back all reg writes
11339 * except those to mailbox regs.
11340 *
11341 * See tg3_write_indirect_reg32().
11342 */
1ee582d8 11343 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
11344 }
11345
1ee582d8
MC
11346
11347 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11348 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11349 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11350 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11351 tp->write32_rx_mbox = tg3_write_flush_reg32;
11352 }
20094930 11353
6892914f
MC
11354 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11355 tp->read32 = tg3_read_indirect_reg32;
11356 tp->write32 = tg3_write_indirect_reg32;
11357 tp->read32_mbox = tg3_read_indirect_mbox;
11358 tp->write32_mbox = tg3_write_indirect_mbox;
11359 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11360 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11361
11362 iounmap(tp->regs);
22abe310 11363 tp->regs = NULL;
6892914f
MC
11364
11365 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11366 pci_cmd &= ~PCI_COMMAND_MEMORY;
11367 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11368 }
b5d3772c
MC
11369 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11370 tp->read32_mbox = tg3_read32_mbox_5906;
11371 tp->write32_mbox = tg3_write32_mbox_5906;
11372 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11373 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11374 }
6892914f 11375
bbadf503
MC
11376 if (tp->write32 == tg3_write_indirect_reg32 ||
11377 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11378 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 11379 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
bbadf503
MC
11380 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11381
7d0c41ef 11382 /* Get eeprom hw config before calling tg3_set_power_state().
9d26e213 11383 * In particular, the TG3_FLG2_IS_NIC flag must be
7d0c41ef
MC
11384 * determined before calling tg3_set_power_state() so that
11385 * we know whether or not to switch out of Vaux power.
11386 * When the flag is set, it means that GPIO1 is used for eeprom
11387 * write protect and also implies that it is a LOM where GPIOs
11388 * are not used to switch power.
6aa20a22 11389 */
7d0c41ef
MC
11390 tg3_get_eeprom_hw_cfg(tp);
11391
0d3031d9
MC
11392 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11393 /* Allow reads and writes to the
11394 * APE register and memory space.
11395 */
11396 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11397 PCISTATE_ALLOW_APE_SHMEM_WR;
11398 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11399 pci_state_reg);
11400 }
11401
9936bcf6 11402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
b5af7126 11403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
d30cdd28
MC
11404 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11405
b5af7126
MC
11406 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11407 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11408 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11409 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11410 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11411 }
11412
314fba34
MC
11413 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11414 * GPIO1 driven high will bring 5700's external PHY out of reset.
11415 * It is also used as eeprom write protect on LOMs.
11416 */
11417 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11418 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11419 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11420 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11421 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
11422 /* Unused GPIO3 must be driven as output on 5752 because there
11423 * are no pull-up resistors on unused GPIO pins.
11424 */
11425 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11426 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 11427
af36e6b6
MC
11428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11429 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11430
1da177e4 11431 /* Force the chip into D0. */
bc1c7567 11432 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
11433 if (err) {
11434 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11435 pci_name(tp->pdev));
11436 return err;
11437 }
11438
11439 /* 5700 B0 chips do not support checksumming correctly due
11440 * to hardware bugs.
11441 */
11442 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11443 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11444
1da177e4
LT
11445 /* Derive initial jumbo mode from MTU assigned in
11446 * ether_setup() via the alloc_etherdev() call
11447 */
0f893dc6 11448 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 11449 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 11450 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
11451
11452 /* Determine WakeOnLan speed to use. */
11453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11454 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11455 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11456 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11457 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11458 } else {
11459 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11460 }
11461
11462 /* A few boards don't want Ethernet@WireSpeed phy feature */
11463 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11464 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11465 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 11466 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
b5d3772c 11467 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
747e8f8b 11468 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
11469 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11470
11471 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11472 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11473 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11474 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11475 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11476
c424cb24
MC
11477 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11478 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 11479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
11480 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11481 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
d4011ada
MC
11482 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11483 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11484 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
c1d2a196
MC
11485 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11486 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11487 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
c424cb24
MC
11488 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11489 }
1da177e4 11490
1da177e4 11491 tp->coalesce_mode = 0;
1da177e4
LT
11492 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11493 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11494 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11495
11496 /* Initialize MAC MI mode, polling disabled. */
11497 tw32_f(MAC_MI_MODE, tp->mi_mode);
11498 udelay(80);
11499
11500 /* Initialize data/descriptor byte/word swapping. */
11501 val = tr32(GRC_MODE);
11502 val &= GRC_MODE_HOST_STACKUP;
11503 tw32(GRC_MODE, val | tp->grc_mode);
11504
11505 tg3_switch_clocks(tp);
11506
11507 /* Clear this out for sanity. */
11508 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11509
11510 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11511 &pci_state_reg);
11512 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11513 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11514 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11515
11516 if (chiprevid == CHIPREV_ID_5701_A0 ||
11517 chiprevid == CHIPREV_ID_5701_B0 ||
11518 chiprevid == CHIPREV_ID_5701_B2 ||
11519 chiprevid == CHIPREV_ID_5701_B5) {
11520 void __iomem *sram_base;
11521
11522 /* Write some dummy words into the SRAM status block
11523 * area, see if it reads back correctly. If the return
11524 * value is bad, force enable the PCIX workaround.
11525 */
11526 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11527
11528 writel(0x00000000, sram_base);
11529 writel(0x00000000, sram_base + 4);
11530 writel(0xffffffff, sram_base + 4);
11531 if (readl(sram_base) != 0x00000000)
11532 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11533 }
11534 }
11535
11536 udelay(50);
11537 tg3_nvram_init(tp);
11538
11539 grc_misc_cfg = tr32(GRC_MISC_CFG);
11540 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11541
1da177e4
LT
11542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11543 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11544 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11545 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11546
fac9b83e
DM
11547 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11548 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11549 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11550 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11551 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11552 HOSTCC_MODE_CLRTICK_TXBD);
11553
11554 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11555 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11556 tp->misc_host_ctrl);
11557 }
11558
1da177e4
LT
11559 /* these are limited to 10/100 only */
11560 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11561 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11562 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11563 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11564 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11565 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11566 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11567 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11568 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
676917d4
MC
11569 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11570 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
b5d3772c 11571 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1da177e4
LT
11572 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11573
11574 err = tg3_phy_probe(tp);
11575 if (err) {
11576 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11577 pci_name(tp->pdev), err);
11578 /* ... but do not return immediately ... */
11579 }
11580
11581 tg3_read_partno(tp);
c4e6575c 11582 tg3_read_fw_ver(tp);
1da177e4
LT
11583
11584 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11585 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11586 } else {
11587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11588 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11589 else
11590 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11591 }
11592
11593 /* 5700 {AX,BX} chips have a broken status block link
11594 * change bit implementation, so we must use the
11595 * status register in those cases.
11596 */
11597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11598 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11599 else
11600 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11601
11602 /* The led_ctrl is set during tg3_phy_probe, here we might
11603 * have to force the link status polling mechanism based
11604 * upon subsystem IDs.
11605 */
11606 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 11607 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
1da177e4
LT
11608 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11609 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11610 TG3_FLAG_USE_LINKCHG_REG);
11611 }
11612
11613 /* For all SERDES we poll the MAC status register. */
11614 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11615 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11616 else
11617 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11618
5a6f3074 11619 /* All chips before 5787 can get confused if TX buffers
1da177e4
LT
11620 * straddle the 4GB address boundary in some cases.
11621 */
af36e6b6 11622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
b5d3772c 11623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 11624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 11625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
b5d3772c 11626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
5a6f3074
MC
11627 tp->dev->hard_start_xmit = tg3_start_xmit;
11628 else
11629 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
1da177e4
LT
11630
11631 tp->rx_offset = 2;
11632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11633 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11634 tp->rx_offset = 0;
11635
f92905de
MC
11636 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11637
11638 /* Increment the rx prod index on the rx std ring by at most
11639 * 8 for these chips to workaround hw errata.
11640 */
11641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11643 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11644 tp->rx_std_max_post = 8;
11645
8ed5d97e
MC
11646 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11647 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11648 PCIE_PWR_MGMT_L1_THRESH_MSK;
11649
1da177e4
LT
11650 return err;
11651}
11652
49b6e95f 11653#ifdef CONFIG_SPARC
1da177e4
LT
11654static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11655{
11656 struct net_device *dev = tp->dev;
11657 struct pci_dev *pdev = tp->pdev;
49b6e95f 11658 struct device_node *dp = pci_device_to_OF_node(pdev);
374d4cac 11659 const unsigned char *addr;
49b6e95f
DM
11660 int len;
11661
11662 addr = of_get_property(dp, "local-mac-address", &len);
11663 if (addr && len == 6) {
11664 memcpy(dev->dev_addr, addr, 6);
11665 memcpy(dev->perm_addr, dev->dev_addr, 6);
11666 return 0;
1da177e4
LT
11667 }
11668 return -ENODEV;
11669}
11670
11671static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11672{
11673 struct net_device *dev = tp->dev;
11674
11675 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 11676 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
11677 return 0;
11678}
11679#endif
11680
11681static int __devinit tg3_get_device_address(struct tg3 *tp)
11682{
11683 struct net_device *dev = tp->dev;
11684 u32 hi, lo, mac_offset;
008652b3 11685 int addr_ok = 0;
1da177e4 11686
49b6e95f 11687#ifdef CONFIG_SPARC
1da177e4
LT
11688 if (!tg3_get_macaddr_sparc(tp))
11689 return 0;
11690#endif
11691
11692 mac_offset = 0x7c;
f49639e6 11693 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
a4e2b347 11694 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
11695 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11696 mac_offset = 0xcc;
11697 if (tg3_nvram_lock(tp))
11698 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11699 else
11700 tg3_nvram_unlock(tp);
11701 }
b5d3772c
MC
11702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11703 mac_offset = 0x10;
1da177e4
LT
11704
11705 /* First try to get it from MAC address mailbox. */
11706 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11707 if ((hi >> 16) == 0x484b) {
11708 dev->dev_addr[0] = (hi >> 8) & 0xff;
11709 dev->dev_addr[1] = (hi >> 0) & 0xff;
11710
11711 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11712 dev->dev_addr[2] = (lo >> 24) & 0xff;
11713 dev->dev_addr[3] = (lo >> 16) & 0xff;
11714 dev->dev_addr[4] = (lo >> 8) & 0xff;
11715 dev->dev_addr[5] = (lo >> 0) & 0xff;
1da177e4 11716
008652b3
MC
11717 /* Some old bootcode may report a 0 MAC address in SRAM */
11718 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11719 }
11720 if (!addr_ok) {
11721 /* Next, try NVRAM. */
f49639e6 11722 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
008652b3
MC
11723 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11724 dev->dev_addr[0] = ((hi >> 16) & 0xff);
11725 dev->dev_addr[1] = ((hi >> 24) & 0xff);
11726 dev->dev_addr[2] = ((lo >> 0) & 0xff);
11727 dev->dev_addr[3] = ((lo >> 8) & 0xff);
11728 dev->dev_addr[4] = ((lo >> 16) & 0xff);
11729 dev->dev_addr[5] = ((lo >> 24) & 0xff);
11730 }
11731 /* Finally just fetch it out of the MAC control regs. */
11732 else {
11733 hi = tr32(MAC_ADDR_0_HIGH);
11734 lo = tr32(MAC_ADDR_0_LOW);
11735
11736 dev->dev_addr[5] = lo & 0xff;
11737 dev->dev_addr[4] = (lo >> 8) & 0xff;
11738 dev->dev_addr[3] = (lo >> 16) & 0xff;
11739 dev->dev_addr[2] = (lo >> 24) & 0xff;
11740 dev->dev_addr[1] = hi & 0xff;
11741 dev->dev_addr[0] = (hi >> 8) & 0xff;
11742 }
1da177e4
LT
11743 }
11744
11745 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11746#ifdef CONFIG_SPARC64
11747 if (!tg3_get_default_macaddr_sparc(tp))
11748 return 0;
11749#endif
11750 return -EINVAL;
11751 }
2ff43697 11752 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
11753 return 0;
11754}
11755
59e6b434
DM
11756#define BOUNDARY_SINGLE_CACHELINE 1
11757#define BOUNDARY_MULTI_CACHELINE 2
11758
11759static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11760{
11761 int cacheline_size;
11762 u8 byte;
11763 int goal;
11764
11765 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11766 if (byte == 0)
11767 cacheline_size = 1024;
11768 else
11769 cacheline_size = (int) byte * 4;
11770
11771 /* On 5703 and later chips, the boundary bits have no
11772 * effect.
11773 */
11774 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11775 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11776 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11777 goto out;
11778
11779#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11780 goal = BOUNDARY_MULTI_CACHELINE;
11781#else
11782#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11783 goal = BOUNDARY_SINGLE_CACHELINE;
11784#else
11785 goal = 0;
11786#endif
11787#endif
11788
11789 if (!goal)
11790 goto out;
11791
11792 /* PCI controllers on most RISC systems tend to disconnect
11793 * when a device tries to burst across a cache-line boundary.
11794 * Therefore, letting tg3 do so just wastes PCI bandwidth.
11795 *
11796 * Unfortunately, for PCI-E there are only limited
11797 * write-side controls for this, and thus for reads
11798 * we will still get the disconnects. We'll also waste
11799 * these PCI cycles for both read and write for chips
11800 * other than 5700 and 5701 which do not implement the
11801 * boundary bits.
11802 */
11803 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11804 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11805 switch (cacheline_size) {
11806 case 16:
11807 case 32:
11808 case 64:
11809 case 128:
11810 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11811 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11812 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11813 } else {
11814 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11815 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11816 }
11817 break;
11818
11819 case 256:
11820 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11821 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11822 break;
11823
11824 default:
11825 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11826 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11827 break;
11828 };
11829 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11830 switch (cacheline_size) {
11831 case 16:
11832 case 32:
11833 case 64:
11834 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11835 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11836 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11837 break;
11838 }
11839 /* fallthrough */
11840 case 128:
11841 default:
11842 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11843 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11844 break;
11845 };
11846 } else {
11847 switch (cacheline_size) {
11848 case 16:
11849 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11850 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11851 DMA_RWCTRL_WRITE_BNDRY_16);
11852 break;
11853 }
11854 /* fallthrough */
11855 case 32:
11856 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11857 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11858 DMA_RWCTRL_WRITE_BNDRY_32);
11859 break;
11860 }
11861 /* fallthrough */
11862 case 64:
11863 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11864 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11865 DMA_RWCTRL_WRITE_BNDRY_64);
11866 break;
11867 }
11868 /* fallthrough */
11869 case 128:
11870 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11871 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11872 DMA_RWCTRL_WRITE_BNDRY_128);
11873 break;
11874 }
11875 /* fallthrough */
11876 case 256:
11877 val |= (DMA_RWCTRL_READ_BNDRY_256 |
11878 DMA_RWCTRL_WRITE_BNDRY_256);
11879 break;
11880 case 512:
11881 val |= (DMA_RWCTRL_READ_BNDRY_512 |
11882 DMA_RWCTRL_WRITE_BNDRY_512);
11883 break;
11884 case 1024:
11885 default:
11886 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11887 DMA_RWCTRL_WRITE_BNDRY_1024);
11888 break;
11889 };
11890 }
11891
11892out:
11893 return val;
11894}
11895
/* Drive one test DMA transfer of @size bytes between the host buffer at
 * @buf_dma and NIC SRAM offset 0x2100 using a hand-built internal buffer
 * descriptor.  @to_device selects direction: 1 = host -> NIC (read DMA
 * engine), 0 = NIC -> host (write DMA engine).  Returns 0 once the
 * completion FIFO reports our descriptor (polled for up to 40 * 100us),
 * -ENODEV on timeout.
 *
 * NOTE(review): register write order below is deliberate; do not reorder.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the completion FIFOs and DMA status before starting. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor: host buffer <-> fixed NIC SRAM mbuf at 0x2100. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor to start the transfer. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO for our descriptor. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11976
ded7340d 11977#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
11978
/* Tune DMA_RWCTRL for the detected chip and bus mode, then (on
 * 5700/5701 only) run a write/read DMA loopback test at the maximum
 * write burst size to expose the 5700/5701 write-DMA bug.  If the
 * read-back buffer is corrupted, the write boundary is reduced to 16
 * bytes and the test retried.  Returns 0 on success, -ENOMEM if the
 * test buffer cannot be allocated, or -ENODEV on unrecoverable DMA
 * failure.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes, then boundary bits. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Chip/bus specific watermark settings.  The magic constants
	 * below come from Broadcom; see the per-branch comments.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual loopback test. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known ramp pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it.  On mismatch, first retry with the 16-byte
		 * write boundary workaround; only fail hard if that was
		 * already in effect.
		 */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12165
12166static void __devinit tg3_init_link_config(struct tg3 *tp)
12167{
12168 tp->link_config.advertising =
12169 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12170 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12171 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12172 ADVERTISED_Autoneg | ADVERTISED_MII);
12173 tp->link_config.speed = SPEED_INVALID;
12174 tp->link_config.duplex = DUPLEX_INVALID;
12175 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
12176 tp->link_config.active_speed = SPEED_INVALID;
12177 tp->link_config.active_duplex = DUPLEX_INVALID;
12178 tp->link_config.phy_is_low_power = 0;
12179 tp->link_config.orig_speed = SPEED_INVALID;
12180 tp->link_config.orig_duplex = DUPLEX_INVALID;
12181 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12182}
12183
12184static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12185{
fdfec172
MC
12186 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12187 tp->bufmgr_config.mbuf_read_dma_low_water =
12188 DEFAULT_MB_RDMA_LOW_WATER_5705;
12189 tp->bufmgr_config.mbuf_mac_rx_low_water =
12190 DEFAULT_MB_MACRX_LOW_WATER_5705;
12191 tp->bufmgr_config.mbuf_high_water =
12192 DEFAULT_MB_HIGH_WATER_5705;
b5d3772c
MC
12193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12194 tp->bufmgr_config.mbuf_mac_rx_low_water =
12195 DEFAULT_MB_MACRX_LOW_WATER_5906;
12196 tp->bufmgr_config.mbuf_high_water =
12197 DEFAULT_MB_HIGH_WATER_5906;
12198 }
fdfec172
MC
12199
12200 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12201 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12202 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12203 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12204 tp->bufmgr_config.mbuf_high_water_jumbo =
12205 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12206 } else {
12207 tp->bufmgr_config.mbuf_read_dma_low_water =
12208 DEFAULT_MB_RDMA_LOW_WATER;
12209 tp->bufmgr_config.mbuf_mac_rx_low_water =
12210 DEFAULT_MB_MACRX_LOW_WATER;
12211 tp->bufmgr_config.mbuf_high_water =
12212 DEFAULT_MB_HIGH_WATER;
12213
12214 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12215 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12216 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12217 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12218 tp->bufmgr_config.mbuf_high_water_jumbo =
12219 DEFAULT_MB_HIGH_WATER_JUMBO;
12220 }
1da177e4
LT
12221
12222 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12223 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12224}
12225
12226static char * __devinit tg3_phy_string(struct tg3 *tp)
12227{
12228 switch (tp->phy_id & PHY_ID_MASK) {
12229 case PHY_ID_BCM5400: return "5400";
12230 case PHY_ID_BCM5401: return "5401";
12231 case PHY_ID_BCM5411: return "5411";
12232 case PHY_ID_BCM5701: return "5701";
12233 case PHY_ID_BCM5703: return "5703";
12234 case PHY_ID_BCM5704: return "5704";
12235 case PHY_ID_BCM5705: return "5705";
12236 case PHY_ID_BCM5750: return "5750";
85e94ced 12237 case PHY_ID_BCM5752: return "5752";
a4e2b347 12238 case PHY_ID_BCM5714: return "5714";
4cf78e4f 12239 case PHY_ID_BCM5780: return "5780";
af36e6b6 12240 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 12241 case PHY_ID_BCM5787: return "5787";
d30cdd28 12242 case PHY_ID_BCM5784: return "5784";
126a3368 12243 case PHY_ID_BCM5756: return "5722/5756";
b5d3772c 12244 case PHY_ID_BCM5906: return "5906";
9936bcf6 12245 case PHY_ID_BCM5761: return "5761";
1da177e4
LT
12246 case PHY_ID_BCM8002: return "8002/serdes";
12247 case 0: return "serdes";
12248 default: return "unknown";
12249 };
12250}
12251
f9804ddb
MC
12252static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12253{
12254 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12255 strcpy(str, "PCI Express");
12256 return str;
12257 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12258 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12259
12260 strcpy(str, "PCIX:");
12261
12262 if ((clock_ctrl == 7) ||
12263 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12264 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12265 strcat(str, "133MHz");
12266 else if (clock_ctrl == 0)
12267 strcat(str, "33MHz");
12268 else if (clock_ctrl == 2)
12269 strcat(str, "50MHz");
12270 else if (clock_ctrl == 4)
12271 strcat(str, "66MHz");
12272 else if (clock_ctrl == 6)
12273 strcat(str, "100MHz");
f9804ddb
MC
12274 } else {
12275 strcpy(str, "PCI:");
12276 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12277 strcat(str, "66MHz");
12278 else
12279 strcat(str, "33MHz");
12280 }
12281 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12282 strcat(str, ":32-bit");
12283 else
12284 strcat(str, ":64-bit");
12285 return str;
12286}
12287
8c2dc7e1 12288static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
1da177e4
LT
12289{
12290 struct pci_dev *peer;
12291 unsigned int func, devnr = tp->pdev->devfn & ~7;
12292
12293 for (func = 0; func < 8; func++) {
12294 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12295 if (peer && peer != tp->pdev)
12296 break;
12297 pci_dev_put(peer);
12298 }
16fe9d74
MC
12299 /* 5704 can be configured in single-port mode, set peer to
12300 * tp->pdev in that case.
12301 */
12302 if (!peer) {
12303 peer = tp->pdev;
12304 return peer;
12305 }
1da177e4
LT
12306
12307 /*
12308 * We don't need to keep the refcount elevated; there's no way
12309 * to remove one half of this device without removing the other
12310 */
12311 pci_dev_put(peer);
12312
12313 return peer;
12314}
12315
15f9850d
DM
12316static void __devinit tg3_init_coal(struct tg3 *tp)
12317{
12318 struct ethtool_coalesce *ec = &tp->coal;
12319
12320 memset(ec, 0, sizeof(*ec));
12321 ec->cmd = ETHTOOL_GCOALESCE;
12322 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12323 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12324 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12325 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12326 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12327 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12328 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12329 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12330 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12331
12332 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12333 HOSTCC_MODE_CLRTICK_TXBD)) {
12334 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12335 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12336 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12337 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12338 }
d244c892
MC
12339
12340 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12341 ec->rx_coalesce_usecs_irq = 0;
12342 ec->tx_coalesce_usecs_irq = 0;
12343 ec->stats_block_coalesce_usecs = 0;
12344 }
15f9850d
DM
12345}
12346
1da177e4
LT
/* PCI probe entry point: map the device, read its configuration, set up
 * DMA masks and netdev callbacks, and register the net device.
 *
 * On failure, unwinds exactly what has been set up so far via the
 * err_out_* goto chain and returns a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner only for the first device probed. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Initialize the private state embedded in the netdev. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Pre-NAPI-conversion-era netdev callback wiring. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Detect chip revision, flags and quirks from config space. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wider mask was rejected. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: HW TSO chips always get it; older
	 * chips and ASF-enabled boards have it disabled; the rest use
	 * firmware TSO and carry the TSO_BUG workaround flag.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without TSO: shrink the RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* APE-capable chips expose a second register window in BAR 2. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (tp->aperegs == 0UL) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* drvdata must be set before register_netdev() makes the
	 * device reachable.
	 */
	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")));

	/* Continuation of the line above: colon-separated MAC address. */
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	       (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
12688
12689static void __devexit tg3_remove_one(struct pci_dev *pdev)
12690{
12691 struct net_device *dev = pci_get_drvdata(pdev);
12692
12693 if (dev) {
12694 struct tg3 *tp = netdev_priv(dev);
12695
7faa006f 12696 flush_scheduled_work();
1da177e4 12697 unregister_netdev(dev);
0d3031d9
MC
12698 if (tp->aperegs) {
12699 iounmap(tp->aperegs);
12700 tp->aperegs = NULL;
12701 }
6892914f
MC
12702 if (tp->regs) {
12703 iounmap(tp->regs);
22abe310 12704 tp->regs = NULL;
6892914f 12705 }
1da177e4
LT
12706 free_netdev(dev);
12707 pci_release_regions(pdev);
12708 pci_disable_device(pdev);
12709 pci_set_drvdata(pdev, NULL);
12710 }
12711}
12712
/* Legacy PCI suspend hook: quiesce the interface and put the chip into
 * the requested low-power state.  Returns 0 on success; on failure the
 * device is restarted so the system stays usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	/* Interface down: no hardware state beyond config space to save. */
	if (!netif_running(dev))
		return 0;

	/* Finish any queued reset work before stopping the NIC. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Could not enter low power: restart the hardware and
		 * re-attach so the device keeps working.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12764
/* Legacy PCI resume hook: restore config space, power the chip back to
 * D0 and restart the interface if it was running at suspend time.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Restore the state saved by tg3_suspend() (PCI reg 4, MSI). */
	pci_restore_state(tp->pdev);

	/* Interface was down at suspend: nothing to restart. */
	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12799
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver tg3_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = tg3_pci_tbl,
	.probe = tg3_init_one,
	.remove = __devexit_p(tg3_remove_one),
	.suspend = tg3_suspend,
	.resume = tg3_resume
};
12808
12809static int __init tg3_init(void)
12810{
29917620 12811 return pci_register_driver(&tg3_driver);
1da177e4
LT
12812}
12813
/* Module exit: unregister the driver; tg3_remove_one() runs per device. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12818
12819module_init(tg3_init);
12820module_exit(tg3_cleanup);