]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
tg3: Add TG3_FLG3_USE_PHYLIB
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
65610fba 7 * Copyright (C) 2005-2007 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
14c85021 26#include <linux/in.h>
1da177e4
LT
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
f9a5f7d3 40#include <linux/dma-mapping.h>
1da177e4
LT
41
42#include <net/checksum.h>
c9bdd4b5 43#include <net/ip.h>
1da177e4
LT
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
49b6e95f 50#ifdef CONFIG_SPARC
1da177e4 51#include <asm/idprom.h>
49b6e95f 52#include <asm/prom.h>
1da177e4
LT
53#endif
54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56#define TG3_VLAN_TAG_USED 1
57#else
58#define TG3_VLAN_TAG_USED 0
59#endif
60
1da177e4 61#define TG3_TSO_SUPPORT 1
1da177e4
LT
62
63#include "tg3.h"
64
65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": "
920e37f7
MC
67#define DRV_MODULE_VERSION "3.92"
68#define DRV_MODULE_RELDATE "May 2, 2008"
1da177e4
LT
69
70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0
72#define TG3_DEF_TX_MODE 0
73#define TG3_DEF_MSG_ENABLE \
74 (NETIF_MSG_DRV | \
75 NETIF_MSG_PROBE | \
76 NETIF_MSG_LINK | \
77 NETIF_MSG_TIMER | \
78 NETIF_MSG_IFDOWN | \
79 NETIF_MSG_IFUP | \
80 NETIF_MSG_RX_ERR | \
81 NETIF_MSG_TX_ERR)
82
83/* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
85 */
86#define TG3_TX_TIMEOUT (5 * HZ)
87
88/* hardware minimum and maximum for a single frame's data payload */
89#define TG3_MIN_MTU 60
90#define TG3_MAX_MTU(tp) \
0f893dc6 91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
92
93/* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
96 */
97#define TG3_RX_RING_SIZE 512
98#define TG3_DEF_RX_RING_PENDING 200
99#define TG3_RX_JUMBO_RING_SIZE 256
100#define TG3_DEF_RX_JUMBO_RING_PENDING 100
101
102/* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
107 */
108#define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
110
111#define TG3_TX_RING_SIZE 512
112#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113
114#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 TG3_RX_RING_SIZE)
116#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
121 TG3_TX_RING_SIZE)
1da177e4
LT
122#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
126
127/* minimum number of free TX descriptors required to wake up TX process */
42952231 128#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
1da177e4
LT
129
130/* number of ETHTOOL_GSTATS u64's */
131#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
4cafd3f5
MC
133#define TG3_NUM_TEST 6
134
1da177e4
LT
135static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140MODULE_LICENSE("GPL");
141MODULE_VERSION(DRV_MODULE_VERSION);
142
143static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144module_param(tg3_debug, int, 0);
145MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147static struct pci_device_id tg3_pci_tbl[] = {
13185217
HK
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217
HK
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
676917d4 191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
13185217
HK
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
6c7af27c 203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
9936bcf6
MC
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
13185217
HK
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213 {}
1da177e4
LT
214};
215
216MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
50da859d 218static const struct {
1da177e4
LT
219 const char string[ETH_GSTRING_LEN];
220} ethtool_stats_keys[TG3_NUM_STATS] = {
221 { "rx_octets" },
222 { "rx_fragments" },
223 { "rx_ucast_packets" },
224 { "rx_mcast_packets" },
225 { "rx_bcast_packets" },
226 { "rx_fcs_errors" },
227 { "rx_align_errors" },
228 { "rx_xon_pause_rcvd" },
229 { "rx_xoff_pause_rcvd" },
230 { "rx_mac_ctrl_rcvd" },
231 { "rx_xoff_entered" },
232 { "rx_frame_too_long_errors" },
233 { "rx_jabbers" },
234 { "rx_undersize_packets" },
235 { "rx_in_length_errors" },
236 { "rx_out_length_errors" },
237 { "rx_64_or_less_octet_packets" },
238 { "rx_65_to_127_octet_packets" },
239 { "rx_128_to_255_octet_packets" },
240 { "rx_256_to_511_octet_packets" },
241 { "rx_512_to_1023_octet_packets" },
242 { "rx_1024_to_1522_octet_packets" },
243 { "rx_1523_to_2047_octet_packets" },
244 { "rx_2048_to_4095_octet_packets" },
245 { "rx_4096_to_8191_octet_packets" },
246 { "rx_8192_to_9022_octet_packets" },
247
248 { "tx_octets" },
249 { "tx_collisions" },
250
251 { "tx_xon_sent" },
252 { "tx_xoff_sent" },
253 { "tx_flow_control" },
254 { "tx_mac_errors" },
255 { "tx_single_collisions" },
256 { "tx_mult_collisions" },
257 { "tx_deferred" },
258 { "tx_excessive_collisions" },
259 { "tx_late_collisions" },
260 { "tx_collide_2times" },
261 { "tx_collide_3times" },
262 { "tx_collide_4times" },
263 { "tx_collide_5times" },
264 { "tx_collide_6times" },
265 { "tx_collide_7times" },
266 { "tx_collide_8times" },
267 { "tx_collide_9times" },
268 { "tx_collide_10times" },
269 { "tx_collide_11times" },
270 { "tx_collide_12times" },
271 { "tx_collide_13times" },
272 { "tx_collide_14times" },
273 { "tx_collide_15times" },
274 { "tx_ucast_packets" },
275 { "tx_mcast_packets" },
276 { "tx_bcast_packets" },
277 { "tx_carrier_sense_errors" },
278 { "tx_discards" },
279 { "tx_errors" },
280
281 { "dma_writeq_full" },
282 { "dma_write_prioq_full" },
283 { "rxbds_empty" },
284 { "rx_discards" },
285 { "rx_errors" },
286 { "rx_threshold_hit" },
287
288 { "dma_readq_full" },
289 { "dma_read_prioq_full" },
290 { "tx_comp_queue_full" },
291
292 { "ring_set_send_prod_index" },
293 { "ring_status_update" },
294 { "nic_irqs" },
295 { "nic_avoided_irqs" },
296 { "nic_tx_threshold_hit" }
297};
298
50da859d 299static const struct {
4cafd3f5
MC
300 const char string[ETH_GSTRING_LEN];
301} ethtool_test_keys[TG3_NUM_TEST] = {
302 { "nvram test (online) " },
303 { "link test (online) " },
304 { "register test (offline)" },
305 { "memory test (offline)" },
306 { "loopback test (offline)" },
307 { "interrupt test (offline)" },
308};
309
b401e9e2
MC
/* Plain posted MMIO write of a 32-bit device register. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
314
/* Plain MMIO read of a 32-bit device register. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}
319
0d3031d9
MC
/* MMIO write into the APE (management processor) register window. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
324
/* MMIO read from the APE (management processor) register window. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}
329
1da177e4
LT
/* Write a device register indirectly through PCI config space
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA), for chips where direct
 * MMIO is unsafe.  indirect_lock serializes the two-step
 * address/data config cycle against concurrent indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* MMIO write followed by a read-back of the same register, which
 * flushes the posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
345
6892914f 346static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 347{
6892914f
MC
348 unsigned long flags;
349 u32 val;
350
351 spin_lock_irqsave(&tp->indirect_lock, flags);
352 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354 spin_unlock_irqrestore(&tp->indirect_lock, flags);
355 return val;
356}
357
/* Write a mailbox register in indirect mode.  Two mailboxes (the RX
 * return-ring consumer and standard-ring producer indices) have
 * dedicated config-space aliases and bypass the generic window; all
 * others go through the indirect register window at off + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
/* Read a mailbox register through the indirect window (off + 0x5600),
 * serialized by indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
b401e9e2
MC
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 is the indirect path here,
		 * which completes synchronously, so no flush read is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: delay before the flush read when required */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
424
09ee929c
MC
/* Mailbox write with an optional read-back flush.  The flush is skipped
 * when the chip reorders mailbox writes (MBOX_WRITE_REORDER) or when the
 * ICH workaround forces the non-posted indirect path.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
432
/* TX mailbox write.  The value is written twice on chips with the TXD
 * mailbox hardware bug, and read back on chips that reorder mailbox
 * writes, to make sure the producer index actually lands.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
442
b5d3772c
MC
/* 5906: mailboxes live in the GRC mailbox region, so offset by GRCMBOX_BASE. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}
447
/* 5906 variant of the mailbox write (GRC mailbox region). */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
452
20094930 453#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 454#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
455#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
456#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 457#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
458
459#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
460#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
461#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 462#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
463
/* Write one word of NIC on-board SRAM via the memory window.  On the
 * 5906 the statistics-block range is not writable and is silently
 * skipped.  Depending on SRAM_USE_CONFIG the window is driven through
 * PCI config space or through MMIO (with flush reads); either way the
 * window base is restored to zero afterwards, under indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
1da177e4
LT
/* Read one word of NIC on-board SRAM via the memory window (mirror of
 * tg3_write_mem).  On the 5906 the statistics-block range reads back
 * as zero.  The window base is restored to zero afterwards, under
 * indirect_lock.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
0d3031d9
MC
/* Release all eight APE hardware locks so the driver starts clean. */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}
525
/* Acquire an APE hardware lock shared with the management firmware.
 * Returns 0 on success (or when APE is absent / locknum unsupported is
 * -EINVAL), -EBUSY if the grant is not seen within ~1 ms.  Only
 * TG3_APE_LOCK_MEM is supported here.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
564
/* Release an APE hardware lock previously taken with tg3_ape_lock().
 * No-op when APE is absent or locknum is unsupported.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
582
1da177e4
LT
/* Mask PCI interrupts in MISC_HOST_CTRL and write 1 to the interrupt
 * mailbox, which disables further interrupt generation by the chip.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Conditionally force an interrupt: if not using tagged status and the
 * status block says an update is pending, assert SETINT via GRC local
 * control; otherwise kick the coalescing engine to resample now.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so
 * the ISR sees it before the unmask), unmask PCI interrupts, ack the
 * last tag in the interrupt mailbox (twice for 1-shot MSI chips), then
 * force an interrupt if work is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
04237ddd
MC
/* Return nonzero if the status block shows pending work: a link-change
 * event (only when link changes are interrupt-driven rather than
 * polled) or TX/RX ring indices that differ from what we last consumed.
 */
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
634
1da177e4 635/* tg3_restart_ints
04237ddd
MC
636 * similar to tg3_enable_ints, but it accurately determines whether there
637 * is new work pending and can return without flushing the PIO write
6aa20a22 638 * which reenables interrupts
1da177e4
LT
639 */
640static void tg3_restart_ints(struct tg3 *tp)
641{
fac9b83e
DM
642 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
643 tp->last_tag << 24);
1da177e4
LT
644 mmiowb();
645
fac9b83e
DM
646 /* When doing tagged status, this work check is unnecessary.
647 * The last_tag we write above tells the chip which piece of
648 * work we've completed.
649 */
650 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
651 tg3_has_work(tp))
04237ddd
MC
652 tw32(HOSTCC_MODE, tp->coalesce_mode |
653 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
654}
655
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire while we are deliberately stopped, then disable NAPI and TX.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated and re-enable chip
 * interrupts so any pending event is processed immediately.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Switch the core clock back to its normal source/speed via
 * TG3PCI_CLOCK_CTRL.  Skipped entirely on CPMU-equipped and 5780-class
 * chips.  The multi-step ALTCLK sequence and the 40 usec waits between
 * writes are required by the hardware; do not reorder.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
707#define PHY_BUSY_LOOPS 5000
708
/* Read a PHY register over the MII management interface.  Autopolling
 * is temporarily disabled for the access and restored afterwards.
 * Polls MI_COM_BUSY for up to PHY_BUSY_LOOPS * 10 usec.  Returns 0 and
 * stores the value in *val on success, -EBUSY on timeout (*val left 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			/* re-read after a settle time to latch the data */
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
/* Write a PHY register over the MII management interface (mirror of
 * tg3_readphy).  On the 5906, writes to MII_TG3_CTRL and
 * MII_TG3_AUX_CTRL are silently skipped and report success.
 * Returns 0 on success, -EBUSY on MI_COM_BUSY timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
806
95e2869a
MC
/* Reset the PHY by setting BMCR_RESET and polling until the bit
 * self-clears (up to 5000 * 10 usec).  Returns 0 on success, -EBUSY if
 * an MII access fails or the reset does not complete in time.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}
837
/* tp->lock is held. */
/* Poll until the firmware clears GRC_RX_CPU_DRIVER_EVENT, i.e. it has
 * acknowledged the previous driver event.
 * NOTE(review): the loop bound is 250000 iterations * udelay(10), which
 * is up to 2.5 *seconds*, not the 2.5 ms the original comment claimed —
 * confirm intended bound before relying on this timing.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;

	for (i = 0; i < 250000; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(10);
	}
}
850
/* tp->lock is held. */
/* Report the current MII link state to the management firmware (UMP)
 * on ASF-enabled 5780-class chips: wait for the previous event to be
 * acked, write a LINK_UPDATE command plus 14 bytes of packed PHY
 * registers (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000, PHYADDR)
 * into the firmware mailbox, then raise the driver-event bit.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	/* 1000BASE-T registers only exist on copper PHYs */
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);
}
900
/* Log the current link state (down, or speed/duplex and flow-control
 * settings when up) subject to netif_msg_link, and forward the state
 * to the management firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
928
929static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
930{
931 u16 miireg;
932
933 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
934 miireg = ADVERTISE_PAUSE_CAP;
935 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
936 miireg = ADVERTISE_PAUSE_ASYM;
937 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
938 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
939 else
940 miireg = 0;
941
942 return miireg;
943}
944
945static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
946{
947 u16 miireg;
948
949 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
950 miireg = ADVERTISE_1000XPAUSE;
951 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
952 miireg = ADVERTISE_1000XPSE_ASYM;
953 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
954 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
955 else
956 miireg = 0;
957
958 return miireg;
959}
960
961static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
962{
963 u8 cap = 0;
964
965 if (lcladv & ADVERTISE_PAUSE_CAP) {
966 if (lcladv & ADVERTISE_PAUSE_ASYM) {
967 if (rmtadv & LPA_PAUSE_CAP)
968 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
969 else if (rmtadv & LPA_PAUSE_ASYM)
970 cap = TG3_FLOW_CTRL_RX;
971 } else {
972 if (rmtadv & LPA_PAUSE_CAP)
973 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
974 }
975 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
976 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
977 cap = TG3_FLOW_CTRL_TX;
978 }
979
980 return cap;
981}
982
983static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
984{
985 u8 cap = 0;
986
987 if (lcladv & ADVERTISE_1000XPAUSE) {
988 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
989 if (rmtadv & LPA_1000XPAUSE)
990 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
991 else if (rmtadv & LPA_1000XPAUSE_ASYM)
992 cap = TG3_FLOW_CTRL_RX;
993 } else {
994 if (rmtadv & LPA_1000XPAUSE)
995 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
996 }
997 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
998 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
999 cap = TG3_FLOW_CTRL_TX;
1000 }
1001
1002 return cap;
1003}
1004
f51f3562 1005static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
95e2869a 1006{
f51f3562 1007 u8 flowctrl = 0;
95e2869a
MC
1008 u32 old_rx_mode = tp->rx_mode;
1009 u32 old_tx_mode = tp->tx_mode;
1010
1011 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1012 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1013 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
f51f3562 1014 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
95e2869a 1015 else
f51f3562
MC
1016 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1017 } else
1018 flowctrl = tp->link_config.flowctrl;
95e2869a 1019
f51f3562 1020 tp->link_config.active_flowctrl = flowctrl;
95e2869a 1021
f51f3562 1022 if (flowctrl & TG3_FLOW_CTRL_RX)
95e2869a
MC
1023 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1024 else
1025 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1026
f51f3562 1027 if (old_rx_mode != tp->rx_mode)
95e2869a 1028 tw32_f(MAC_RX_MODE, tp->rx_mode);
95e2869a 1029
f51f3562 1030 if (flowctrl & TG3_FLOW_CTRL_TX)
95e2869a
MC
1031 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1032 else
1033 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1034
f51f3562 1035 if (old_tx_mode != tp->tx_mode)
95e2869a 1036 tw32_f(MAC_TX_MODE, tp->tx_mode);
95e2869a
MC
1037}
1038
b2a5c19c
MC
1039static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1040{
1041 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1042 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1043}
1044
9ef8ca99
MC
1045static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1046{
1047 u32 phy;
1048
1049 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1050 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1051 return;
1052
1053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1054 u32 ephy;
1055
1056 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1057 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1058 ephy | MII_TG3_EPHY_SHADOW_EN);
1059 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1060 if (enable)
1061 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1062 else
1063 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1064 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1065 }
1066 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1067 }
1068 } else {
1069 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1070 MII_TG3_AUXCTL_SHDWSEL_MISC;
1071 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1072 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1073 if (enable)
1074 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1075 else
1076 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1077 phy |= MII_TG3_AUXCTL_MISC_WREN;
1078 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1079 }
1080 }
1081}
1082
1da177e4
LT
1083static void tg3_phy_set_wirespeed(struct tg3 *tp)
1084{
1085 u32 val;
1086
1087 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1088 return;
1089
1090 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1091 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1092 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1093 (val | (1 << 15) | (1 << 4)));
1094}
1095
b2a5c19c
MC
1096static void tg3_phy_apply_otp(struct tg3 *tp)
1097{
1098 u32 otp, phy;
1099
1100 if (!tp->phy_otp)
1101 return;
1102
1103 otp = tp->phy_otp;
1104
1105 /* Enable SM_DSP clock and tx 6dB coding. */
1106 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1107 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1108 MII_TG3_AUXCTL_ACTL_TX_6DB;
1109 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1110
1111 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1112 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1113 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1114
1115 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1116 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1117 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1118
1119 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1120 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1121 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1122
1123 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1124 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1125
1126 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1127 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1128
1129 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1130 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1131 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1132
1133 /* Turn off SM_DSP clock. */
1134 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1135 MII_TG3_AUXCTL_ACTL_TX_6DB;
1136 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1137}
1138
1da177e4
LT
1139static int tg3_wait_macro_done(struct tg3 *tp)
1140{
1141 int limit = 100;
1142
1143 while (limit--) {
1144 u32 tmp32;
1145
1146 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1147 if ((tmp32 & 0x1000) == 0)
1148 break;
1149 }
1150 }
1151 if (limit <= 0)
1152 return -EBUSY;
1153
1154 return 0;
1155}
1156
1157static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1158{
1159 static const u32 test_pat[4][6] = {
1160 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1161 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1162 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1163 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1164 };
1165 int chan;
1166
1167 for (chan = 0; chan < 4; chan++) {
1168 int i;
1169
1170 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1171 (chan * 0x2000) | 0x0200);
1172 tg3_writephy(tp, 0x16, 0x0002);
1173
1174 for (i = 0; i < 6; i++)
1175 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1176 test_pat[chan][i]);
1177
1178 tg3_writephy(tp, 0x16, 0x0202);
1179 if (tg3_wait_macro_done(tp)) {
1180 *resetp = 1;
1181 return -EBUSY;
1182 }
1183
1184 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1185 (chan * 0x2000) | 0x0200);
1186 tg3_writephy(tp, 0x16, 0x0082);
1187 if (tg3_wait_macro_done(tp)) {
1188 *resetp = 1;
1189 return -EBUSY;
1190 }
1191
1192 tg3_writephy(tp, 0x16, 0x0802);
1193 if (tg3_wait_macro_done(tp)) {
1194 *resetp = 1;
1195 return -EBUSY;
1196 }
1197
1198 for (i = 0; i < 6; i += 2) {
1199 u32 low, high;
1200
1201 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1202 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1203 tg3_wait_macro_done(tp)) {
1204 *resetp = 1;
1205 return -EBUSY;
1206 }
1207 low &= 0x7fff;
1208 high &= 0x000f;
1209 if (low != test_pat[chan][i] ||
1210 high != test_pat[chan][i+1]) {
1211 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1212 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1213 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1214
1215 return -EBUSY;
1216 }
1217 }
1218 }
1219
1220 return 0;
1221}
1222
1223static int tg3_phy_reset_chanpat(struct tg3 *tp)
1224{
1225 int chan;
1226
1227 for (chan = 0; chan < 4; chan++) {
1228 int i;
1229
1230 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1231 (chan * 0x2000) | 0x0200);
1232 tg3_writephy(tp, 0x16, 0x0002);
1233 for (i = 0; i < 6; i++)
1234 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1235 tg3_writephy(tp, 0x16, 0x0202);
1236 if (tg3_wait_macro_done(tp))
1237 return -EBUSY;
1238 }
1239
1240 return 0;
1241}
1242
1243static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1244{
1245 u32 reg32, phy9_orig;
1246 int retries, do_phy_reset, err;
1247
1248 retries = 10;
1249 do_phy_reset = 1;
1250 do {
1251 if (do_phy_reset) {
1252 err = tg3_bmcr_reset(tp);
1253 if (err)
1254 return err;
1255 do_phy_reset = 0;
1256 }
1257
1258 /* Disable transmitter and interrupt. */
1259 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1260 continue;
1261
1262 reg32 |= 0x3000;
1263 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1264
1265 /* Set full-duplex, 1000 mbps. */
1266 tg3_writephy(tp, MII_BMCR,
1267 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1268
1269 /* Set to master mode. */
1270 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1271 continue;
1272
1273 tg3_writephy(tp, MII_TG3_CTRL,
1274 (MII_TG3_CTRL_AS_MASTER |
1275 MII_TG3_CTRL_ENABLE_AS_MASTER));
1276
1277 /* Enable SM_DSP_CLOCK and 6dB. */
1278 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1279
1280 /* Block the PHY control access. */
1281 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1282 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1283
1284 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1285 if (!err)
1286 break;
1287 } while (--retries);
1288
1289 err = tg3_phy_reset_chanpat(tp);
1290 if (err)
1291 return err;
1292
1293 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1294 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1295
1296 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1297 tg3_writephy(tp, 0x16, 0x0000);
1298
1299 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1300 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1301 /* Set Extended packet length bit for jumbo frames */
1302 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1303 }
1304 else {
1305 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1306 }
1307
1308 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1309
1310 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1311 reg32 &= ~0x3000;
1312 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1313 } else if (!err)
1314 err = -EBUSY;
1315
1316 return err;
1317}
1318
1319/* This will reset the tigon3 PHY if there is no valid
1320 * link unless the FORCE argument is non-zero.
1321 */
1322static int tg3_phy_reset(struct tg3 *tp)
1323{
b2a5c19c 1324 u32 cpmuctrl;
1da177e4
LT
1325 u32 phy_status;
1326 int err;
1327
60189ddf
MC
1328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1329 u32 val;
1330
1331 val = tr32(GRC_MISC_CFG);
1332 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1333 udelay(40);
1334 }
1da177e4
LT
1335 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1336 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1337 if (err != 0)
1338 return -EBUSY;
1339
c8e1e82b
MC
1340 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1341 netif_carrier_off(tp->dev);
1342 tg3_link_report(tp);
1343 }
1344
1da177e4
LT
1345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1348 err = tg3_phy_reset_5703_4_5(tp);
1349 if (err)
1350 return err;
1351 goto out;
1352 }
1353
b2a5c19c
MC
1354 cpmuctrl = 0;
1355 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1356 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1357 cpmuctrl = tr32(TG3_CPMU_CTRL);
1358 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1359 tw32(TG3_CPMU_CTRL,
1360 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1361 }
1362
1da177e4
LT
1363 err = tg3_bmcr_reset(tp);
1364 if (err)
1365 return err;
1366
b2a5c19c
MC
1367 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1368 u32 phy;
1369
1370 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1371 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1372
1373 tw32(TG3_CPMU_CTRL, cpmuctrl);
1374 }
1375
b5af7126 1376 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
ce057f01
MC
1377 u32 val;
1378
1379 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1380 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1381 CPMU_LSPD_1000MB_MACCLK_12_5) {
1382 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1383 udelay(40);
1384 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1385 }
662f38d2
MC
1386
1387 /* Disable GPHY autopowerdown. */
1388 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1389 MII_TG3_MISC_SHDW_WREN |
1390 MII_TG3_MISC_SHDW_APD_SEL |
1391 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
ce057f01
MC
1392 }
1393
b2a5c19c
MC
1394 tg3_phy_apply_otp(tp);
1395
1da177e4
LT
1396out:
1397 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1398 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1399 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1400 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1401 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1402 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1403 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1404 }
1405 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1406 tg3_writephy(tp, 0x1c, 0x8d68);
1407 tg3_writephy(tp, 0x1c, 0x8d68);
1408 }
1409 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1410 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1411 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1412 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1413 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1414 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1415 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1416 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1417 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1418 }
c424cb24
MC
1419 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1420 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1421 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
c1d2a196
MC
1422 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1423 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1424 tg3_writephy(tp, MII_TG3_TEST1,
1425 MII_TG3_TEST1_TRIM_EN | 0x4);
1426 } else
1427 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
c424cb24
MC
1428 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1429 }
1da177e4
LT
1430 /* Set Extended packet length bit (bit 14) on all chips that */
1431 /* support jumbo frames */
1432 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1433 /* Cannot do read-modify-write on 5401 */
1434 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
0f893dc6 1435 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1436 u32 phy_reg;
1437
1438 /* Set bit 14 with read-modify-write to preserve other bits */
1439 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1440 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1441 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1442 }
1443
1444 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1445 * jumbo frames transmission.
1446 */
0f893dc6 1447 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1448 u32 phy_reg;
1449
1450 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1451 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1452 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1453 }
1454
715116a1 1455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1
MC
1456 /* adjust output voltage */
1457 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
715116a1
MC
1458 }
1459
9ef8ca99 1460 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
1461 tg3_phy_set_wirespeed(tp);
1462 return 0;
1463}
1464
1465static void tg3_frob_aux_power(struct tg3 *tp)
1466{
1467 struct tg3 *tp_peer = tp;
1468
9d26e213 1469 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1da177e4
LT
1470 return;
1471
8c2dc7e1
MC
1472 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1473 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1474 struct net_device *dev_peer;
1475
1476 dev_peer = pci_get_drvdata(tp->pdev_peer);
bc1c7567 1477 /* remove_one() may have been run on the peer. */
8c2dc7e1 1478 if (!dev_peer)
bc1c7567
MC
1479 tp_peer = tp;
1480 else
1481 tp_peer = netdev_priv(dev_peer);
1da177e4
LT
1482 }
1483
1da177e4 1484 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
6921d201
MC
1485 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1486 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1487 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1da177e4
LT
1488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
b401e9e2
MC
1490 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1491 (GRC_LCLCTRL_GPIO_OE0 |
1492 GRC_LCLCTRL_GPIO_OE1 |
1493 GRC_LCLCTRL_GPIO_OE2 |
1494 GRC_LCLCTRL_GPIO_OUTPUT0 |
1495 GRC_LCLCTRL_GPIO_OUTPUT1),
1496 100);
1da177e4
LT
1497 } else {
1498 u32 no_gpio2;
dc56b7d4 1499 u32 grc_local_ctrl = 0;
1da177e4
LT
1500
1501 if (tp_peer != tp &&
1502 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1503 return;
1504
dc56b7d4
MC
1505 /* Workaround to prevent overdrawing Amps. */
1506 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1507 ASIC_REV_5714) {
1508 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
b401e9e2
MC
1509 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1510 grc_local_ctrl, 100);
dc56b7d4
MC
1511 }
1512
1da177e4
LT
1513 /* On 5753 and variants, GPIO2 cannot be used. */
1514 no_gpio2 = tp->nic_sram_data_cfg &
1515 NIC_SRAM_DATA_CFG_NO_GPIO2;
1516
dc56b7d4 1517 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1da177e4
LT
1518 GRC_LCLCTRL_GPIO_OE1 |
1519 GRC_LCLCTRL_GPIO_OE2 |
1520 GRC_LCLCTRL_GPIO_OUTPUT1 |
1521 GRC_LCLCTRL_GPIO_OUTPUT2;
1522 if (no_gpio2) {
1523 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1524 GRC_LCLCTRL_GPIO_OUTPUT2);
1525 }
b401e9e2
MC
1526 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1527 grc_local_ctrl, 100);
1da177e4
LT
1528
1529 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1530
b401e9e2
MC
1531 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1532 grc_local_ctrl, 100);
1da177e4
LT
1533
1534 if (!no_gpio2) {
1535 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
b401e9e2
MC
1536 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1537 grc_local_ctrl, 100);
1da177e4
LT
1538 }
1539 }
1540 } else {
1541 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1542 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1543 if (tp_peer != tp &&
1544 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1545 return;
1546
b401e9e2
MC
1547 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1548 (GRC_LCLCTRL_GPIO_OE1 |
1549 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4 1550
b401e9e2
MC
1551 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1552 GRC_LCLCTRL_GPIO_OE1, 100);
1da177e4 1553
b401e9e2
MC
1554 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1555 (GRC_LCLCTRL_GPIO_OE1 |
1556 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4
LT
1557 }
1558 }
1559}
1560
e8f3f6ca
MC
1561static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1562{
1563 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1564 return 1;
1565 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1566 if (speed != SPEED_10)
1567 return 1;
1568 } else if (speed == SPEED_10)
1569 return 1;
1570
1571 return 0;
1572}
1573
1da177e4
LT
1574static int tg3_setup_phy(struct tg3 *, int);
1575
1576#define RESET_KIND_SHUTDOWN 0
1577#define RESET_KIND_INIT 1
1578#define RESET_KIND_SUSPEND 2
1579
1580static void tg3_write_sig_post_reset(struct tg3 *, int);
1581static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1582static int tg3_nvram_lock(struct tg3 *);
1583static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1584
15c3b696
MC
1585static void tg3_power_down_phy(struct tg3 *tp)
1586{
ce057f01
MC
1587 u32 val;
1588
5129724a
MC
1589 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1590 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1591 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1592 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1593
1594 sg_dig_ctrl |=
1595 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1596 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1597 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1598 }
3f7045c1 1599 return;
5129724a 1600 }
3f7045c1 1601
60189ddf 1602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
1603 tg3_bmcr_reset(tp);
1604 val = tr32(GRC_MISC_CFG);
1605 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1606 udelay(40);
1607 return;
dd477003 1608 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
715116a1
MC
1609 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1610 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1611 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1612 }
3f7045c1 1613
15c3b696
MC
1614 /* The PHY should not be powered down on some chips because
1615 * of bugs.
1616 */
1617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1619 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1620 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1621 return;
ce057f01 1622
b5af7126 1623 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
ce057f01
MC
1624 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1625 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1626 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1627 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1628 }
1629
15c3b696
MC
1630 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1631}
1632
bc1c7567 1633static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1634{
1635 u32 misc_host_ctrl;
1636 u16 power_control, power_caps;
1637 int pm = tp->pm_cap;
1638
1639 /* Make sure register accesses (indirect or otherwise)
1640 * will function correctly.
1641 */
1642 pci_write_config_dword(tp->pdev,
1643 TG3PCI_MISC_HOST_CTRL,
1644 tp->misc_host_ctrl);
1645
1646 pci_read_config_word(tp->pdev,
1647 pm + PCI_PM_CTRL,
1648 &power_control);
1649 power_control |= PCI_PM_CTRL_PME_STATUS;
1650 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1651 switch (state) {
bc1c7567 1652 case PCI_D0:
1da177e4
LT
1653 power_control |= 0;
1654 pci_write_config_word(tp->pdev,
1655 pm + PCI_PM_CTRL,
1656 power_control);
8c6bda1a
MC
1657 udelay(100); /* Delay after power state change */
1658
9d26e213
MC
1659 /* Switch out of Vaux if it is a NIC */
1660 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
b401e9e2 1661 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1662
1663 return 0;
1664
bc1c7567 1665 case PCI_D1:
1da177e4
LT
1666 power_control |= 1;
1667 break;
1668
bc1c7567 1669 case PCI_D2:
1da177e4
LT
1670 power_control |= 2;
1671 break;
1672
bc1c7567 1673 case PCI_D3hot:
1da177e4
LT
1674 power_control |= 3;
1675 break;
1676
1677 default:
1678 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1679 "requested.\n",
1680 tp->dev->name, state);
1681 return -EINVAL;
1682 };
1683
1684 power_control |= PCI_PM_CTRL_PME_ENABLE;
1685
1686 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1687 tw32(TG3PCI_MISC_HOST_CTRL,
1688 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1689
dd477003 1690 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
1da177e4 1691 tp->link_config.phy_is_low_power = 1;
dd477003
MC
1692 } else {
1693 if (tp->link_config.phy_is_low_power == 0) {
1694 tp->link_config.phy_is_low_power = 1;
1695 tp->link_config.orig_speed = tp->link_config.speed;
1696 tp->link_config.orig_duplex = tp->link_config.duplex;
1697 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1698 }
1da177e4 1699
dd477003
MC
1700 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1701 tp->link_config.speed = SPEED_10;
1702 tp->link_config.duplex = DUPLEX_HALF;
1703 tp->link_config.autoneg = AUTONEG_ENABLE;
1704 tg3_setup_phy(tp, 0);
1705 }
1da177e4
LT
1706 }
1707
b5d3772c
MC
1708 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1709 u32 val;
1710
1711 val = tr32(GRC_VCPU_EXT_CTRL);
1712 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1713 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
6921d201
MC
1714 int i;
1715 u32 val;
1716
1717 for (i = 0; i < 200; i++) {
1718 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1719 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1720 break;
1721 msleep(1);
1722 }
1723 }
a85feb8c
GZ
1724 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1725 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1726 WOL_DRV_STATE_SHUTDOWN |
1727 WOL_DRV_WOL |
1728 WOL_SET_MAGIC_PKT);
6921d201 1729
1da177e4
LT
1730 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1731
1732 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1733 u32 mac_mode;
1734
1735 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
dd477003
MC
1736 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1737 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1738 udelay(40);
1739 }
1da177e4 1740
3f7045c1
MC
1741 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1742 mac_mode = MAC_MODE_PORT_MODE_GMII;
1743 else
1744 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 1745
e8f3f6ca
MC
1746 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1747 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1748 ASIC_REV_5700) {
1749 u32 speed = (tp->tg3_flags &
1750 TG3_FLAG_WOL_SPEED_100MB) ?
1751 SPEED_100 : SPEED_10;
1752 if (tg3_5700_link_polarity(tp, speed))
1753 mac_mode |= MAC_MODE_LINK_POLARITY;
1754 else
1755 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1756 }
1da177e4
LT
1757 } else {
1758 mac_mode = MAC_MODE_PORT_MODE_TBI;
1759 }
1760
cbf46853 1761 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1762 tw32(MAC_LED_CTRL, tp->led_ctrl);
1763
1764 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1765 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1766 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1767
1768 tw32_f(MAC_MODE, mac_mode);
1769 udelay(100);
1770
1771 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1772 udelay(10);
1773 }
1774
1775 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1776 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1778 u32 base_val;
1779
1780 base_val = tp->pci_clock_ctrl;
1781 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1782 CLOCK_CTRL_TXCLK_DISABLE);
1783
b401e9e2
MC
1784 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1785 CLOCK_CTRL_PWRDOWN_PLL133, 40);
d7b0a857 1786 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
795d01c5 1787 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
d7b0a857 1788 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
4cf78e4f 1789 /* do nothing */
85e94ced 1790 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1791 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1792 u32 newbits1, newbits2;
1793
1794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1796 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1797 CLOCK_CTRL_TXCLK_DISABLE |
1798 CLOCK_CTRL_ALTCLK);
1799 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1800 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1801 newbits1 = CLOCK_CTRL_625_CORE;
1802 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1803 } else {
1804 newbits1 = CLOCK_CTRL_ALTCLK;
1805 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1806 }
1807
b401e9e2
MC
1808 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1809 40);
1da177e4 1810
b401e9e2
MC
1811 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1812 40);
1da177e4
LT
1813
1814 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1815 u32 newbits3;
1816
1817 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1818 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1819 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1820 CLOCK_CTRL_TXCLK_DISABLE |
1821 CLOCK_CTRL_44MHZ_CORE);
1822 } else {
1823 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1824 }
1825
b401e9e2
MC
1826 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1827 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1828 }
1829 }
1830
6921d201 1831 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
0d3031d9
MC
1832 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1833 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
3f7045c1 1834 tg3_power_down_phy(tp);
6921d201 1835
1da177e4
LT
1836 tg3_frob_aux_power(tp);
1837
1838 /* Workaround for unstable PLL clock */
1839 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1840 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1841 u32 val = tr32(0x7d00);
1842
1843 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1844 tw32(0x7d00, val);
6921d201 1845 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1846 int err;
1847
1848 err = tg3_nvram_lock(tp);
1da177e4 1849 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1850 if (!err)
1851 tg3_nvram_unlock(tp);
6921d201 1852 }
1da177e4
LT
1853 }
1854
bbadf503
MC
1855 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1856
1da177e4
LT
1857 /* Finally, set the new power state. */
1858 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1859 udelay(100); /* Delay after power state change */
1da177e4 1860
1da177e4
LT
1861 return 0;
1862}
1863
1da177e4
LT
1864static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1865{
1866 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1867 case MII_TG3_AUX_STAT_10HALF:
1868 *speed = SPEED_10;
1869 *duplex = DUPLEX_HALF;
1870 break;
1871
1872 case MII_TG3_AUX_STAT_10FULL:
1873 *speed = SPEED_10;
1874 *duplex = DUPLEX_FULL;
1875 break;
1876
1877 case MII_TG3_AUX_STAT_100HALF:
1878 *speed = SPEED_100;
1879 *duplex = DUPLEX_HALF;
1880 break;
1881
1882 case MII_TG3_AUX_STAT_100FULL:
1883 *speed = SPEED_100;
1884 *duplex = DUPLEX_FULL;
1885 break;
1886
1887 case MII_TG3_AUX_STAT_1000HALF:
1888 *speed = SPEED_1000;
1889 *duplex = DUPLEX_HALF;
1890 break;
1891
1892 case MII_TG3_AUX_STAT_1000FULL:
1893 *speed = SPEED_1000;
1894 *duplex = DUPLEX_FULL;
1895 break;
1896
1897 default:
715116a1
MC
1898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1899 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1900 SPEED_10;
1901 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1902 DUPLEX_HALF;
1903 break;
1904 }
1da177e4
LT
1905 *speed = SPEED_INVALID;
1906 *duplex = DUPLEX_INVALID;
1907 break;
1908 };
1909}
1910
1911static void tg3_phy_copper_begin(struct tg3 *tp)
1912{
1913 u32 new_adv;
1914 int i;
1915
1916 if (tp->link_config.phy_is_low_power) {
1917 /* Entering low power mode. Disable gigabit and
1918 * 100baseT advertisements.
1919 */
1920 tg3_writephy(tp, MII_TG3_CTRL, 0);
1921
1922 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1923 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1924 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1925 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1926
1927 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1928 } else if (tp->link_config.speed == SPEED_INVALID) {
1da177e4
LT
1929 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1930 tp->link_config.advertising &=
1931 ~(ADVERTISED_1000baseT_Half |
1932 ADVERTISED_1000baseT_Full);
1933
ba4d07a8 1934 new_adv = ADVERTISE_CSMA;
1da177e4
LT
1935 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1936 new_adv |= ADVERTISE_10HALF;
1937 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1938 new_adv |= ADVERTISE_10FULL;
1939 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1940 new_adv |= ADVERTISE_100HALF;
1941 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1942 new_adv |= ADVERTISE_100FULL;
ba4d07a8
MC
1943
1944 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1945
1da177e4
LT
1946 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1947
1948 if (tp->link_config.advertising &
1949 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1950 new_adv = 0;
1951 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1952 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1953 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1954 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1955 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1956 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1957 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1958 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1959 MII_TG3_CTRL_ENABLE_AS_MASTER);
1960 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1961 } else {
1962 tg3_writephy(tp, MII_TG3_CTRL, 0);
1963 }
1964 } else {
ba4d07a8
MC
1965 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1966 new_adv |= ADVERTISE_CSMA;
1967
1da177e4
LT
1968 /* Asking for a specific link mode. */
1969 if (tp->link_config.speed == SPEED_1000) {
1da177e4
LT
1970 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1971
1972 if (tp->link_config.duplex == DUPLEX_FULL)
1973 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1974 else
1975 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1976 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1977 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1978 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1979 MII_TG3_CTRL_ENABLE_AS_MASTER);
1da177e4 1980 } else {
1da177e4
LT
1981 if (tp->link_config.speed == SPEED_100) {
1982 if (tp->link_config.duplex == DUPLEX_FULL)
1983 new_adv |= ADVERTISE_100FULL;
1984 else
1985 new_adv |= ADVERTISE_100HALF;
1986 } else {
1987 if (tp->link_config.duplex == DUPLEX_FULL)
1988 new_adv |= ADVERTISE_10FULL;
1989 else
1990 new_adv |= ADVERTISE_10HALF;
1991 }
1992 tg3_writephy(tp, MII_ADVERTISE, new_adv);
ba4d07a8
MC
1993
1994 new_adv = 0;
1da177e4 1995 }
ba4d07a8
MC
1996
1997 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1da177e4
LT
1998 }
1999
2000 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2001 tp->link_config.speed != SPEED_INVALID) {
2002 u32 bmcr, orig_bmcr;
2003
2004 tp->link_config.active_speed = tp->link_config.speed;
2005 tp->link_config.active_duplex = tp->link_config.duplex;
2006
2007 bmcr = 0;
2008 switch (tp->link_config.speed) {
2009 default:
2010 case SPEED_10:
2011 break;
2012
2013 case SPEED_100:
2014 bmcr |= BMCR_SPEED100;
2015 break;
2016
2017 case SPEED_1000:
2018 bmcr |= TG3_BMCR_SPEED1000;
2019 break;
2020 };
2021
2022 if (tp->link_config.duplex == DUPLEX_FULL)
2023 bmcr |= BMCR_FULLDPLX;
2024
2025 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2026 (bmcr != orig_bmcr)) {
2027 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2028 for (i = 0; i < 1500; i++) {
2029 u32 tmp;
2030
2031 udelay(10);
2032 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2033 tg3_readphy(tp, MII_BMSR, &tmp))
2034 continue;
2035 if (!(tmp & BMSR_LSTATUS)) {
2036 udelay(40);
2037 break;
2038 }
2039 }
2040 tg3_writephy(tp, MII_BMCR, bmcr);
2041 udelay(40);
2042 }
2043 } else {
2044 tg3_writephy(tp, MII_BMCR,
2045 BMCR_ANENABLE | BMCR_ANRESTART);
2046 }
2047}
2048
2049static int tg3_init_5401phy_dsp(struct tg3 *tp)
2050{
2051 int err;
2052
2053 /* Turn off tap power management. */
2054 /* Set Extended packet length bit */
2055 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2056
2057 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2058 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2059
2060 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2061 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2062
2063 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2064 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2065
2066 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2067 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2068
2069 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2070 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2071
2072 udelay(40);
2073
2074 return err;
2075}
2076
3600d918 2077static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1da177e4 2078{
3600d918
MC
2079 u32 adv_reg, all_mask = 0;
2080
2081 if (mask & ADVERTISED_10baseT_Half)
2082 all_mask |= ADVERTISE_10HALF;
2083 if (mask & ADVERTISED_10baseT_Full)
2084 all_mask |= ADVERTISE_10FULL;
2085 if (mask & ADVERTISED_100baseT_Half)
2086 all_mask |= ADVERTISE_100HALF;
2087 if (mask & ADVERTISED_100baseT_Full)
2088 all_mask |= ADVERTISE_100FULL;
1da177e4
LT
2089
2090 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2091 return 0;
2092
1da177e4
LT
2093 if ((adv_reg & all_mask) != all_mask)
2094 return 0;
2095 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2096 u32 tg3_ctrl;
2097
3600d918
MC
2098 all_mask = 0;
2099 if (mask & ADVERTISED_1000baseT_Half)
2100 all_mask |= ADVERTISE_1000HALF;
2101 if (mask & ADVERTISED_1000baseT_Full)
2102 all_mask |= ADVERTISE_1000FULL;
2103
1da177e4
LT
2104 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2105 return 0;
2106
1da177e4
LT
2107 if ((tg3_ctrl & all_mask) != all_mask)
2108 return 0;
2109 }
2110 return 1;
2111}
2112
/* Verify that the pause bits currently advertised by the PHY match
 * what tp->link_config.flowctrl requests.  On full-duplex links a
 * mismatch means the negotiated flow control is stale, so 0 is
 * returned to force a renegotiation; on half-duplex links the
 * advertisement register is quietly rewritten for the next cycle.
 *
 * @lcladv: out - local MII_ADVERTISE value (pause bits possibly updated)
 * @rmtadv: out - link partner's MII_LPA value (only read when pause
 *          autoneg is enabled on a full-duplex link)
 * Returns 1 when link setup may proceed, 0 to force renegotiation.
 */
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	/* If the PHY can't be read, don't block link setup on it. */
	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	/* Pause bits currently in the PHY vs. the ones we want. */
	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
2145
/* Bring up (or re-check) the link on a copper PHY.  Applies per-chip
 * PHY workarounds, polls BMSR/AUX_STAT to determine the current link
 * state, decides whether the negotiated speed/duplex/flow-control
 * matches the configuration, reprograms the MAC mode accordingly and
 * updates the carrier state.  Always returns 0 except when the 5401
 * DSP init fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC events and clear any latched status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Turn off MI auto-polling while we drive the MDIO bus directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down: (re)load the 5401 DSP workaround
			 * and give the link up to 10ms to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a reset + DSP
			 * reload if the link still refuses to come up.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of the AUX_CTRL shadow register is set;
		 * if we had to set it, skip straight to renegotiation.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link-up, ~4ms max (latched BMSR read twice). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero AUX_STAT, then decode the
		 * negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable BMCR value (0x7fff reads occur while
		 * the register is mid-update -- presumably a PHY quirk).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg link is only "good" if the PHY is
			 * advertising everything we asked for and the
			 * pause bits are consistent.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: link is good only when the PHY
			 * state matches the forced configuration exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Kick off a fresh negotiation, then re-check link. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the link (GMII when the
	 * link is down or at gigabit, MII for 10/100).
	 */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit over PCI-X/high-speed PCI: notify firmware
	 * via the NIC SRAM mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate a link state change to the net stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2404
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine implemented in tg3_fiber_aneg_smachine().  Used when the
 * hardware autoneg engine is not in use.
 */
struct tg3_fiber_aneginfo {
	/* Current state-machine state (ANEG_STATE_*). */
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	/* Control/result flags; MR_LP_ADV_* reflect what the link
	 * partner advertised in its received config word.
	 */
	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters (incremented once per smachine invocation),
	 * used to implement ANEG_STATE_SETTLE_TIME timeouts.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * has been seen unchanged (ability match detection).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Transmitted/received config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes for tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Settle timeout, in smachine ticks. */
#define ANEG_STATE_SETTLE_TIME	10000
2468
2469static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2470 struct tg3_fiber_aneginfo *ap)
2471{
5be73b47 2472 u16 flowctrl;
1da177e4
LT
2473 unsigned long delta;
2474 u32 rx_cfg_reg;
2475 int ret;
2476
2477 if (ap->state == ANEG_STATE_UNKNOWN) {
2478 ap->rxconfig = 0;
2479 ap->link_time = 0;
2480 ap->cur_time = 0;
2481 ap->ability_match_cfg = 0;
2482 ap->ability_match_count = 0;
2483 ap->ability_match = 0;
2484 ap->idle_match = 0;
2485 ap->ack_match = 0;
2486 }
2487 ap->cur_time++;
2488
2489 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2490 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2491
2492 if (rx_cfg_reg != ap->ability_match_cfg) {
2493 ap->ability_match_cfg = rx_cfg_reg;
2494 ap->ability_match = 0;
2495 ap->ability_match_count = 0;
2496 } else {
2497 if (++ap->ability_match_count > 1) {
2498 ap->ability_match = 1;
2499 ap->ability_match_cfg = rx_cfg_reg;
2500 }
2501 }
2502 if (rx_cfg_reg & ANEG_CFG_ACK)
2503 ap->ack_match = 1;
2504 else
2505 ap->ack_match = 0;
2506
2507 ap->idle_match = 0;
2508 } else {
2509 ap->idle_match = 1;
2510 ap->ability_match_cfg = 0;
2511 ap->ability_match_count = 0;
2512 ap->ability_match = 0;
2513 ap->ack_match = 0;
2514
2515 rx_cfg_reg = 0;
2516 }
2517
2518 ap->rxconfig = rx_cfg_reg;
2519 ret = ANEG_OK;
2520
2521 switch(ap->state) {
2522 case ANEG_STATE_UNKNOWN:
2523 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2524 ap->state = ANEG_STATE_AN_ENABLE;
2525
2526 /* fallthru */
2527 case ANEG_STATE_AN_ENABLE:
2528 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2529 if (ap->flags & MR_AN_ENABLE) {
2530 ap->link_time = 0;
2531 ap->cur_time = 0;
2532 ap->ability_match_cfg = 0;
2533 ap->ability_match_count = 0;
2534 ap->ability_match = 0;
2535 ap->idle_match = 0;
2536 ap->ack_match = 0;
2537
2538 ap->state = ANEG_STATE_RESTART_INIT;
2539 } else {
2540 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2541 }
2542 break;
2543
2544 case ANEG_STATE_RESTART_INIT:
2545 ap->link_time = ap->cur_time;
2546 ap->flags &= ~(MR_NP_LOADED);
2547 ap->txconfig = 0;
2548 tw32(MAC_TX_AUTO_NEG, 0);
2549 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2550 tw32_f(MAC_MODE, tp->mac_mode);
2551 udelay(40);
2552
2553 ret = ANEG_TIMER_ENAB;
2554 ap->state = ANEG_STATE_RESTART;
2555
2556 /* fallthru */
2557 case ANEG_STATE_RESTART:
2558 delta = ap->cur_time - ap->link_time;
2559 if (delta > ANEG_STATE_SETTLE_TIME) {
2560 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2561 } else {
2562 ret = ANEG_TIMER_ENAB;
2563 }
2564 break;
2565
2566 case ANEG_STATE_DISABLE_LINK_OK:
2567 ret = ANEG_DONE;
2568 break;
2569
2570 case ANEG_STATE_ABILITY_DETECT_INIT:
2571 ap->flags &= ~(MR_TOGGLE_TX);
5be73b47
MC
2572 ap->txconfig = ANEG_CFG_FD;
2573 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2574 if (flowctrl & ADVERTISE_1000XPAUSE)
2575 ap->txconfig |= ANEG_CFG_PS1;
2576 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2577 ap->txconfig |= ANEG_CFG_PS2;
1da177e4
LT
2578 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2579 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2580 tw32_f(MAC_MODE, tp->mac_mode);
2581 udelay(40);
2582
2583 ap->state = ANEG_STATE_ABILITY_DETECT;
2584 break;
2585
2586 case ANEG_STATE_ABILITY_DETECT:
2587 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2588 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2589 }
2590 break;
2591
2592 case ANEG_STATE_ACK_DETECT_INIT:
2593 ap->txconfig |= ANEG_CFG_ACK;
2594 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2595 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2596 tw32_f(MAC_MODE, tp->mac_mode);
2597 udelay(40);
2598
2599 ap->state = ANEG_STATE_ACK_DETECT;
2600
2601 /* fallthru */
2602 case ANEG_STATE_ACK_DETECT:
2603 if (ap->ack_match != 0) {
2604 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2605 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2606 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2607 } else {
2608 ap->state = ANEG_STATE_AN_ENABLE;
2609 }
2610 } else if (ap->ability_match != 0 &&
2611 ap->rxconfig == 0) {
2612 ap->state = ANEG_STATE_AN_ENABLE;
2613 }
2614 break;
2615
2616 case ANEG_STATE_COMPLETE_ACK_INIT:
2617 if (ap->rxconfig & ANEG_CFG_INVAL) {
2618 ret = ANEG_FAILED;
2619 break;
2620 }
2621 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2622 MR_LP_ADV_HALF_DUPLEX |
2623 MR_LP_ADV_SYM_PAUSE |
2624 MR_LP_ADV_ASYM_PAUSE |
2625 MR_LP_ADV_REMOTE_FAULT1 |
2626 MR_LP_ADV_REMOTE_FAULT2 |
2627 MR_LP_ADV_NEXT_PAGE |
2628 MR_TOGGLE_RX |
2629 MR_NP_RX);
2630 if (ap->rxconfig & ANEG_CFG_FD)
2631 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2632 if (ap->rxconfig & ANEG_CFG_HD)
2633 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2634 if (ap->rxconfig & ANEG_CFG_PS1)
2635 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2636 if (ap->rxconfig & ANEG_CFG_PS2)
2637 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2638 if (ap->rxconfig & ANEG_CFG_RF1)
2639 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2640 if (ap->rxconfig & ANEG_CFG_RF2)
2641 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2642 if (ap->rxconfig & ANEG_CFG_NP)
2643 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2644
2645 ap->link_time = ap->cur_time;
2646
2647 ap->flags ^= (MR_TOGGLE_TX);
2648 if (ap->rxconfig & 0x0008)
2649 ap->flags |= MR_TOGGLE_RX;
2650 if (ap->rxconfig & ANEG_CFG_NP)
2651 ap->flags |= MR_NP_RX;
2652 ap->flags |= MR_PAGE_RX;
2653
2654 ap->state = ANEG_STATE_COMPLETE_ACK;
2655 ret = ANEG_TIMER_ENAB;
2656 break;
2657
2658 case ANEG_STATE_COMPLETE_ACK:
2659 if (ap->ability_match != 0 &&
2660 ap->rxconfig == 0) {
2661 ap->state = ANEG_STATE_AN_ENABLE;
2662 break;
2663 }
2664 delta = ap->cur_time - ap->link_time;
2665 if (delta > ANEG_STATE_SETTLE_TIME) {
2666 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2667 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2668 } else {
2669 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2670 !(ap->flags & MR_NP_RX)) {
2671 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2672 } else {
2673 ret = ANEG_FAILED;
2674 }
2675 }
2676 }
2677 break;
2678
2679 case ANEG_STATE_IDLE_DETECT_INIT:
2680 ap->link_time = ap->cur_time;
2681 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2682 tw32_f(MAC_MODE, tp->mac_mode);
2683 udelay(40);
2684
2685 ap->state = ANEG_STATE_IDLE_DETECT;
2686 ret = ANEG_TIMER_ENAB;
2687 break;
2688
2689 case ANEG_STATE_IDLE_DETECT:
2690 if (ap->ability_match != 0 &&
2691 ap->rxconfig == 0) {
2692 ap->state = ANEG_STATE_AN_ENABLE;
2693 break;
2694 }
2695 delta = ap->cur_time - ap->link_time;
2696 if (delta > ANEG_STATE_SETTLE_TIME) {
2697 /* XXX another gem from the Broadcom driver :( */
2698 ap->state = ANEG_STATE_LINK_OK;
2699 }
2700 break;
2701
2702 case ANEG_STATE_LINK_OK:
2703 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2704 ret = ANEG_DONE;
2705 break;
2706
2707 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2708 /* ??? unimplemented */
2709 break;
2710
2711 case ANEG_STATE_NEXT_PAGE_WAIT:
2712 /* ??? unimplemented */
2713 break;
2714
2715 default:
2716 ret = ANEG_FAILED;
2717 break;
2718 };
2719
2720 return ret;
2721}
2722
5be73b47 2723static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
1da177e4
LT
2724{
2725 int res = 0;
2726 struct tg3_fiber_aneginfo aninfo;
2727 int status = ANEG_FAILED;
2728 unsigned int tick;
2729 u32 tmp;
2730
2731 tw32_f(MAC_TX_AUTO_NEG, 0);
2732
2733 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2734 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2735 udelay(40);
2736
2737 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2738 udelay(40);
2739
2740 memset(&aninfo, 0, sizeof(aninfo));
2741 aninfo.flags |= MR_AN_ENABLE;
2742 aninfo.state = ANEG_STATE_UNKNOWN;
2743 aninfo.cur_time = 0;
2744 tick = 0;
2745 while (++tick < 195000) {
2746 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2747 if (status == ANEG_DONE || status == ANEG_FAILED)
2748 break;
2749
2750 udelay(1);
2751 }
2752
2753 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2754 tw32_f(MAC_MODE, tp->mac_mode);
2755 udelay(40);
2756
5be73b47
MC
2757 *txflags = aninfo.txconfig;
2758 *rxflags = aninfo.flags;
1da177e4
LT
2759
2760 if (status == ANEG_DONE &&
2761 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2762 MR_LP_ADV_FULL_DUPLEX)))
2763 res = 1;
2764
2765 return res;
2766}
2767
/* Initialize the BCM8002 SerDes PHY via a fixed register write
 * sequence (PLL lock range, soft reset, PMA config, POR toggle).
 * Skipped when the device is already initialized and in sync.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2817
/* Drive link setup on fiber devices that use the hardware SG_DIG
 * autonegotiation engine (5704S-style).  Handles forced-mode setup,
 * autoneg (re)start with a 5704 SerDes workaround, result decoding
 * including pause resolution, and link-up by parallel detection.
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All 5704 revisions after A0/A1 need the SerDes config
	 * workaround writes below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn off the HW autoneg engine if it
		 * was running, then report link from PCS sync alone.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While in parallel-detect hold-off, keep the link up
		 * as long as PCS is synced and no config is received.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		/* (Re)start hardware autoneg: soft-reset the SG_DIG
		 * block with the desired advertisement, then release.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: resolve flow control from
			 * our advertisement and the partner's bits.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable the HW
				 * engine and try parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2959
/* Drive link setup on fiber devices without the hardware autoneg
 * engine: run the software state machine (fiber_autoneg) when
 * autoneg is enabled, otherwise force a 1000FD link.  Returns 1 if
 * the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is nothing to negotiate. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Map the negotiated config words onto the
			 * standard 1000BASE-X pause advertisement bits.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Wait (bounded) for the sync/config change bits to
		 * stop reasserting themselves.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but the partner is silent and PCS is
		 * synced: treat as link up (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
3021
/* Top-level link setup for fiber (TBI) devices.  Short-circuits when
 * the link is already clean, otherwise reprograms the MAC for TBI
 * mode, runs hardware or software autoneg, updates speed/duplex,
 * the link LED and the carrier state.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-setup state so we can report changes even
	 * when the carrier state itself does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, and the MAC
	 * reports a clean synced link -- just ack and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack status-change bits until they stop reasserting (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out: pulse SEND_CONFIGS to nudge the
		 * link partner into renegotiating.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only ever run 1000 full duplex when up. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged, but re-report if the negotiated
		 * parameters changed underneath it.
		 */
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3129
747e8f8b
MC
/* Link setup for SerDes devices wired through an MII interface
 * (e.g. 5714S/5715S).  Forces the MAC into GMII port mode, optionally
 * resets the PHY, then either restarts 1000Base-X autonegotiation or
 * forces speed/duplex, and finally resolves link state, duplex and
 * flow control from the PHY registers.
 *
 * Returns the OR of all tg3_readphy() error codes (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any latched link-state attention bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice to get the
	 * current value.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* 5714 workaround: trust the MAC's TX status for link state
	 * rather than the PHY's BMSR bit.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
	    tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000Base-X advertisement word from the
		 * requested flow control and speed/duplex settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): push the
			 * new word and restart autonegotiation, then bail
			 * out early and let the AN timeout logic resolve
			 * the link later.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced-mode path: disable AN and force duplex. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
					     BMCR_ANRESTART |
					     BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read twice: latched-low link bit (see above). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the link partner's ability.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate any link transition to the net core and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3300
/* Periodic helper for MII SerDes links: if autonegotiation gets no
 * response from the partner, fall back to "parallel detection" (force
 * 1000/full when signal is present but no config code words arrive);
 * conversely, re-enable autoneg once config code words reappear.
 *
 * NOTE(review): appears to be invoked from the driver's periodic timer
 * path — confirm caller context and locking against the full file.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice — presumably to clear a latched value
			 * before sampling; confirm against PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3358
1da177e4
LT
/* Top-level link (re)configuration.  Dispatches to the fiber,
 * MII-SerDes, or copper setup routine based on device flags, then
 * applies chip-wide settings that depend on the negotiated link:
 *
 *  - CPMU clock prescaler fixup on 5784 A0/A1 revisions,
 *  - MAC TX IPG/slot-time tuning for half-duplex gigabit,
 *  - statistics coalescing ticks (pre-5705 chips, only while link up),
 *  - ASPM L1 entry threshold workaround when enabled.
 *
 * Returns the status of the underlying PHY setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Pick the GRC prescaler to match the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Only gather stats while the link is up; zero ticks
		 * otherwise to stop the coalescing engine.
		 */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Lower the L1 entry threshold while the link is down. */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3421
df3e6548
MC
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active, hitting this path
	 * again means something else is wrong — stop here.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery under tp->lock; the actual chip reset
	 * happens later from the reset task.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3442
1b2a7205
MC
3443static inline u32 tg3_tx_avail(struct tg3 *tp)
3444{
3445 smp_mb();
3446 return (tp->tx_pending -
3447 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3448}
3449
1da177e4
LT
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Walk the completed descriptors from our consumer index up to
	 * the hardware's, unmapping and freeing each skb.
	 */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at a "completed" slot means the chip reported
		 * a completion for a descriptor we never posted — the MMIO
		 * reorder symptom; hand off to recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each paged fragment; only the head slot holds the
		 * skb pointer, so frag slots must have ri->skb == NULL.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with the xmit
	 * path stopping the queue concurrently.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3517
3518/* Returns size of skb allocated or < 0 on error.
3519 *
3520 * We only need to fill in the address because the other members
3521 * of the RX descriptor are invariant, see tg3_init_rings.
3522 *
3523 * Note the purposeful assymetry of cpu vs. chip accesses. For
3524 * posting buffers we only dirty the first cache line of the RX
3525 * descriptor (containing the address). Whereas for the RX status
3526 * buffers the cpu only reads the last cacheline of the RX descriptor
3527 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3528 */
3529static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3530 int src_idx, u32 dest_idx_unmasked)
3531{
3532 struct tg3_rx_buffer_desc *desc;
3533 struct ring_info *map, *src_map;
3534 struct sk_buff *skb;
3535 dma_addr_t mapping;
3536 int skb_size, dest_idx;
3537
3538 src_map = NULL;
3539 switch (opaque_key) {
3540 case RXD_OPAQUE_RING_STD:
3541 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3542 desc = &tp->rx_std[dest_idx];
3543 map = &tp->rx_std_buffers[dest_idx];
3544 if (src_idx >= 0)
3545 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 3546 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
3547 break;
3548
3549 case RXD_OPAQUE_RING_JUMBO:
3550 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3551 desc = &tp->rx_jumbo[dest_idx];
3552 map = &tp->rx_jumbo_buffers[dest_idx];
3553 if (src_idx >= 0)
3554 src_map = &tp->rx_jumbo_buffers[src_idx];
3555 skb_size = RX_JUMBO_PKT_BUF_SZ;
3556 break;
3557
3558 default:
3559 return -EINVAL;
3560 };
3561
3562 /* Do not overwrite any of the map or rp information
3563 * until we are sure we can commit to a new buffer.
3564 *
3565 * Callers depend upon this behavior and assume that
3566 * we leave everything unchanged if we fail.
3567 */
a20e9c62 3568 skb = netdev_alloc_skb(tp->dev, skb_size);
1da177e4
LT
3569 if (skb == NULL)
3570 return -ENOMEM;
3571
1da177e4
LT
3572 skb_reserve(skb, tp->rx_offset);
3573
3574 mapping = pci_map_single(tp->pdev, skb->data,
3575 skb_size - tp->rx_offset,
3576 PCI_DMA_FROMDEVICE);
3577
3578 map->skb = skb;
3579 pci_unmap_addr_set(map, mapping, mapping);
3580
3581 if (src_map != NULL)
3582 src_map->skb = NULL;
3583
3584 desc->addr_hi = ((u64)mapping >> 32);
3585 desc->addr_lo = ((u64)mapping & 0xffffffff);
3586
3587 return skb_size;
3588}
3589
3590/* We only need to move over in the address because the other
3591 * members of the RX descriptor are invariant. See notes above
3592 * tg3_alloc_rx_skb for full details.
3593 */
3594static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3595 int src_idx, u32 dest_idx_unmasked)
3596{
3597 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3598 struct ring_info *src_map, *dest_map;
3599 int dest_idx;
3600
3601 switch (opaque_key) {
3602 case RXD_OPAQUE_RING_STD:
3603 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3604 dest_desc = &tp->rx_std[dest_idx];
3605 dest_map = &tp->rx_std_buffers[dest_idx];
3606 src_desc = &tp->rx_std[src_idx];
3607 src_map = &tp->rx_std_buffers[src_idx];
3608 break;
3609
3610 case RXD_OPAQUE_RING_JUMBO:
3611 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3612 dest_desc = &tp->rx_jumbo[dest_idx];
3613 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3614 src_desc = &tp->rx_jumbo[src_idx];
3615 src_map = &tp->rx_jumbo_buffers[src_idx];
3616 break;
3617
3618 default:
3619 return;
3620 };
3621
3622 dest_map->skb = src_map->skb;
3623 pci_unmap_addr_set(dest_map, mapping,
3624 pci_unmap_addr(src_map, mapping));
3625 dest_desc->addr_hi = src_desc->addr_hi;
3626 dest_desc->addr_lo = src_desc->addr_lo;
3627
3628 src_map->skb = NULL;
3629}
3630
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged skb to the stack via the HW-accel VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3637
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* NAPI RX poll body: drains up to @budget packets from the status ring,
 * reposts buffers, and returns the number of packets delivered.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring (std or
		 * jumbo) and slot this status entry refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			/* Large packet: hand the mapped buffer straight up
			 * and post a freshly allocated replacement.
			 */
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			/* Small packet: copy into a fresh skb and recycle
			 * the original DMA buffer back to the ring.
			 */
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip flagged
		 * TCP/UDP and the computed checksum is 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush the std producer index so the chip
		 * never starves for buffers mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes before any following MMIO. */
	mmiowb();

	return received;
}
3817
/* One pass of NAPI work: handles link-change events, TX completions,
 * and RX up to @budget.  Returns the cumulative work_done; caller
 * (tg3_poll) loops and also checks TG3_FLAG_TX_RECOVERY_PENDING.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* phylib owns link management — just ack
				 * the MAC attention bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() may have scheduled a chip reset; stop here. */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3859
/* NAPI poll entry point.  Loops over tg3_poll_work() until the budget
 * is exhausted or no work remains, then completes NAPI and re-enables
 * chip interrupts.  A pending TX recovery short-circuits to the reset
 * task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3900
/* Mark the IRQ handlers quiescent and wait for any in-flight handler
 * to finish.  The smp_mb() publishes irq_sync before synchronize_irq()
 * so a concurrently running handler observes it.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	/* Nested quiesce is a driver bug. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3910
/* Nonzero while IRQ processing is quiesced (see tg3_irq_quiesce). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3915
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3927
/* Counterpart to tg3_full_lock(); releases tp->lock.  (irq_sync is
 * cleared elsewhere, e.g. by the chip-reset path.)
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3932
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines NAPI will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3949
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3974
/* Legacy INTx interrupt handler (non-tagged status).  Verifies the
 * interrupt is ours, masks further chip IRQs via the interrupt
 * mailbox, and schedules NAPI if there is work.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4023
/* INTx interrupt handler for chips using tagged status blocks: a new
 * status_tag (vs. tp->last_tag) identifies our interrupt instead of
 * the SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen.  Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
4071
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt (and disable further ones) if the status
	 * block was updated or the chip asserted INTA#; otherwise report
	 * it as not ours so the test can detect a broken interrupt line.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4086
8e7a22e3 4087static int tg3_init_hw(struct tg3 *, int);
944d980e 4088static int tg3_halt(struct tg3 *, int, int);
1da177e4 4089
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the device is halted and dev_close() is called; because
 * dev_close() must run without tp->lock, the lock is dropped and
 * reacquired around it (hence the sparse __releases/__acquires
 * annotations below).
 *
 * Returns 0 on success or the tg3_init_hw() error code on failure.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() may sleep and takes the lock itself, so we
		 * must unlock first; timer and NAPI state are quiesced
		 * before closing.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4113
1da177e4
LT
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: synthesize an interrupt-handler invocation so netconsole
 * and similar users can drive rx/tx with normal interrupts unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4122
/* Deferred reset worker (scheduled from e.g. tg3_tx_timeout).
 * Halts and re-initializes the chip outside interrupt context.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The device may have been closed between scheduling and
	 * execution of this work item; nothing to do then.
	 */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	/* tg3_netif_stop() must run without the lock held, then the
	 * lock is retaken with irq_sync requested (second arg = 1).
	 */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the restart-timer request set by the caller. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Fall back to flushed mailbox writes to recover from a
		 * posted-write reordering problem on the tx path.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
4163
b0408751
MC
4164static void tg3_dump_short_state(struct tg3 *tp)
4165{
4166 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4167 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4168 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4169 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4170}
4171
/* net_device tx_timeout hook: log diagnostic state (if tx-error messages
 * are enabled) and schedule the deferred reset worker.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* Actual recovery happens in tg3_reset_task(). */
	schedule_work(&tp->reset_task);
}
4184
c58ec932
MC
4185/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4186static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4187{
4188 u32 base = (u32) mapping & 0xffffffff;
4189
4190 return ((base > 0xffffdcc0) &&
4191 (base + len + 8 < base));
4192}
4193
72f2afb8
MC
4194/* Test for DMA addresses > 40-bit */
4195static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4196 int len)
4197{
4198#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6728a8e2 4199 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
72f2afb8
MC
4200 return (((u64) mapping + len) > DMA_40BIT_MASK);
4201 return 0;
4202#else
4203 return 0;
4204#endif
4205}
4206
1da177e4
LT
4207static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4208
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearizes the skb into a freshly-allocated copy whose single mapping
 * does not trip the 4GB-crossing bug, queues that copy at *start, and
 * then unmaps/releases every ring entry the original multi-fragment skb
 * had claimed between *start and last_plus_one.
 *
 * Returns 0 on success, -1 on allocation/mapping failure (in which case
 * the caller silently drops the packet; the ring entries are still
 * cleaned up and the original skb is freed either way).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 additionally needs the data 4-byte aligned, so
		 * copy with enough extra headroom to realign.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		/* NOTE(review): pci_map_single() result is not checked for
		 * mapping failure before the 4G test — TODO confirm this is
		 * acceptable on the platforms this driver targets.
		 */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries.  Entry 0 is the linear head
	 * (skb_headlen bytes); entries 1..n correspond to page frags.
	 * The first entry inherits the new skb/mapping (or NULL on
	 * failure); the rest are cleared.
	 */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
4276
4277static void tg3_set_txd(struct tg3 *tp, int entry,
4278 dma_addr_t mapping, int len, u32 flags,
4279 u32 mss_and_is_end)
4280{
4281 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4282 int is_end = (mss_and_is_end & 0x1);
4283 u32 mss = (mss_and_is_end >> 1);
4284 u32 vlan_tag = 0;
4285
4286 if (is_end)
4287 flags |= TXD_FLAG_END;
4288 if (flags & TXD_FLAG_VLAN) {
4289 vlan_tag = flags >> 16;
4290 flags &= 0xffff;
4291 }
4292 vlan_tag |= (mss << TXD_MSS_SHIFT);
4293
4294 txd->addr_hi = ((u64) mapping >> 32);
4295 txd->addr_lo = ((u64) mapping & 0xffffffff);
4296 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4297 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4298}
4299
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the linear head and each page fragment, fills one descriptor per
 * piece, then kicks the hardware by writing the new producer index to
 * the send-host mailbox.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when
 * the ring cannot hold the skb (which should never happen while the
 * queue is awake).
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* The headers are modified below, so a cloned header
		 * block must be un-shared first.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Encode the header length into the upper bits of mss
		 * as the HW TSO engine expects; IPv4 additionally needs
		 * check/tot_len pre-cooked for per-segment fixup.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* HW TSO computes the TCP checksum itself. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* Only the first ring entry keeps the skb pointer for reclaim. */
	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop the queue, but re-check against
		 * the wakeup threshold to close the race with reclaim.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4418
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * The over-long-header skb is segmented in software and each resulting
 * segment is handed back to tg3_start_xmit_dma_bug() individually;
 * the original skb is always freed.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY when the ring cannot hold the estimated segments.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping; reclaim may have freed room. */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment with TSO masked off so the stack does the split. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
52c0fd83 4453
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit(), but additionally checks every DMA mapping
 * against the 4GB-crossing and >40-bit address bugs, and reroutes the
 * whole packet through tigon3_dma_hwbug_workaround() if any mapping
 * would trip them.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* Headers are modified below; un-share a cloned header. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers over 80 bytes trip a TSO hardware bug on some
		 * chips; fall back to software GSO for those packets.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* HW TSO computes the checksum itself. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants the pseudo-header checksum
			 * pre-seeded in the TCP header.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Communicate IP/TCP option lengths; the field used
		 * (mss bits vs. base_flags bits) differs by chip family.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* 5701 always needs the workaround path; otherwise only when
	 * the head mapping straddles a 4GB boundary.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
		would_hit_hwbug = 1;
	else if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first entry this skb occupies; the
		 * workaround re-queues a linearized copy from there.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4629
4630static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4631 int new_mtu)
4632{
4633 dev->mtu = new_mtu;
4634
ef7f5ec0 4635 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4636 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4637 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4638 ethtool_op_set_tso(dev, 0);
4639 }
4640 else
4641 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4642 } else {
a4e2b347 4643 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4644 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4645 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4646 }
1da177e4
LT
4647}
4648
/* net_device change_mtu hook.
 *
 * Validates the requested MTU, then (if the interface is up) halts the
 * chip, applies the new MTU and restarts the hardware under the full
 * lock.  Returns 0 on success, -EINVAL for an out-of-range MTU, or the
 * tg3_restart_hw() error.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4682
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard rx ring: unmap and release every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo rx ring. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Tx ring: an skb owns one entry for its linear head plus one
	 * entry per page fragment; only the head entry carries the skb
	 * pointer.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			/* Mask i in case the frags wrap past ring end. */
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4754
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one rx buffer could be
 * allocated; allocating fewer than requested shrinks the pending count
 * with a warning instead of failing.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use jumbo-sized buffers in the standard ring
	 * when running at jumbo MTU (they have no jumbo ring).
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					/* Unwind the std-ring buffers
					 * allocated above.
					 */
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4844
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases every DMA-consistent region and the software ring-info
 * arrays.  rx_std_buffers is the single kzalloc that also backs
 * rx_jumbo_buffers and tx_buffers (see tg3_alloc_consistent), so one
 * kfree covers all three.  Safe to call on a partially-allocated state:
 * each pointer is checked and NULLed after freeing.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
				    tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4884
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the software ring-info arrays (one kzalloc carved into
 * rx_std_buffers / rx_jumbo_buffers / tx_buffers) and the five
 * DMA-consistent regions the chip uses.  Returns 0 on success or
 * -ENOMEM after freeing any partial allocations.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* Single allocation backing all three software ring arrays. */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	/* Carve the jumbo and tx arrays out of the same block. */
	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	/* tg3_free_consistent() tolerates partial allocation. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4946
4947#define MAX_WAIT_CNT 1000
4948
4949/* To stop a block, clear the enable bit and poll till it
4950 * clears. tp->lock is held.
4951 */
b3b7d6be 4952static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4953{
4954 unsigned int i;
4955 u32 val;
4956
4957 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4958 switch (ofs) {
4959 case RCVLSC_MODE:
4960 case DMAC_MODE:
4961 case MBFREE_MODE:
4962 case BUFMGR_MODE:
4963 case MEMARB_MODE:
4964 /* We can't enable/disable these bits of the
4965 * 5705/5750, just say success.
4966 */
4967 return 0;
4968
4969 default:
4970 break;
4971 };
4972 }
4973
4974 val = tr32(ofs);
4975 val &= ~enable_bit;
4976 tw32_f(ofs, val);
4977
4978 for (i = 0; i < MAX_WAIT_CNT; i++) {
4979 udelay(100);
4980 val = tr32(ofs);
4981 if ((val & enable_bit) == 0)
4982 break;
4983 }
4984
b3b7d6be 4985 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4986 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4987 "ofs=%lx enable_bit=%x\n",
4988 ofs, enable_bit);
4989 return -ENODEV;
4990 }
4991
4992 return 0;
4993}
4994
/* tp->lock is held.
 *
 * Stop all rx/tx/DMA engine blocks in dependency order, then clear the
 * FTQ and memory-management blocks and zero the status/stats blocks.
 * Errors from individual block stops are OR-ed together; with @silent
 * set, timeouts are not reported.  Returns 0 if every block stopped.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks first... */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* ...then send-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* MAC_TX_MODE clears itself; poll rather than tg3_stop_block. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe status/stats so stale data is not consumed after restart. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5057
/* tp->lock is held.
 *
 * Acquire the NVRAM software arbitration lock (recursive: an internal
 * count allows nested acquisition; only the first caller touches the
 * SWARB register).  No-op on chips without NVRAM.  Returns 0 on success,
 * -ENODEV if the arbitration grant never arrives.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			/* Poll up to 8000 * 20us for the grant. */
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Withdraw the request on timeout. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
5080
5081/* tp->lock is held. */
5082static void tg3_nvram_unlock(struct tg3 *tp)
5083{
ec41c7df
MC
5084 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5085 if (tp->nvram_lock_cnt > 0)
5086 tp->nvram_lock_cnt--;
5087 if (tp->nvram_lock_cnt == 0)
5088 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5089 }
1da177e4
LT
5090}
5091
e6af301b
MC
5092/* tp->lock is held. */
5093static void tg3_enable_nvram_access(struct tg3 *tp)
5094{
5095 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5096 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5097 u32 nvaccess = tr32(NVRAM_ACCESS);
5098
5099 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5100 }
5101}
5102
5103/* tp->lock is held. */
5104static void tg3_disable_nvram_access(struct tg3 *tp)
5105{
5106 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5107 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5108 u32 nvaccess = tr32(NVRAM_ACCESS);
5109
5110 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5111 }
5112}
5113
/* Post an event to the APE management firmware.
 *
 * Silently returns if the APE segment signature or firmware-ready
 * status do not check out.  Otherwise waits (bounded) for any previous
 * event to drain, writes the new event with the PENDING bit under the
 * APE memory lock, and finally rings the APE doorbell.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Slot free: write our event while still holding the lock. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if the event slot was actually free. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5149
/* Notify the APE management firmware of a driver state transition
 * (RESET_KIND_INIT / SHUTDOWN / SUSPEND).  On INIT the host segment
 * signature, length, init count, driver id and behavior flags are
 * (re)written first.  No-op unless the APE is enabled.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		/* Bump the init counter so the APE can see re-inits. */
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
5187
1da177e4
LT
5188/* tp->lock is held. */
5189static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5190{
f49639e6
DM
5191 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5192 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4
LT
5193
5194 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5195 switch (kind) {
5196 case RESET_KIND_INIT:
5197 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5198 DRV_STATE_START);
5199 break;
5200
5201 case RESET_KIND_SHUTDOWN:
5202 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5203 DRV_STATE_UNLOAD);
5204 break;
5205
5206 case RESET_KIND_SUSPEND:
5207 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5208 DRV_STATE_SUSPEND);
5209 break;
5210
5211 default:
5212 break;
5213 };
5214 }
0d3031d9
MC
5215
5216 if (kind == RESET_KIND_INIT ||
5217 kind == RESET_KIND_SUSPEND)
5218 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
5219}
5220
5221/* tp->lock is held. */
5222static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5223{
5224 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5225 switch (kind) {
5226 case RESET_KIND_INIT:
5227 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5228 DRV_STATE_START_DONE);
5229 break;
5230
5231 case RESET_KIND_SHUTDOWN:
5232 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5233 DRV_STATE_UNLOAD_DONE);
5234 break;
5235
5236 default:
5237 break;
5238 };
5239 }
0d3031d9
MC
5240
5241 if (kind == RESET_KIND_SHUTDOWN)
5242 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
5243}
5244
5245/* tp->lock is held. */
5246static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5247{
5248 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5249 switch (kind) {
5250 case RESET_KIND_INIT:
5251 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5252 DRV_STATE_START);
5253 break;
5254
5255 case RESET_KIND_SHUTDOWN:
5256 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5257 DRV_STATE_UNLOAD);
5258 break;
5259
5260 case RESET_KIND_SUSPEND:
5261 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5262 DRV_STATE_SUSPEND);
5263 break;
5264
5265 default:
5266 break;
5267 };
5268 }
5269}
5270
7a6f4369
MC
/* Poll until the chip's boot firmware reports that it has finished
 * initializing.
 *
 * Returns 0 on success or when no firmware is fitted (some Sun onboard
 * parts ship without it); returns -ENODEV only if a 5906's internal
 * VCPU never signals init-done within ~20ms.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The firmware
	 * writes the complement of the magic value back into the mailbox
	 * when it is up.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5309
ee6a99b5
MC
/* Save PCI command register before chip reset.
 *
 * GRC_MISC_CFG core-clock reset can clear the memory-enable bit in
 * PCI_COMMAND on some chips; stash it so tg3_restore_pci_state() can
 * put it back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5315
/* Restore PCI state after chip reset.
 *
 * Re-establishes indirect register access, PCI retry/APE window bits,
 * the saved PCI_COMMAND word, read-request size or cacheline/latency
 * timers, PCI-X relaxed ordering, and the MSI enable bit on 5780-class
 * parts — all of which a core-clock reset may have clobbered.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command word saved in tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5377
1da177e4
LT
5378static void tg3_stop_fw(struct tg3 *);
5379
/* tp->lock is held.
 *
 * Perform a full GRC core-clock chip reset and bring the device back to
 * a state where registers are accessible again.  The sequence (save PCI
 * state, quiesce the irq handler, issue the reset, delay, restore PCI
 * state, re-enable the memory arbiter, restore MAC mode, wait for
 * firmware, reprobe ASF) is order-sensitive per-chip; do not reorder.
 *
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	/* Clear the fastboot PC so the chip does a full boot. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c / bit 29 are undocumented PCIe
		 * workaround registers inherited from vendor code.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* Safe for the irq handler to touch the chip again. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode for the PHY type in use. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state. */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5568
/* tp->lock is held.
 *
 * Ask the ASF firmware running on the RX CPU to pause.  Only applies
 * when ASF is enabled and management is not handled by the APE.  Both
 * the previous and the new driver events are waited on to be ACKed.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;

		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		/* Raise the driver-event bit to interrupt the RX CPU. */
		val = tr32(GRC_RX_CPU_EVENT);
		val |= GRC_RX_CPU_DRIVER_EVENT;
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
5588
/* tp->lock is held.
 *
 * Bring the chip down: pause ASF firmware, announce the reset, abort
 * the hardware, reset the chip, then write the post-reset signatures.
 * The signatures are written even when the reset failed; the reset
 * error (if any) is returned to the caller.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);

	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5609
/* Layout of the 5701 A0 workaround firmware image (tg3FwText/Rodata
 * below).  Addresses are in the on-chip CPU's address space; lengths
 * are in bytes.  Note: "RELASE" is a long-standing typo in the macro
 * name, kept as-is because the identifier is part of the source.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
5624
/* Text segment of the 5701 A0 workaround firmware.  Opaque binary
 * image (presumably MIPS machine code for the on-chip CPU — do not
 * edit by hand); loaded by tg3_load_5701_a0_firmware_fix().
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5718
/* Read-only data segment of the 5701 A0 workaround firmware.  The
 * words appear to encode short ASCII tags used by the firmware —
 * opaque data, do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5726
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif

/* On-chip scratch RAM regions used to hold the RX/TX CPU firmware
 * images (base offset and size in bytes).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5738
5739/* tp->lock is held. */
5740static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5741{
5742 int i;
5743
5d9428de
ES
5744 BUG_ON(offset == TX_CPU_BASE &&
5745 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
1da177e4 5746
b5d3772c
MC
5747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5748 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5749
5750 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5751 return 0;
5752 }
1da177e4
LT
5753 if (offset == RX_CPU_BASE) {
5754 for (i = 0; i < 10000; i++) {
5755 tw32(offset + CPU_STATE, 0xffffffff);
5756 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5757 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5758 break;
5759 }
5760
5761 tw32(offset + CPU_STATE, 0xffffffff);
5762 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5763 udelay(10);
5764 } else {
5765 for (i = 0; i < 10000; i++) {
5766 tw32(offset + CPU_STATE, 0xffffffff);
5767 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5768 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5769 break;
5770 }
5771 }
5772
5773 if (i >= 10000) {
5774 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5775 "and %s CPU\n",
5776 tp->dev->name,
5777 (offset == RX_CPU_BASE ? "RX" : "TX"));
5778 return -ENODEV;
5779 }
ec41c7df
MC
5780
5781 /* Clear firmware's nvram arbitration. */
5782 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5783 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
1da177e4
LT
5784 return 0;
5785}
5786
/* Describes one firmware image to be loaded into an on-chip CPU.
 * Each segment has its link address in the CPU's address space, its
 * length in bytes, and a pointer to its contents; a NULL data pointer
 * means the segment is all zeros.
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* length of .text in bytes */
	const u32 *text_data;		/* .text contents (or NULL) */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* length of .rodata in bytes */
	const u32 *rodata_data;		/* .rodata contents (or NULL) */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* length of .data in bytes */
	const u32 *data_data;		/* .data contents (or NULL) */
};
5798
5799/* tp->lock is held. */
5800static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5801 int cpu_scratch_size, struct fw_info *info)
5802{
ec41c7df 5803 int err, lock_err, i;
1da177e4
LT
5804 void (*write_op)(struct tg3 *, u32, u32);
5805
5806 if (cpu_base == TX_CPU_BASE &&
5807 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5808 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5809 "TX cpu firmware on %s which is 5705.\n",
5810 tp->dev->name);
5811 return -EINVAL;
5812 }
5813
5814 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5815 write_op = tg3_write_mem;
5816 else
5817 write_op = tg3_write_indirect_reg32;
5818
1b628151
MC
5819 /* It is possible that bootcode is still loading at this point.
5820 * Get the nvram lock first before halting the cpu.
5821 */
ec41c7df 5822 lock_err = tg3_nvram_lock(tp);
1da177e4 5823 err = tg3_halt_cpu(tp, cpu_base);
ec41c7df
MC
5824 if (!lock_err)
5825 tg3_nvram_unlock(tp);
1da177e4
LT
5826 if (err)
5827 goto out;
5828
5829 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5830 write_op(tp, cpu_scratch_base + i, 0);
5831 tw32(cpu_base + CPU_STATE, 0xffffffff);
5832 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5833 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5834 write_op(tp, (cpu_scratch_base +
5835 (info->text_base & 0xffff) +
5836 (i * sizeof(u32))),
5837 (info->text_data ?
5838 info->text_data[i] : 0));
5839 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5840 write_op(tp, (cpu_scratch_base +
5841 (info->rodata_base & 0xffff) +
5842 (i * sizeof(u32))),
5843 (info->rodata_data ?
5844 info->rodata_data[i] : 0));
5845 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5846 write_op(tp, (cpu_scratch_base +
5847 (info->data_base & 0xffff) +
5848 (i * sizeof(u32))),
5849 (info->data_data ?
5850 info->data_data[i] : 0));
5851
5852 err = 0;
5853
5854out:
1da177e4
LT
5855 return err;
5856}
5857
5858/* tp->lock is held. */
5859static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5860{
5861 struct fw_info info;
5862 int err, i;
5863
5864 info.text_base = TG3_FW_TEXT_ADDR;
5865 info.text_len = TG3_FW_TEXT_LEN;
5866 info.text_data = &tg3FwText[0];
5867 info.rodata_base = TG3_FW_RODATA_ADDR;
5868 info.rodata_len = TG3_FW_RODATA_LEN;
5869 info.rodata_data = &tg3FwRodata[0];
5870 info.data_base = TG3_FW_DATA_ADDR;
5871 info.data_len = TG3_FW_DATA_LEN;
5872 info.data_data = NULL;
5873
5874 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5875 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5876 &info);
5877 if (err)
5878 return err;
5879
5880 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5881 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5882 &info);
5883 if (err)
5884 return err;
5885
5886 /* Now startup only the RX cpu. */
5887 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5888 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5889
5890 for (i = 0; i < 5; i++) {
5891 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5892 break;
5893 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5894 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5895 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5896 udelay(1000);
5897 }
5898 if (i >= 5) {
5899 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5900 "to set RX CPU PC, is %08x should be %08x\n",
5901 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5902 TG3_FW_TEXT_ADDR);
5903 return -ENODEV;
5904 }
5905 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5906 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5907
5908 return 0;
5909}
5910
1da177e4
LT
5911
5912#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5913#define TG3_TSO_FW_RELASE_MINOR 0x6
5914#define TG3_TSO_FW_RELEASE_FIX 0x0
5915#define TG3_TSO_FW_START_ADDR 0x08000000
5916#define TG3_TSO_FW_TEXT_ADDR 0x08000000
5917#define TG3_TSO_FW_TEXT_LEN 0x1aa0
5918#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5919#define TG3_TSO_FW_RODATA_LEN 0x60
5920#define TG3_TSO_FW_DATA_ADDR 0x08001b20
5921#define TG3_TSO_FW_DATA_LEN 0x30
5922#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5923#define TG3_TSO_FW_SBSS_LEN 0x2c
5924#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5925#define TG3_TSO_FW_BSS_LEN 0x894
5926
50da859d 5927static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5928 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5929 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5930 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5931 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5932 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5933 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5934 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5935 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5936 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5937 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5938 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5939 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5940 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5941 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5942 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5943 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5944 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5945 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5946 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5947 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5948 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5949 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5950 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5951 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5952 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5953 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5954 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5955 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5956 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5957 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5958 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5959 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5960 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5961 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5962 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5963 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5964 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5965 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5966 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5967 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5968 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5969 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5970 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5971 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5972 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5973 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5974 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5975 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5976 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5977 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5978 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5979 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5980 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5981 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5982 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5983 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5984 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5985 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5986 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5987 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5988 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5989 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5990 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5991 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5992 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5993 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5994 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5995 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5996 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5997 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5998 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5999 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6000 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6001 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6002 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6003 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6004 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6005 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6006 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6007 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6008 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6009 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6010 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6011 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6012 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6013 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6014 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6015 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6016 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6017 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6018 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6019 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6020 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6021 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6022 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6023 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6024 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6025 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6026 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6027 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6028 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6029 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6030 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6031 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6032 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6033 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6034 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6035 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6036 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6037 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6038 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6039 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6040 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6041 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6042 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6043 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6044 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6045 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6046 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6047 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6048 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6049 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6050 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6051 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6052 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6053 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6054 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6055 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6056 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6057 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6058 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6059 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6060 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6061 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6062 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6063 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6064 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6065 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6066 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6067 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6068 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6069 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6070 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6071 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6072 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6073 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6074 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6075 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6076 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6077 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6078 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6079 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6080 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6081 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6082 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6083 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6084 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6085 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6086 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6087 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6088 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6089 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6090 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6091 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6092 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6093 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6094 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6095 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6096 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6097 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6098 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6099 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6100 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6101 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6102 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6103 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6104 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6105 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6106 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6107 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6108 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6109 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6110 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6111 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6112 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6113 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6114 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6115 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6116 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6117 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6118 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6119 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6120 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6121 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6122 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6123 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6124 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6125 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6126 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6127 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6128 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6129 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6130 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6131 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6132 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6133 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6134 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6135 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6136 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6137 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6138 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6139 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6140 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6141 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6142 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6143 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6144 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6145 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6146 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6147 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6148 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6149 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6150 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6151 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6152 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6153 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6154 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6155 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6156 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6157 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6158 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6159 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6160 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6161 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6162 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6163 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6164 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6165 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6166 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6167 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6168 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6169 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6170 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6171 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6172 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6173 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6174 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6175 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6176 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6177 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6178 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6179 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6180 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6181 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6182 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6183 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6184 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6185 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6186 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6187 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6188 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6189 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6190 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6191 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6192 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6193 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6194 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6195 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6196 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6197 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6198 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6199 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6200 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6201 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6202 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6203 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6204 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6205 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6206 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6207 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6208 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6209 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6210 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6211 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6212};
6213
50da859d 6214static const u32 tg3TsoFwRodata[] = {
1da177e4
LT
6215 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6216 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6217 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6218 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6219 0x00000000,
6220};
6221
50da859d 6222static const u32 tg3TsoFwData[] = {
1da177e4
LT
6223 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6224 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6225 0x00000000,
6226};
6227
/* 5705 needs a special version of the TSO firmware.
 * Layout constants for the 5705 TSO firmware image: version numbers and
 * the on-chip load addresses/lengths of each segment (text, rodata, data,
 * sbss, bss), consumed by tg3_load_tso_firmware().
 * NOTE(review): "RELASE" in the minor-version macro is a historical typo;
 * it is kept as-is because the identifier may be referenced elsewhere.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
50da859d 6244static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
6245 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6246 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6247 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6248 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6249 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6250 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6251 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6252 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6253 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6254 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6255 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6256 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6257 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6258 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6259 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6260 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6261 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6262 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6263 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6264 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6265 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6266 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6267 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6268 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6269 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6270 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6271 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6272 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6273 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6274 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6275 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6276 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6277 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6278 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6279 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6280 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6281 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6282 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6283 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6284 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6285 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6286 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6287 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6288 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6289 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6290 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6291 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6292 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6293 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6294 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6295 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6296 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6297 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6298 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6299 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6300 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6301 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6302 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6303 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6304 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6305 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6306 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6307 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6308 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6309 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6310 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6311 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6312 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6313 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6314 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6315 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6316 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6317 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6318 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6319 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6320 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6321 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6322 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6323 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6324 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6325 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6326 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6327 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6328 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6329 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6330 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6331 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6332 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6333 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6334 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6335 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6336 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6337 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6338 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6339 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6340 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6341 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6342 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6343 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6344 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6345 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6346 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6347 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6348 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6349 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6350 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6351 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6352 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6353 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6354 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6355 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6356 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6357 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6358 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6359 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6360 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6361 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6362 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6363 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6364 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6365 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6366 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6367 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6368 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6369 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6370 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6371 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6372 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6373 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6374 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6375 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6376 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6377 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6378 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6379 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6380 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6381 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6382 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6383 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6384 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6385 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6386 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6387 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6388 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6389 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6390 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6391 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6392 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6393 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6394 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6395 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6396 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6397 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6398 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6399 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6400 0x00000000, 0x00000000, 0x00000000,
6401};
6402
/* Read-only data segment of the 5705 TSO firmware image.  The words are
 * ASCII tags used by the firmware ("MainCpuB", "MainCpuA", "stkoffld",
 * "fatalErr"); one extra zero word is appended beyond
 * TG3_TSO5_FW_RODATA_LEN bytes, hence the "+ 1" in the array size.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6409
/* Initialized data segment of the 5705 TSO firmware image; contains the
 * ASCII version tag "stkoffld_v1.2.0".  As with the rodata table, one
 * extra zero word is appended beyond TG3_TSO5_FW_DATA_LEN bytes.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6414
6415/* tp->lock is held. */
6416static int tg3_load_tso_firmware(struct tg3 *tp)
6417{
6418 struct fw_info info;
6419 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6420 int err, i;
6421
6422 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6423 return 0;
6424
6425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6426 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6427 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6428 info.text_data = &tg3Tso5FwText[0];
6429 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6430 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6431 info.rodata_data = &tg3Tso5FwRodata[0];
6432 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6433 info.data_len = TG3_TSO5_FW_DATA_LEN;
6434 info.data_data = &tg3Tso5FwData[0];
6435 cpu_base = RX_CPU_BASE;
6436 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6437 cpu_scratch_size = (info.text_len +
6438 info.rodata_len +
6439 info.data_len +
6440 TG3_TSO5_FW_SBSS_LEN +
6441 TG3_TSO5_FW_BSS_LEN);
6442 } else {
6443 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6444 info.text_len = TG3_TSO_FW_TEXT_LEN;
6445 info.text_data = &tg3TsoFwText[0];
6446 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6447 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6448 info.rodata_data = &tg3TsoFwRodata[0];
6449 info.data_base = TG3_TSO_FW_DATA_ADDR;
6450 info.data_len = TG3_TSO_FW_DATA_LEN;
6451 info.data_data = &tg3TsoFwData[0];
6452 cpu_base = TX_CPU_BASE;
6453 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6454 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6455 }
6456
6457 err = tg3_load_firmware_cpu(tp, cpu_base,
6458 cpu_scratch_base, cpu_scratch_size,
6459 &info);
6460 if (err)
6461 return err;
6462
6463 /* Now startup the cpu. */
6464 tw32(cpu_base + CPU_STATE, 0xffffffff);
6465 tw32_f(cpu_base + CPU_PC, info.text_base);
6466
6467 for (i = 0; i < 5; i++) {
6468 if (tr32(cpu_base + CPU_PC) == info.text_base)
6469 break;
6470 tw32(cpu_base + CPU_STATE, 0xffffffff);
6471 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6472 tw32_f(cpu_base + CPU_PC, info.text_base);
6473 udelay(1000);
6474 }
6475 if (i >= 5) {
6476 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6477 "to set CPU PC, is %08x should be %08x\n",
6478 tp->dev->name, tr32(cpu_base + CPU_PC),
6479 info.text_base);
6480 return -ENODEV;
6481 }
6482 tw32(cpu_base + CPU_STATE, 0xffffffff);
6483 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6484 return 0;
6485}
6486
1da177e4
LT
6487
6488/* tp->lock is held. */
986e0aeb 6489static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1da177e4
LT
6490{
6491 u32 addr_high, addr_low;
6492 int i;
6493
6494 addr_high = ((tp->dev->dev_addr[0] << 8) |
6495 tp->dev->dev_addr[1]);
6496 addr_low = ((tp->dev->dev_addr[2] << 24) |
6497 (tp->dev->dev_addr[3] << 16) |
6498 (tp->dev->dev_addr[4] << 8) |
6499 (tp->dev->dev_addr[5] << 0));
6500 for (i = 0; i < 4; i++) {
986e0aeb
MC
6501 if (i == 1 && skip_mac_1)
6502 continue;
1da177e4
LT
6503 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6504 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6505 }
6506
6507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6509 for (i = 0; i < 12; i++) {
6510 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6511 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6512 }
6513 }
6514
6515 addr_high = (tp->dev->dev_addr[0] +
6516 tp->dev->dev_addr[1] +
6517 tp->dev->dev_addr[2] +
6518 tp->dev->dev_addr[3] +
6519 tp->dev->dev_addr[4] +
6520 tp->dev->dev_addr[5]) &
6521 TX_BACKOFF_SEED_MASK;
6522 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6523}
6524
6525static int tg3_set_mac_addr(struct net_device *dev, void *p)
6526{
6527 struct tg3 *tp = netdev_priv(dev);
6528 struct sockaddr *addr = p;
986e0aeb 6529 int err = 0, skip_mac_1 = 0;
1da177e4 6530
f9804ddb
MC
6531 if (!is_valid_ether_addr(addr->sa_data))
6532 return -EINVAL;
6533
1da177e4
LT
6534 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6535
e75f7c90
MC
6536 if (!netif_running(dev))
6537 return 0;
6538
58712ef9 6539 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
986e0aeb 6540 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 6541
986e0aeb
MC
6542 addr0_high = tr32(MAC_ADDR_0_HIGH);
6543 addr0_low = tr32(MAC_ADDR_0_LOW);
6544 addr1_high = tr32(MAC_ADDR_1_HIGH);
6545 addr1_low = tr32(MAC_ADDR_1_LOW);
6546
6547 /* Skip MAC addr 1 if ASF is using it. */
6548 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6549 !(addr1_high == 0 && addr1_low == 0))
6550 skip_mac_1 = 1;
58712ef9 6551 }
986e0aeb
MC
6552 spin_lock_bh(&tp->lock);
6553 __tg3_set_mac_addr(tp, skip_mac_1);
6554 spin_unlock_bh(&tp->lock);
1da177e4 6555
b9ec6c1b 6556 return err;
1da177e4
LT
6557}
6558
6559/* tp->lock is held. */
6560static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6561 dma_addr_t mapping, u32 maxlen_flags,
6562 u32 nic_addr)
6563{
6564 tg3_write_mem(tp,
6565 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6566 ((u64) mapping >> 32));
6567 tg3_write_mem(tp,
6568 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6569 ((u64) mapping & 0xffffffff));
6570 tg3_write_mem(tp,
6571 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6572 maxlen_flags);
6573
6574 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6575 tg3_write_mem(tp,
6576 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6577 nic_addr);
6578}
6579
6580static void __tg3_set_rx_mode(struct net_device *);
d244c892 6581static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
6582{
6583 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6584 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6585 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6586 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6587 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6588 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6589 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6590 }
6591 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6592 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6593 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6594 u32 val = ec->stats_block_coalesce_usecs;
6595
6596 if (!netif_carrier_ok(tp->dev))
6597 val = 0;
6598
6599 tw32(HOSTCC_STAT_COAL_TICKS, val);
6600 }
6601}
1da177e4
LT
6602
6603/* tp->lock is held. */
8e7a22e3 6604static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6605{
6606 u32 val, rdmac_mode;
6607 int i, err, limit;
6608
6609 tg3_disable_ints(tp);
6610
6611 tg3_stop_fw(tp);
6612
6613 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6614
6615 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 6616 tg3_abort_hw(tp, 1);
1da177e4
LT
6617 }
6618
dd477003
MC
6619 if (reset_phy &&
6620 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
d4d2c558
MC
6621 tg3_phy_reset(tp);
6622
1da177e4
LT
6623 err = tg3_chip_reset(tp);
6624 if (err)
6625 return err;
6626
6627 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6628
b5af7126
MC
6629 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6630 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
d30cdd28
MC
6631 val = tr32(TG3_CPMU_CTRL);
6632 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6633 tw32(TG3_CPMU_CTRL, val);
9acb961e
MC
6634
6635 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6636 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6637 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6638 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6639
6640 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6641 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6642 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6643 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6644
6645 val = tr32(TG3_CPMU_HST_ACC);
6646 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6647 val |= CPMU_HST_ACC_MACCLK_6_25;
6648 tw32(TG3_CPMU_HST_ACC, val);
d30cdd28
MC
6649 }
6650
1da177e4
LT
6651 /* This works around an issue with Athlon chipsets on
6652 * B3 tigon3 silicon. This bit has no effect on any
6653 * other revision. But do not set this on PCI Express
795d01c5 6654 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 6655 */
795d01c5
MC
6656 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6657 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6658 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6659 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6660 }
1da177e4
LT
6661
6662 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6663 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6664 val = tr32(TG3PCI_PCISTATE);
6665 val |= PCISTATE_RETRY_SAME_DMA;
6666 tw32(TG3PCI_PCISTATE, val);
6667 }
6668
0d3031d9
MC
6669 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6670 /* Allow reads and writes to the
6671 * APE register and memory space.
6672 */
6673 val = tr32(TG3PCI_PCISTATE);
6674 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6675 PCISTATE_ALLOW_APE_SHMEM_WR;
6676 tw32(TG3PCI_PCISTATE, val);
6677 }
6678
1da177e4
LT
6679 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6680 /* Enable some hw fixes. */
6681 val = tr32(TG3PCI_MSI_DATA);
6682 val |= (1 << 26) | (1 << 28) | (1 << 29);
6683 tw32(TG3PCI_MSI_DATA, val);
6684 }
6685
6686 /* Descriptor ring init may make accesses to the
6687 * NIC SRAM area to setup the TX descriptors, so we
6688 * can only do this after the hardware has been
6689 * successfully reset.
6690 */
32d8c572
MC
6691 err = tg3_init_rings(tp);
6692 if (err)
6693 return err;
1da177e4 6694
9936bcf6
MC
6695 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6696 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
6697 /* This value is determined during the probe time DMA
6698 * engine test, tg3_test_dma.
6699 */
6700 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6701 }
1da177e4
LT
6702
6703 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6704 GRC_MODE_4X_NIC_SEND_RINGS |
6705 GRC_MODE_NO_TX_PHDR_CSUM |
6706 GRC_MODE_NO_RX_PHDR_CSUM);
6707 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
6708
6709 /* Pseudo-header checksum is done by hardware logic and not
6710 * the offload processers, so make the chip do the pseudo-
6711 * header checksums on receive. For transmit it is more
6712 * convenient to do the pseudo-header checksum in software
6713 * as Linux does that on transmit for us in all cases.
6714 */
6715 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
6716
6717 tw32(GRC_MODE,
6718 tp->grc_mode |
6719 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6720
6721 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6722 val = tr32(GRC_MISC_CFG);
6723 val &= ~0xff;
6724 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6725 tw32(GRC_MISC_CFG, val);
6726
6727 /* Initialize MBUF/DESC pool. */
cbf46853 6728 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
6729 /* Do nothing. */
6730 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6731 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6733 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6734 else
6735 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6736 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6737 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6738 }
1da177e4
LT
6739 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6740 int fw_len;
6741
6742 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6743 TG3_TSO5_FW_RODATA_LEN +
6744 TG3_TSO5_FW_DATA_LEN +
6745 TG3_TSO5_FW_SBSS_LEN +
6746 TG3_TSO5_FW_BSS_LEN);
6747 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6748 tw32(BUFMGR_MB_POOL_ADDR,
6749 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6750 tw32(BUFMGR_MB_POOL_SIZE,
6751 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6752 }
1da177e4 6753
0f893dc6 6754 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
6755 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6756 tp->bufmgr_config.mbuf_read_dma_low_water);
6757 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6758 tp->bufmgr_config.mbuf_mac_rx_low_water);
6759 tw32(BUFMGR_MB_HIGH_WATER,
6760 tp->bufmgr_config.mbuf_high_water);
6761 } else {
6762 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6763 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6764 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6765 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6766 tw32(BUFMGR_MB_HIGH_WATER,
6767 tp->bufmgr_config.mbuf_high_water_jumbo);
6768 }
6769 tw32(BUFMGR_DMA_LOW_WATER,
6770 tp->bufmgr_config.dma_low_water);
6771 tw32(BUFMGR_DMA_HIGH_WATER,
6772 tp->bufmgr_config.dma_high_water);
6773
6774 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6775 for (i = 0; i < 2000; i++) {
6776 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6777 break;
6778 udelay(10);
6779 }
6780 if (i >= 2000) {
6781 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6782 tp->dev->name);
6783 return -ENODEV;
6784 }
6785
6786 /* Setup replenish threshold. */
f92905de
MC
6787 val = tp->rx_pending / 8;
6788 if (val == 0)
6789 val = 1;
6790 else if (val > tp->rx_std_max_post)
6791 val = tp->rx_std_max_post;
b5d3772c
MC
6792 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6793 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6794 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6795
6796 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6797 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6798 }
f92905de
MC
6799
6800 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6801
6802 /* Initialize TG3_BDINFO's at:
6803 * RCVDBDI_STD_BD: standard eth size rx ring
6804 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6805 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6806 *
6807 * like so:
6808 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6809 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6810 * ring attribute flags
6811 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6812 *
6813 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6814 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6815 *
6816 * The size of each ring is fixed in the firmware, but the location is
6817 * configurable.
6818 */
6819 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6820 ((u64) tp->rx_std_mapping >> 32));
6821 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6822 ((u64) tp->rx_std_mapping & 0xffffffff));
6823 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6824 NIC_SRAM_RX_BUFFER_DESC);
6825
6826 /* Don't even try to program the JUMBO/MINI buffer descriptor
6827 * configs on 5705.
6828 */
6829 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6830 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6831 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6832 } else {
6833 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6834 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6835
6836 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6837 BDINFO_FLAGS_DISABLED);
6838
6839 /* Setup replenish threshold. */
6840 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6841
0f893dc6 6842 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6843 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6844 ((u64) tp->rx_jumbo_mapping >> 32));
6845 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6846 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6847 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6848 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6849 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6850 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6851 } else {
6852 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6853 BDINFO_FLAGS_DISABLED);
6854 }
6855
6856 }
6857
6858 /* There is only one send ring on 5705/5750, no need to explicitly
6859 * disable the others.
6860 */
6861 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6862 /* Clear out send RCB ring in SRAM. */
6863 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6864 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6865 BDINFO_FLAGS_DISABLED);
6866 }
6867
6868 tp->tx_prod = 0;
6869 tp->tx_cons = 0;
6870 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6871 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6872
6873 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6874 tp->tx_desc_mapping,
6875 (TG3_TX_RING_SIZE <<
6876 BDINFO_FLAGS_MAXLEN_SHIFT),
6877 NIC_SRAM_TX_BUFFER_DESC);
6878
6879 /* There is only one receive return ring on 5705/5750, no need
6880 * to explicitly disable the others.
6881 */
6882 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6883 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6884 i += TG3_BDINFO_SIZE) {
6885 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6886 BDINFO_FLAGS_DISABLED);
6887 }
6888 }
6889
6890 tp->rx_rcb_ptr = 0;
6891 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6892
6893 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6894 tp->rx_rcb_mapping,
6895 (TG3_RX_RCB_RING_SIZE(tp) <<
6896 BDINFO_FLAGS_MAXLEN_SHIFT),
6897 0);
6898
6899 tp->rx_std_ptr = tp->rx_pending;
6900 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6901 tp->rx_std_ptr);
6902
0f893dc6 6903 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6904 tp->rx_jumbo_pending : 0;
6905 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6906 tp->rx_jumbo_ptr);
6907
6908 /* Initialize MAC address and backoff seed. */
986e0aeb 6909 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
6910
6911 /* MTU + ethernet header + FCS + optional VLAN tag */
6912 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6913
6914 /* The slot time is changed by tg3_setup_phy if we
6915 * run at gigabit with half duplex.
6916 */
6917 tw32(MAC_TX_LENGTHS,
6918 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6919 (6 << TX_LENGTHS_IPG_SHIFT) |
6920 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6921
6922 /* Receive rules. */
6923 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6924 tw32(RCVLPC_CONFIG, 0x0181);
6925
6926 /* Calculate RDMAC_MODE setting early, we need it to determine
6927 * the RCVLPC_STATE_ENABLE mask.
6928 */
6929 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6930 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6931 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6932 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6933 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 6934
d30cdd28
MC
6935 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6936 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6937 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6938 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6939
85e94ced
MC
6940 /* If statement applies to 5705 and 5750 PCI devices only */
6941 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6942 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6943 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4 6944 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
c13e3713 6945 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
6946 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6947 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6948 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6949 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6950 }
6951 }
6952
85e94ced
MC
6953 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6954 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6955
1da177e4
LT
6956 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6957 rdmac_mode |= (1 << 27);
1da177e4
LT
6958
6959 /* Receive/send statistics. */
1661394e
MC
6960 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6961 val = tr32(RCVLPC_STATS_ENABLE);
6962 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6963 tw32(RCVLPC_STATS_ENABLE, val);
6964 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6965 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
1da177e4
LT
6966 val = tr32(RCVLPC_STATS_ENABLE);
6967 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6968 tw32(RCVLPC_STATS_ENABLE, val);
6969 } else {
6970 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6971 }
6972 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6973 tw32(SNDDATAI_STATSENAB, 0xffffff);
6974 tw32(SNDDATAI_STATSCTRL,
6975 (SNDDATAI_SCTRL_ENABLE |
6976 SNDDATAI_SCTRL_FASTUPD));
6977
6978 /* Setup host coalescing engine. */
6979 tw32(HOSTCC_MODE, 0);
6980 for (i = 0; i < 2000; i++) {
6981 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6982 break;
6983 udelay(10);
6984 }
6985
d244c892 6986 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6987
6988 /* set status block DMA address */
6989 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6990 ((u64) tp->status_mapping >> 32));
6991 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6992 ((u64) tp->status_mapping & 0xffffffff));
6993
6994 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6995 /* Status/statistics block address. See tg3_timer,
6996 * the tg3_periodic_fetch_stats call there, and
6997 * tg3_get_stats to see how this works for 5705/5750 chips.
6998 */
1da177e4
LT
6999 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7000 ((u64) tp->stats_mapping >> 32));
7001 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7002 ((u64) tp->stats_mapping & 0xffffffff));
7003 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7004 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7005 }
7006
7007 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7008
7009 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7010 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7011 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7012 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7013
7014 /* Clear statistics/status block in chip, and status block in ram. */
7015 for (i = NIC_SRAM_STATS_BLK;
7016 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7017 i += sizeof(u32)) {
7018 tg3_write_mem(tp, i, 0);
7019 udelay(40);
7020 }
7021 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7022
c94e3941
MC
7023 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7024 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7025 /* reset to prevent losing 1st rx packet intermittently */
7026 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7027 udelay(10);
7028 }
7029
1da177e4
LT
7030 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7031 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
e8f3f6ca
MC
7032 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7033 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7034 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7035 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
7036 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7037 udelay(40);
7038
314fba34 7039 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9d26e213 7040 * If TG3_FLG2_IS_NIC is zero, we should read the
314fba34
MC
7041 * register to preserve the GPIO settings for LOMs. The GPIOs,
7042 * whether used as inputs or outputs, are set by boot code after
7043 * reset.
7044 */
9d26e213 7045 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
314fba34
MC
7046 u32 gpio_mask;
7047
9d26e213
MC
7048 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7049 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7050 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
7051
7052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7053 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7054 GRC_LCLCTRL_GPIO_OUTPUT3;
7055
af36e6b6
MC
7056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7057 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7058
aaf84465 7059 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
7060 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7061
7062 /* GPIO1 must be driven high for eeprom write protect */
9d26e213
MC
7063 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7064 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7065 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 7066 }
1da177e4
LT
7067 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7068 udelay(100);
7069
09ee929c 7070 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 7071 tp->last_tag = 0;
1da177e4
LT
7072
7073 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7074 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7075 udelay(40);
7076 }
7077
7078 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7079 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7080 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7081 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7082 WDMAC_MODE_LNGREAD_ENAB);
7083
85e94ced
MC
7084 /* If statement applies to 5705 and 5750 PCI devices only */
7085 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7086 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
7088 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7089 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7090 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7091 /* nothing */
7092 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7093 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7094 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7095 val |= WDMAC_MODE_RX_ACCEL;
7096 }
7097 }
7098
d9ab5ad1 7099 /* Enable host coalescing bug fix */
af36e6b6 7100 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
d30cdd28 7101 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
9936bcf6
MC
7102 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7103 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
f51f3562 7104 val |= WDMAC_MODE_STATUS_TAG_FIX;
d9ab5ad1 7105
1da177e4
LT
7106 tw32_f(WDMAC_MODE, val);
7107 udelay(40);
7108
9974a356
MC
7109 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7110 u16 pcix_cmd;
7111
7112 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7113 &pcix_cmd);
1da177e4 7114 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
7115 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7116 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 7117 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
7118 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7119 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 7120 }
9974a356
MC
7121 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7122 pcix_cmd);
1da177e4
LT
7123 }
7124
7125 tw32_f(RDMAC_MODE, rdmac_mode);
7126 udelay(40);
7127
7128 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7129 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7130 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
7131
7132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7133 tw32(SNDDATAC_MODE,
7134 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7135 else
7136 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7137
1da177e4
LT
7138 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7139 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7140 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7141 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1da177e4
LT
7142 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7143 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
1da177e4
LT
7144 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7145 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7146
7147 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7148 err = tg3_load_5701_a0_firmware_fix(tp);
7149 if (err)
7150 return err;
7151 }
7152
1da177e4
LT
7153 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7154 err = tg3_load_tso_firmware(tp);
7155 if (err)
7156 return err;
7157 }
1da177e4
LT
7158
7159 tp->tx_mode = TX_MODE_ENABLE;
7160 tw32_f(MAC_TX_MODE, tp->tx_mode);
7161 udelay(100);
7162
7163 tp->rx_mode = RX_MODE_ENABLE;
9936bcf6
MC
7164 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7165 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
af36e6b6
MC
7166 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7167
1da177e4
LT
7168 tw32_f(MAC_RX_MODE, tp->rx_mode);
7169 udelay(10);
7170
8ef21428 7171 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1da177e4
LT
7172 tw32_f(MAC_MI_MODE, tp->mi_mode);
7173 udelay(80);
7174
7175 tw32(MAC_LED_CTRL, tp->led_ctrl);
7176
7177 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 7178 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
7179 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7180 udelay(10);
7181 }
7182 tw32_f(MAC_RX_MODE, tp->rx_mode);
7183 udelay(10);
7184
7185 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7186 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7187 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7188 /* Set drive transmission level to 1.2V */
7189 /* only if the signal pre-emphasis bit is not set */
7190 val = tr32(MAC_SERDES_CFG);
7191 val &= 0xfffff000;
7192 val |= 0x880;
7193 tw32(MAC_SERDES_CFG, val);
7194 }
7195 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7196 tw32(MAC_SERDES_CFG, 0x616000);
7197 }
7198
7199 /* Prevent chip from dropping frames when flow control
7200 * is enabled.
7201 */
7202 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7203
7204 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7205 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7206 /* Use hardware link auto-negotiation */
7207 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7208 }
7209
d4d2c558
MC
7210 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7211 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7212 u32 tmp;
7213
7214 tmp = tr32(SERDES_RX_CTRL);
7215 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7216 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7217 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7218 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7219 }
7220
dd477003
MC
7221 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7222 if (tp->link_config.phy_is_low_power) {
7223 tp->link_config.phy_is_low_power = 0;
7224 tp->link_config.speed = tp->link_config.orig_speed;
7225 tp->link_config.duplex = tp->link_config.orig_duplex;
7226 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7227 }
1da177e4 7228
dd477003
MC
7229 err = tg3_setup_phy(tp, 0);
7230 if (err)
7231 return err;
1da177e4 7232
dd477003
MC
7233 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7234 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7235 u32 tmp;
7236
7237 /* Clear CRC stats. */
7238 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7239 tg3_writephy(tp, MII_TG3_TEST1,
7240 tmp | MII_TG3_TEST1_CRC_EN);
7241 tg3_readphy(tp, 0x14, &tmp);
7242 }
1da177e4
LT
7243 }
7244 }
7245
7246 __tg3_set_rx_mode(tp->dev);
7247
7248 /* Initialize receive rules. */
7249 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7250 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7251 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7252 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7253
4cf78e4f 7254 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 7255 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
7256 limit = 8;
7257 else
7258 limit = 16;
7259 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7260 limit -= 4;
7261 switch (limit) {
7262 case 16:
7263 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7264 case 15:
7265 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7266 case 14:
7267 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7268 case 13:
7269 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7270 case 12:
7271 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7272 case 11:
7273 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7274 case 10:
7275 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7276 case 9:
7277 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7278 case 8:
7279 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7280 case 7:
7281 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7282 case 6:
7283 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7284 case 5:
7285 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7286 case 4:
7287 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7288 case 3:
7289 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7290 case 2:
7291 case 1:
7292
7293 default:
7294 break;
7295 };
7296
9ce768ea
MC
7297 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7298 /* Write our heartbeat update interval to APE. */
7299 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7300 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 7301
1da177e4
LT
7302 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7303
1da177e4
LT
7304 return 0;
7305}
7306
7307/* Called at device open time to get the chip ready for
7308 * packet processing. Invoked with tp->lock held.
7309 */
8e7a22e3 7310static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
7311{
7312 int err;
7313
7314 /* Force the chip into D0. */
bc1c7567 7315 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
7316 if (err)
7317 goto out;
7318
7319 tg3_switch_clocks(tp);
7320
7321 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7322
8e7a22e3 7323 err = tg3_reset_hw(tp, reset_phy);
1da177e4
LT
7324
7325out:
7326 return err;
7327}
7328
/* Accumulate a 32-bit hardware statistics register into a 64-bit
 * software counter (PSTAT, a {low,high} pair).  The register value is
 * added to the low word; if the low word wrapped (post-add value is
 * smaller than the addend), carry one into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7335
/* Fold the chip's 32-bit MAC and receive-list-placement statistics
 * registers into the 64-bit counters in tp->hw_stats.  Called once per
 * second from tg3_timer() on 5705-plus chips (see the
 * TG3_FLG2_5705_PLUS check in the caller).  Skipped while the link is
 * down, since the counters cannot advance without carrier.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters (ring-full / discard stats). */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7376
/* Periodic driver timer, re-armed every tp->timer_offset jiffies.
 * Responsibilities: kick the interrupt/status-block handshake on
 * non-tagged-status chips, detect a hung write DMA engine and schedule
 * a reset, run the once-per-second link/statistics poll, and send the
 * ASF firmware heartbeat.  All chip access happens under tp->lock.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* If an IRQ synchronization is in progress, do nothing this
	 * tick; just re-arm the timer.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine lost its enable bit: the chip is
		 * wedged.  Schedule a full reset from process context
		 * and bail out without re-arming here (the reset task
		 * restarts the timer via TG3_FLG2_RESTART_TIMER).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll MAC_STATUS for link/PHY events instead of
			 * relying on a link-change interrupt.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but the PCS reports a state change,
			 * or link was down and sync/signal reappeared.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the MAC port mode (only once the
				 * serdes autoneg countdown has expired)
				 * before redoing PHY setup.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= GRC_RX_CPU_DRIVER_EVENT;
			tw32_f(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7498
81789ef5 7499static int tg3_request_irq(struct tg3 *tp)
fcfa0a32 7500{
7d12e780 7501 irq_handler_t fn;
fcfa0a32
MC
7502 unsigned long flags;
7503 struct net_device *dev = tp->dev;
7504
7505 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7506 fn = tg3_msi;
7507 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7508 fn = tg3_msi_1shot;
1fb9df5d 7509 flags = IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7510 } else {
7511 fn = tg3_interrupt;
7512 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7513 fn = tg3_interrupt_tagged;
1fb9df5d 7514 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7515 }
7516 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7517}
7518
/* Verify that the device can actually deliver an interrupt with the
 * current IRQ routing (used primarily by the MSI sanity test).
 * Temporarily swaps in tg3_test_isr, forces a host-coalescing "now"
 * event, and polls up to ~50ms for evidence the interrupt fired,
 * then restores the normal handler via tg3_request_irq().
 *
 * Returns 0 if an interrupt was observed, -ENODEV if the device is
 * not running, -EIO if no interrupt arrived, or a request_irq error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	/* Clear the status-block updated bit so a stale value cannot
	 * masquerade as a delivered interrupt.
	 */
	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either the interrupt mailbox went non-zero or the ISR
		 * masked PCI interrupts: the interrupt was delivered.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	/* Reinstall the regular production interrupt handler. */
	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7572
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR state). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-request the IRQ; with the MSI flag cleared this installs
	 * an INTx handler.
	 */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7633
/* net_device open handler: power the chip up, allocate DMA-consistent
 * rings and status/stats blocks, pick and request MSI or INTx, program
 * the hardware, run the MSI delivery test, start the periodic timer
 * and enable interrupts.  Each failure point unwinds everything set up
 * before it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo MSI enable and the consistent allocations. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status allows a slow 1Hz poll; non-tagged
		 * status needs the 10Hz race-avoidance poll (see
		 * tg3_timer).  The ASF heartbeat runs at half the
		 * 1-second rate, i.e. every 2 seconds.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Confirm MSI delivery actually works; on failure
		 * tg3_test_msi falls back to INTx itself, and a non-zero
		 * return here means even that recovery failed.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7769
#if 0
/* Compiled-out debugging aid: dump MAC, DMA, buffer-manager, host
 * coalescing and ring state to the kernel log.  Referenced only from
 * the (also compiled-out) call site in tg3_close().
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7997
7998static struct net_device_stats *tg3_get_stats(struct net_device *);
7999static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8000
/* net_device stop handler: quiesce NAPI and the reset worker, stop the
 * TX queue and the periodic timer, halt and reset the chip, release the
 * IRQ (and MSI), snapshot statistics so they survive across down/up
 * cycles, free DMA memory and drop the chip into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Stop NAPI polling and make sure no reset_task is still
	 * running before tearing anything down.
	 */
	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve cumulative statistics; tg3_get_stats/tg3_get_estats
	 * add these saved values back in after the next open.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8044
8045static inline unsigned long get_stat64(tg3_stat64_t *val)
8046{
8047 unsigned long ret;
8048
8049#if (BITS_PER_LONG == 32)
8050 ret = val->low;
8051#else
8052 ret = ((u64)val->high << 32) | ((u64)val->low);
8053#endif
8054 return ret;
8055}
8056
/* Return the cumulative receive CRC error count.  On copper 5700/5701
 * chips the count is read from the PHY's CRC counter (MII register
 * 0x14, after enabling it via MII_TG3_TEST1_CRC_EN) and accumulated in
 * tp->phy_crc_errors; the PHY counter appears to clear on read, hence
 * the software accumulation.  All other chips use the MAC's
 * rx_fcs_errors hardware statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* Serialize PHY access against other users of tp->lock. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8082
/* Compute one cumulative ethtool statistic: the value saved at the
 * last device close (old_estats) plus the live hardware counter.
 * Relies on estats/old_estats/hw_stats locals in the enclosing scope.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
8086
/* Build the ethtool statistics block in tp->estats by adding each live
 * hardware counter to the snapshot saved at the last close
 * (tp->estats_prev), so values are cumulative across down/up cycles.
 * Falls back to the saved snapshot when the hardware stats block is
 * not mapped (device closed).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8174
/* net_device get_stats handler.  Maps the chip's hardware statistics
 * block onto struct net_device_stats, adding the snapshot saved at the
 * last close (tp->net_stats_prev) so counters are cumulative across
 * down/up cycles.  Returns the saved snapshot when the hardware stats
 * block is not mapped.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper chips;
	 * calc_crc_errors() handles both sources.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8234
/* Bit-serial CRC-32 over len bytes of buf using the reflected IEEE
 * 802.3 polynomial (0xedb88320), with the conventional all-ones
 * initial value and final inversion.  Used to derive the multicast
 * hash filter bit for an Ethernet address.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i;

	for (i = 0; i < len; i++) {
		int bit;

		crc ^= buf[i];

		for (bit = 0; bit < 8; bit++) {
			u32 lsb = crc & 0x01;

			crc >>= 1;
			if (lsb)
				crc ^= 0xedb88320;
		}
	}

	return ~crc;
}
8259
8260static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8261{
8262 /* accept or reject all multicast frames */
8263 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8264 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8265 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8266 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8267}
8268
/* Program the MAC receive mode (promiscuous / VLAN tag stripping) and
 * the 128-bit multicast hash filter from dev->flags and the device's
 * multicast list.  Caller must hold the device locks (see
 * tg3_set_rx_mode); also called directly from chip init.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s): hash each address's
		 * CRC into a 128-bit filter spread over four registers.
		 */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Top 7 bits of the inverted CRC select one of
			 * 128 filter bits; bits 6:5 pick the register,
			 * bits 4:0 the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register if something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8332
/* net_device->set_multicast_list hook: take the full tg3 lock and
 * reprogram the RX filters.  A stopped interface is reconfigured
 * later by the open path, so we bail out early.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
8344
8345#define TG3_REGDUMP_LEN (32 * 1024)
8346
/* ethtool get_regs_len hook: the register dump is a fixed 32 KB. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8351
/* ethtool get_regs hook: copy selected register ranges into the
 * caller-supplied TG3_REGDUMP_LEN buffer at their natural offsets
 * (unread gaps stay zero from the memset).  Skipped entirely when the
 * PHY is powered down, since register reads would be unreliable then.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Helper macros: __GET_REG32 reads one register into the output cursor;
 * GET_REG32_LOOP positions the cursor at the register's file offset and
 * reads a whole range; GET_REG32_1 does the same for a single register.
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)		\
do {	p = (u32 *)(orig_p + (reg));	\
	__GET_REG32((reg));		\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only meaningful when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8424
8425static int tg3_get_eeprom_len(struct net_device *dev)
8426{
8427 struct tg3 *tp = netdev_priv(dev);
8428
8429 return tp->nvram_size;
8430}
8431
8432static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
b9fc7dc5 8433static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
1820180b 8434static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
8435
/* ethtool get_eeprom hook: copy an arbitrary byte range out of NVRAM.
 * NVRAM is only readable in 4-byte words, so the range is split into an
 * unaligned head, aligned middle, and unaligned tail.  eeprom->len is
 * updated as bytes are produced, so on a mid-range read error the
 * caller still learns how much valid data was copied.  Returns 0 or a
 * negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM is not accessible while the PHY is in low-power mode. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes successfully read so far. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8495
6aa20a22 8496static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
1da177e4
LT
8497
/* ethtool set_eeprom hook: write an arbitrary byte range to NVRAM.
 * NVRAM writes are word-based, so an unaligned head/tail is handled by
 * reading the bordering words first and merging them with the new data
 * in a temporary buffer before the block write.  Returns 0 or a
 * negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM is not accessible while the PHY is in low-power mode. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved border word(s) with the new data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8555
8556static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8557{
8558 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8559
1da177e4
LT
8560 cmd->supported = (SUPPORTED_Autoneg);
8561
8562 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8563 cmd->supported |= (SUPPORTED_1000baseT_Half |
8564 SUPPORTED_1000baseT_Full);
8565
ef348144 8566 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
8567 cmd->supported |= (SUPPORTED_100baseT_Half |
8568 SUPPORTED_100baseT_Full |
8569 SUPPORTED_10baseT_Half |
8570 SUPPORTED_10baseT_Full |
3bebab59 8571 SUPPORTED_TP);
ef348144
KK
8572 cmd->port = PORT_TP;
8573 } else {
1da177e4 8574 cmd->supported |= SUPPORTED_FIBRE;
ef348144
KK
8575 cmd->port = PORT_FIBRE;
8576 }
6aa20a22 8577
1da177e4
LT
8578 cmd->advertising = tp->link_config.advertising;
8579 if (netif_running(dev)) {
8580 cmd->speed = tp->link_config.active_speed;
8581 cmd->duplex = tp->link_config.active_duplex;
8582 }
1da177e4
LT
8583 cmd->phy_address = PHY_ADDR;
8584 cmd->transceiver = 0;
8585 cmd->autoneg = tp->link_config.autoneg;
8586 cmd->maxtxpkt = 0;
8587 cmd->maxrxpkt = 0;
8588 return 0;
8589}
6aa20a22 8590
1da177e4
LT
8591static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8592{
8593 struct tg3 *tp = netdev_priv(dev);
6aa20a22
JG
8594
8595 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
8596 /* These are the only valid advertisement bits allowed. */
8597 if (cmd->autoneg == AUTONEG_ENABLE &&
8598 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8599 ADVERTISED_1000baseT_Full |
8600 ADVERTISED_Autoneg |
8601 ADVERTISED_FIBRE)))
8602 return -EINVAL;
37ff238d
MC
8603 /* Fiber can only do SPEED_1000. */
8604 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8605 (cmd->speed != SPEED_1000))
8606 return -EINVAL;
8607 /* Copper cannot force SPEED_1000. */
8608 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8609 (cmd->speed == SPEED_1000))
8610 return -EINVAL;
8611 else if ((cmd->speed == SPEED_1000) &&
8612 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8613 return -EINVAL;
1da177e4 8614
f47c11ee 8615 tg3_full_lock(tp, 0);
1da177e4
LT
8616
8617 tp->link_config.autoneg = cmd->autoneg;
8618 if (cmd->autoneg == AUTONEG_ENABLE) {
405d8e5c
AG
8619 tp->link_config.advertising = (cmd->advertising |
8620 ADVERTISED_Autoneg);
1da177e4
LT
8621 tp->link_config.speed = SPEED_INVALID;
8622 tp->link_config.duplex = DUPLEX_INVALID;
8623 } else {
8624 tp->link_config.advertising = 0;
8625 tp->link_config.speed = cmd->speed;
8626 tp->link_config.duplex = cmd->duplex;
8627 }
6aa20a22 8628
24fcad6b
MC
8629 tp->link_config.orig_speed = tp->link_config.speed;
8630 tp->link_config.orig_duplex = tp->link_config.duplex;
8631 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8632
1da177e4
LT
8633 if (netif_running(dev))
8634 tg3_setup_phy(tp, 1);
8635
f47c11ee 8636 tg3_full_unlock(tp);
6aa20a22 8637
1da177e4
LT
8638 return 0;
8639}
6aa20a22 8640
1da177e4
LT
/* ethtool get_drvinfo hook: fill in driver name, version, firmware
 * version string and PCI bus address.  The sources are fixed-length
 * strings prepared at init, so plain strcpy into the fixed-size
 * ethtool_drvinfo fields is assumed safe here.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
6aa20a22 8650
1da177e4
LT
8651static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8652{
8653 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8654
a85feb8c
GZ
8655 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8656 wol->supported = WAKE_MAGIC;
8657 else
8658 wol->supported = 0;
1da177e4
LT
8659 wol->wolopts = 0;
8660 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8661 wol->wolopts = WAKE_MAGIC;
8662 memset(&wol->sopass, 0, sizeof(wol->sopass));
8663}
6aa20a22 8664
1da177e4
LT
/* ethtool set_wol hook: enable/disable magic-packet wake.  Rejects any
 * other wake option, or WAKE_MAGIC on hardware without WoL capability.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
		return -EINVAL;

	/* tg3_flags is shared state; update it under the lock. */
	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}
6aa20a22 8684
1da177e4
LT
/* ethtool get_msglevel hook: return the driver's message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
6aa20a22 8690
1da177e4
LT
/* ethtool set_msglevel hook: set the driver's message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
6aa20a22 8696
1da177e4
LT
/* ethtool set_tso hook: toggle TSO, keeping the extended TSO feature
 * bits (TSO6 and, on 5761, TSO_ECN) in sync on chips that support
 * hardware TSO v2.  Returns -EINVAL if TSO is requested on hardware
 * that cannot do it.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	/* 5906 is excluded here despite the HW_TSO_2 flag —
	 * presumably a chip-specific limitation; confirm against the
	 * errata before changing.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
6aa20a22 8717
1da177e4
LT
/* ethtool nway_reset hook: restart PHY autonegotiation.  Only valid on
 * a running interface with a copper PHY, and only when autoneg is
 * enabled (or the link is in parallel-detect mode).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* BMCR is read twice and the first result discarded —
	 * presumably a dummy read to settle the register; confirm
	 * before removing (NOTE: review).
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
6aa20a22 8744
1da177e4
LT
8745static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8746{
8747 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8748
1da177e4
LT
8749 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8750 ering->rx_mini_max_pending = 0;
4f81c32b
MC
8751 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8752 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8753 else
8754 ering->rx_jumbo_max_pending = 0;
8755
8756 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
8757
8758 ering->rx_pending = tp->rx_pending;
8759 ering->rx_mini_pending = 0;
4f81c32b
MC
8760 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8761 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8762 else
8763 ering->rx_jumbo_pending = 0;
8764
1da177e4
LT
8765 ering->tx_pending = tp->tx_pending;
8766}
6aa20a22 8767
1da177e4
LT
/* ethtool set_ringparam hook: validate the requested ring sizes, then
 * stop the device, apply them, and restart the hardware.  The TX ring
 * must exceed MAX_SKB_FRAGS (3x on TSO-bug chips, which may consume
 * extra descriptors per packet).  Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce the interface before resizing; irq_sync makes
	 * tg3_full_lock also synchronize with the interrupt handler.
	 */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8807
1da177e4
LT
8808static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8809{
8810 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8811
1da177e4 8812 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8d018621
MC
8813
8814 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8815 epause->rx_pause = 1;
8816 else
8817 epause->rx_pause = 0;
8818
8819 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8820 epause->tx_pause = 1;
8821 else
8822 epause->tx_pause = 0;
1da177e4 8823}
6aa20a22 8824
1da177e4
LT
/* ethtool set_pauseparam hook: update the flow-control configuration
 * and restart the hardware so it takes effect.  Returns 0 or a
 * negative errno from the restart.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Quiesce the interface before changing flow control. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
	else
		tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
	if (epause->tx_pause)
		tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
	else
		tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

	/* Full halt/restart is required for the new settings to apply. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8861
1da177e4
LT
/* ethtool get_rx_csum hook: report whether RX checksum offload is on. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
6aa20a22 8867
1da177e4
LT
/* ethtool set_rx_csum hook: toggle RX checksum offload.  Chips with
 * broken checksum hardware can only have it off; asking to enable it
 * there is an error.
 */
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	/* tg3_flags is shared state; update it under the lock. */
	spin_lock_bh(&tp->lock);
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_bh(&tp->lock);

	return 0;
}
6aa20a22 8887
1da177e4
LT
/* ethtool set_tx_csum hook: toggle TX checksum offload.  Newer chips
 * (5755/5787/5784/5761) also support IPv6 checksumming, so the
 * IPv6-aware helper is used for them.  Broken-checksum chips can only
 * have offload disabled.
 */
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		ethtool_op_set_tx_ipv6_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
8908
b9f2c044 8909static int tg3_get_sset_count (struct net_device *dev, int sset)
1da177e4 8910{
b9f2c044
JG
8911 switch (sset) {
8912 case ETH_SS_TEST:
8913 return TG3_NUM_TEST;
8914 case ETH_SS_STATS:
8915 return TG3_NUM_STATS;
8916 default:
8917 return -EOPNOTSUPP;
8918 }
4cafd3f5
MC
8919}
8920
1da177e4
LT
/* ethtool get_strings hook: copy the statistics or self-test name
 * tables into the caller's buffer.  Any other string set indicates a
 * driver bug (get_sset_count already rejected it), hence the WARN.
 */
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
8935
4009a93d
MC
/* ethtool phys_id hook: identify the adapter by blinking its LEDs for
 * @data seconds (0 means "effectively forever").  Alternates between
 * all-LEDs-forced-on and all-off at 1 Hz, restoring the original LED
 * state when done or interrupted by a signal.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = UINT_MAX / 2;

	/* Two half-second phases per requested second. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		/* Non-zero means we caught a signal — stop early. */
		if (msleep_interruptible(500))
			break;
	}
	/* Restore normal LED operation. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8967
1da177e4
LT
/* ethtool get_ethtool_stats hook: snapshot the driver's estats block
 * (refreshed by tg3_get_estats) into the caller's u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8974
566f86ad 8975#define NVRAM_TEST_SIZE 0x100
a5767dec
MC
8976#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8977#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8978#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
b16250e3
MC
8979#define NVRAM_SELFBOOT_HW_SIZE 0x20
8980#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
566f86ad
MC
8981
/* Self-test: verify the NVRAM contents.  Three layouts are handled:
 *  - legacy images:   CRC-32 checks of the bootstrap and manufacturing
 *                     blocks at fixed offsets;
 *  - selfboot (FW):   a simple byte-sum must be zero (rev 2 excludes
 *                     the MBA word from the sum);
 *  - selfboot (HW):   per-byte even-parity check, with the parity bits
 *                     packed into bytes 0, 8 and 16/17.
 * Returns 0 on success, -EIO on a read failure or checksum mismatch,
 * -ENOMEM if the scratch buffer cannot be allocated.  Unknown selfboot
 * revisions are deliberately not flagged as errors.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how much of the image the checksum covers. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: skip the test. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into the scratch buffer, word by word. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums to zero (mod 256). */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  Bytes 0
		 * and 8 each carry 7 parity bits; bytes 16-17 carry the
		 * remaining 14 (6 + 8).
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte plus its parity bit must have even
		 * total population count.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9115
ca43007a
MC
9116#define TG3_SERDES_TIMEOUT_SEC 2
9117#define TG3_COPPER_TIMEOUT_SEC 6
9118
9119static int tg3_test_link(struct tg3 *tp)
9120{
9121 int i, max;
9122
9123 if (!netif_running(tp->dev))
9124 return -ENODEV;
9125
4c987487 9126 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
9127 max = TG3_SERDES_TIMEOUT_SEC;
9128 else
9129 max = TG3_COPPER_TIMEOUT_SEC;
9130
9131 for (i = 0; i < max; i++) {
9132 if (netif_carrier_ok(tp->dev))
9133 return 0;
9134
9135 if (msleep_interruptible(1000))
9136 break;
9137 }
9138
9139 return -EIO;
9140}
9141
/* Only test the commonly used registers */
/* Self-test: for every entry in reg_tbl applicable to this chip, write
 * all-zeros then all-ones through the register and verify that the
 * read-only bits (read_mask) are unaffected while the writable bits
 * (write_mask) take the written value.  The original register value is
 * restored in all cases.  Returns 0 on success, -EIO on mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Per-entry chip-applicability flags. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* offset 0xffff terminates the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9362
7942e1db
MC
9363static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9364{
f71e1309 9365 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
9366 int i;
9367 u32 j;
9368
e9edda69 9369 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7942e1db
MC
9370 for (j = 0; j < len; j += 4) {
9371 u32 val;
9372
9373 tg3_write_mem(tp, offset + j, test_pattern[i]);
9374 tg3_read_mem(tp, offset + j, &val);
9375 if (val != test_pattern[i])
9376 return -EIO;
9377 }
9378 }
9379 return 0;
9380}
9381
/* Self-test the chip-internal memory regions.
 *
 * Each table lists { offset, length } pairs of on-chip memory to test,
 * terminated by offset 0xffffffff.  Which table applies depends on the
 * ASIC generation; newer chips expose fewer/smaller testable regions.
 *
 * Returns 0 on success or the first non-zero error from
 * tg3_do_mem_test() (-EIO on a read-back mismatch).
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}	/* terminator */
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table matching this ASIC family. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			mem_tbl = mem_tbl_5755;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			mem_tbl = mem_tbl_5906;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	/* Stop at the first failing region. */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
9439
9f40dead
MC
9440#define TG3_MAC_LOOPBACK 0
9441#define TG3_PHY_LOOPBACK 1
9442
/* Transmit one self-addressed 1514-byte test frame and verify it is
 * received intact through the selected loopback path.
 *
 * @tp: driver private state
 * @loopback_mode: TG3_MAC_LOOPBACK or TG3_PHY_LOOPBACK
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, and -EIO if the frame never arrives or
 * its payload does not match what was sent.
 *
 * NOTE(review): the register/PHY programming order below follows
 * hardware errata and must not be rearranged.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Loop frames back inside the MAC. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: tweak a shadow register (0x1b) before the
			 * loopback; presumably an EPHY workaround — the
			 * bit semantics are undocumented here.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		/* Put the PHY itself into loopback. */
		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* Link polarity fixups are PHY-model specific. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our own MAC as destination, a zeroed
	 * source/type area, and a predictable byte ramp as payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force an immediate status-block update to snapshot the rx
	 * producer index before we transmit.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the single test frame and kick the send mailbox. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Frame must have been consumed on tx and produced on rx. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the receive descriptor: must come from the standard
	 * ring, carry no (relevant) error bits, and match the tx length
	 * (rx length includes the 4-byte FCS, hence the -4).
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the received payload against the byte ramp we sent. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9610
9f40dead
MC
9611#define TG3_MAC_LOOPBACK_FAILED 1
9612#define TG3_PHY_LOOPBACK_FAILED 2
9613#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9614 TG3_PHY_LOOPBACK_FAILED)
9615
/* Run the loopback self-tests after a full hardware reset.
 *
 * Returns 0 on success or a bitmask of TG3_MAC_LOOPBACK_FAILED /
 * TG3_PHY_LOOPBACK_FAILED; returns TG3_LOOPBACK_FAILED outright if the
 * interface is down or the chip cannot be reset.  PHY loopback is
 * skipped on SERDES devices and when phylib manages the PHY.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	/* 5784/5761: grab the CPMU mutex and disable link-based power
	 * management for the duration of the MAC loopback test.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	/* Restore CPMU state and release the mutex taken above. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9672
4cafd3f5
MC
/* ethtool self-test entry point.
 *
 * data[0..5] report per-test results (non-zero = failure): nvram, link,
 * registers, memory, loopback (bitmask), interrupt.  The register,
 * memory and loopback tests are "offline": they halt the chip under the
 * full lock and restart it afterwards.  A device sleeping in low-power
 * mode is woken to D0 first and returned to D3hot at the end.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* Online tests: safe to run without halting the chip. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the hardware before poking at it. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test needs the lock dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the chip (and traffic) back up. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9745
1da177e4
LT
/* Net-device ioctl handler: supports the MII ioctls only.
 *
 * SIOCGMIIPHY/SIOCGMIIREG read a PHY register, SIOCSMIIREG writes one
 * (CAP_NET_ADMIN required).  SERDES devices have no MDIO-accessible PHY
 * and fall through to -EOPNOTSUPP; a device in low-power mode returns
 * -EAGAIN.  PHY access is serialized with tp->lock.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
9797
9798#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * RX mode accordingly.  Traffic is stopped while the group pointer and
 * RX mode are updated under the full lock.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
1da177e4
LT
9818#endif
9819
15f9850d
DM
9820static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9821{
9822 struct tg3 *tp = netdev_priv(dev);
9823
9824 memcpy(ec, &tp->coal, sizeof(*ec));
9825 return 0;
9826}
9827
d244c892
MC
9828static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9829{
9830 struct tg3 *tp = netdev_priv(dev);
9831 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9832 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9833
9834 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9835 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9836 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9837 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9838 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9839 }
9840
9841 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9842 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9843 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9844 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9845 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9846 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9847 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9848 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9849 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9850 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9851 return -EINVAL;
9852
9853 /* No rx interrupts will be generated if both are zero */
9854 if ((ec->rx_coalesce_usecs == 0) &&
9855 (ec->rx_max_coalesced_frames == 0))
9856 return -EINVAL;
9857
9858 /* No tx interrupts will be generated if both are zero */
9859 if ((ec->tx_coalesce_usecs == 0) &&
9860 (ec->tx_max_coalesced_frames == 0))
9861 return -EINVAL;
9862
9863 /* Only copy relevant parameters, ignore all others. */
9864 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9865 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9866 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9867 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9868 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9869 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9870 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9871 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9872 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9873
9874 if (netif_running(dev)) {
9875 tg3_full_lock(tp, 0);
9876 __tg3_set_coalesce(tp, &tp->coal);
9877 tg3_full_unlock(tp);
9878 }
9879 return 0;
9880}
9881
7282d491 9882static const struct ethtool_ops tg3_ethtool_ops = {
1da177e4
LT
9883 .get_settings = tg3_get_settings,
9884 .set_settings = tg3_set_settings,
9885 .get_drvinfo = tg3_get_drvinfo,
9886 .get_regs_len = tg3_get_regs_len,
9887 .get_regs = tg3_get_regs,
9888 .get_wol = tg3_get_wol,
9889 .set_wol = tg3_set_wol,
9890 .get_msglevel = tg3_get_msglevel,
9891 .set_msglevel = tg3_set_msglevel,
9892 .nway_reset = tg3_nway_reset,
9893 .get_link = ethtool_op_get_link,
9894 .get_eeprom_len = tg3_get_eeprom_len,
9895 .get_eeprom = tg3_get_eeprom,
9896 .set_eeprom = tg3_set_eeprom,
9897 .get_ringparam = tg3_get_ringparam,
9898 .set_ringparam = tg3_set_ringparam,
9899 .get_pauseparam = tg3_get_pauseparam,
9900 .set_pauseparam = tg3_set_pauseparam,
9901 .get_rx_csum = tg3_get_rx_csum,
9902 .set_rx_csum = tg3_set_rx_csum,
1da177e4 9903 .set_tx_csum = tg3_set_tx_csum,
1da177e4 9904 .set_sg = ethtool_op_set_sg,
1da177e4 9905 .set_tso = tg3_set_tso,
4cafd3f5 9906 .self_test = tg3_self_test,
1da177e4 9907 .get_strings = tg3_get_strings,
4009a93d 9908 .phys_id = tg3_phys_id,
1da177e4 9909 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 9910 .get_coalesce = tg3_get_coalesce,
d244c892 9911 .set_coalesce = tg3_set_coalesce,
b9f2c044 9912 .get_sset_count = tg3_get_sset_count,
1da177e4
LT
9913};
9914
/* Determine the size of a seeprom-style part by probing for address
 * wrap-around.  Leaves tp->nvram_size at the EEPROM_CHIP_SIZE default
 * if the magic at offset 0 is unreadable or unrecognized.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return;

	/* Accept the standard magic or the FW/HW selfboot variants;
	 * anything else means we cannot size the part this way.
	 */
	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
6aa20a22 9948
1da177e4
LT
/* Set tp->nvram_size: selfboot images are sized by probing
 * (tg3_get_eeprom_size); otherwise the size in KB is read from the
 * high half of the word at offset 0xf0, falling back to 512KB when
 * that word is zero or unreadable.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_nvram_read_swab(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* Upper 16 bits hold the size in kilobytes. */
			tp->nvram_size = (val >> 16) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
9970
/* Decode NVRAM_CFG1 for pre-5752 chips: record flash vs eeprom, and on
 * 5750/5780-class parts also the vendor (JEDEC id) and page size.
 * Other chips get the Atmel buffered-flash defaults.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: disable compatibility bypass. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
10023
361b4ac2
MC
/* Decode NVRAM_CFG1 on 5752: vendor, buffering, flash page size, and
 * the TPM NVRAM-protection flag (bit 27).
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts advertise their page size in CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
10084
d3c7b886
MC
/* Decode NVRAM_CFG1 on 5755: vendor, page size and total size.  When
 * TPM protection (bit 27) is on, only part of the device is usable, so
 * a smaller explicit size is recorded.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_5:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
			    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
				tp->nvram_size = (protect ? 0x3e200 :
						  TG3_NVRAM_SIZE_512KB);
			else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
				tp->nvram_size = (protect ? 0x1f200 :
						  TG3_NVRAM_SIZE_256KB);
			else
				tp->nvram_size = (protect ? 0x1f200 :
						  TG3_NVRAM_SIZE_128KB);
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
				tp->nvram_size = (protect ?
						  TG3_NVRAM_SIZE_64KB :
						  TG3_NVRAM_SIZE_128KB);
			else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
				tp->nvram_size = (protect ?
						  TG3_NVRAM_SIZE_64KB :
						  TG3_NVRAM_SIZE_256KB);
			else
				tp->nvram_size = (protect ?
						  TG3_NVRAM_SIZE_128KB :
						  TG3_NVRAM_SIZE_512KB);
			break;
	}
}
10140
1b27777a
MC
/* Decode NVRAM_CFG1 on 5787 (also used for 5784): vendor, buffering
 * and page size; size is left for the generic probe.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM parts: disable compatibility bypass. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
10178
6b91fa02
MC
/* Decode NVRAM_CFG1 on 5761: vendor/page size, then the device size —
 * read from NVRAM_ADDR_LOCKOUT when TPM protection is active, otherwise
 * derived from the vendor/density code.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			/* Atmel parts here use direct addressing. */
			tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}

	if (protect) {
		/* Protected: only the unlocked region is usable. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
			case FLASH_5761VENDOR_ATMEL_ADB161D:
			case FLASH_5761VENDOR_ATMEL_MDB161D:
			case FLASH_5761VENDOR_ST_A_M45PE16:
			case FLASH_5761VENDOR_ST_M_M45PE16:
				tp->nvram_size = TG3_NVRAM_SIZE_2MB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB081D:
			case FLASH_5761VENDOR_ATMEL_MDB081D:
			case FLASH_5761VENDOR_ST_A_M45PE80:
			case FLASH_5761VENDOR_ST_M_M45PE80:
				tp->nvram_size = TG3_NVRAM_SIZE_1MB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB041D:
			case FLASH_5761VENDOR_ATMEL_MDB041D:
			case FLASH_5761VENDOR_ST_A_M45PE40:
			case FLASH_5761VENDOR_ST_M_M45PE40:
				tp->nvram_size = TG3_NVRAM_SIZE_512KB;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB021D:
			case FLASH_5761VENDOR_ATMEL_MDB021D:
			case FLASH_5761VENDOR_ST_A_M45PE20:
			case FLASH_5761VENDOR_ST_M_M45PE20:
				tp->nvram_size = TG3_NVRAM_SIZE_256KB;
				break;
		}
	}
}
10253
b5d3772c
MC
10254static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10255{
10256 tp->nvram_jedecnum = JEDEC_ATMEL;
10257 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10258 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10259}
10260
1da177e4
LT
10261/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10262static void __devinit tg3_nvram_init(struct tg3 *tp)
10263{
1da177e4
LT
10264 tw32_f(GRC_EEPROM_ADDR,
10265 (EEPROM_ADDR_FSM_RESET |
10266 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10267 EEPROM_ADDR_CLKPERD_SHIFT)));
10268
9d57f01c 10269 msleep(1);
1da177e4
LT
10270
10271 /* Enable seeprom accesses. */
10272 tw32_f(GRC_LOCAL_CTRL,
10273 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10274 udelay(100);
10275
10276 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10277 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10278 tp->tg3_flags |= TG3_FLAG_NVRAM;
10279
ec41c7df
MC
10280 if (tg3_nvram_lock(tp)) {
10281 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10282 "tg3_nvram_init failed.\n", tp->dev->name);
10283 return;
10284 }
e6af301b 10285 tg3_enable_nvram_access(tp);
1da177e4 10286
989a9d23
MC
10287 tp->nvram_size = 0;
10288
361b4ac2
MC
10289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10290 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
10291 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10292 tg3_get_5755_nvram_info(tp);
d30cdd28
MC
10293 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
1b27777a 10295 tg3_get_5787_nvram_info(tp);
6b91fa02
MC
10296 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10297 tg3_get_5761_nvram_info(tp);
b5d3772c
MC
10298 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10299 tg3_get_5906_nvram_info(tp);
361b4ac2
MC
10300 else
10301 tg3_get_nvram_info(tp);
10302
989a9d23
MC
10303 if (tp->nvram_size == 0)
10304 tg3_get_nvram_size(tp);
1da177e4 10305
e6af301b 10306 tg3_disable_nvram_access(tp);
381291b7 10307 tg3_nvram_unlock(tp);
1da177e4
LT
10308
10309 } else {
10310 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10311
10312 tg3_get_eeprom_size(tp);
10313 }
10314}
10315
10316static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10317 u32 offset, u32 *val)
10318{
10319 u32 tmp;
10320 int i;
10321
10322 if (offset > EEPROM_ADDR_ADDR_MASK ||
10323 (offset % 4) != 0)
10324 return -EINVAL;
10325
10326 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10327 EEPROM_ADDR_DEVID_MASK |
10328 EEPROM_ADDR_READ);
10329 tw32(GRC_EEPROM_ADDR,
10330 tmp |
10331 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10332 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10333 EEPROM_ADDR_ADDR_MASK) |
10334 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10335
9d57f01c 10336 for (i = 0; i < 1000; i++) {
1da177e4
LT
10337 tmp = tr32(GRC_EEPROM_ADDR);
10338
10339 if (tmp & EEPROM_ADDR_COMPLETE)
10340 break;
9d57f01c 10341 msleep(1);
1da177e4
LT
10342 }
10343 if (!(tmp & EEPROM_ADDR_COMPLETE))
10344 return -EBUSY;
10345
10346 *val = tr32(GRC_EEPROM_DATA);
10347 return 0;
10348}
10349
10350#define NVRAM_CMD_TIMEOUT 10000
10351
10352static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10353{
10354 int i;
10355
10356 tw32(NVRAM_CMD, nvram_cmd);
10357 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10358 udelay(10);
10359 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10360 udelay(10);
10361 break;
10362 }
10363 }
10364 if (i == NVRAM_CMD_TIMEOUT) {
10365 return -EBUSY;
10366 }
10367 return 0;
10368}
10369
1820180b
MC
10370static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10371{
10372 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10373 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10374 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10375 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
1820180b
MC
10376 (tp->nvram_jedecnum == JEDEC_ATMEL))
10377
10378 addr = ((addr / tp->nvram_pagesize) <<
10379 ATMEL_AT45DB0X1B_PAGE_POS) +
10380 (addr % tp->nvram_pagesize);
10381
10382 return addr;
10383}
10384
c4e6575c
MC
10385static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10386{
10387 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10388 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10389 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10390 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
c4e6575c
MC
10391 (tp->nvram_jedecnum == JEDEC_ATMEL))
10392
10393 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10394 tp->nvram_pagesize) +
10395 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10396
10397 return addr;
10398}
10399
1da177e4
LT
10400static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10401{
10402 int ret;
10403
1da177e4
LT
10404 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10405 return tg3_nvram_read_using_eeprom(tp, offset, val);
10406
1820180b 10407 offset = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
10408
10409 if (offset > NVRAM_ADDR_MSK)
10410 return -EINVAL;
10411
ec41c7df
MC
10412 ret = tg3_nvram_lock(tp);
10413 if (ret)
10414 return ret;
1da177e4 10415
e6af301b 10416 tg3_enable_nvram_access(tp);
1da177e4
LT
10417
10418 tw32(NVRAM_ADDR, offset);
10419 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10420 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10421
10422 if (ret == 0)
10423 *val = swab32(tr32(NVRAM_RDDATA));
10424
e6af301b 10425 tg3_disable_nvram_access(tp);
1da177e4 10426
381291b7
MC
10427 tg3_nvram_unlock(tp);
10428
1da177e4
LT
10429 return ret;
10430}
10431
b9fc7dc5
AV
10432static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10433{
10434 u32 v;
10435 int res = tg3_nvram_read(tp, offset, &v);
10436 if (!res)
10437 *val = cpu_to_le32(v);
10438 return res;
10439}
10440
1820180b
MC
10441static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10442{
10443 int err;
10444 u32 tmp;
10445
10446 err = tg3_nvram_read(tp, offset, &tmp);
10447 *val = swab32(tmp);
10448 return err;
10449}
10450
/* Write @len bytes at @offset through the legacy EEPROM state
 * machine, one 32-bit word per transaction.  Offset and length are
 * dword aligned.  Returns 0 on success, -EBUSY if a word write does
 * not complete within ~1 second.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* FSM takes the word in CPU byte order. */
		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* Writing COMPLETE back clears any stale status bit. */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion of this word, up to ~1 second. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10493
/* offset and length are dword aligned */
/* Write to unbuffered flash, which can only be programmed a full
 * page at a time: for each affected page, read the page into a
 * bounce buffer, overlay the caller's bytes, erase the page, then
 * rewrite it word by word.  Returns 0 on success, -ENOMEM if the
 * bounce buffer cannot be allocated, or the first command/read error.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole current page into the bounce buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Overlay the caller's data on the page image. */
		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Rewrite the page, marking the first and last words. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Re-assert write-disable whether or not the writes succeeded. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10590
/* offset and length are dword aligned */
/* Write through the controller's buffered path, one word at a time.
 * FIRST/LAST command flags are raised at page boundaries and at the
 * ends of the transfer; ST flash on older ASICs additionally needs an
 * explicit write-enable before the first word of each page.  Returns
 * 0 on success or the first tg3_nvram_exec_cmd() error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the flash part's native addressing. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* First word of a page, or of the whole transfer. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		/* Last word of a page. */
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Last word of the whole transfer. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
			(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
			(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
			(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
			(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
			(tp->nvram_jedecnum == JEDEC_ST) &&
			(nvram_cmd & NVRAM_CMD_FIRST)) {

			/* ST parts need a write-enable before each page. */
			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10644
10645/* offset and length are dword aligned */
10646static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10647{
10648 int ret;
10649
1da177e4 10650 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
10651 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10652 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
10653 udelay(40);
10654 }
10655
10656 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10657 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10658 }
10659 else {
10660 u32 grc_mode;
10661
ec41c7df
MC
10662 ret = tg3_nvram_lock(tp);
10663 if (ret)
10664 return ret;
1da177e4 10665
e6af301b
MC
10666 tg3_enable_nvram_access(tp);
10667 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10668 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 10669 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
10670
10671 grc_mode = tr32(GRC_MODE);
10672 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10673
10674 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10675 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10676
10677 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10678 buf);
10679 }
10680 else {
10681 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10682 buf);
10683 }
10684
10685 grc_mode = tr32(GRC_MODE);
10686 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10687
e6af301b 10688 tg3_disable_nvram_access(tp);
1da177e4
LT
10689 tg3_nvram_unlock(tp);
10690 }
10691
10692 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 10693 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
10694 udelay(40);
10695 }
10696
10697 return ret;
10698}
10699
/* Mapping from a board's PCI subsystem vendor/device IDs to the PHY
 * fitted on that board; consulted by tg3_phy_probe() when neither the
 * MII registers nor the EEPROM yield a usable PHY ID.  A phy_id of 0
 * marks a SerDes (fiber) board with no addressable copper PHY.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10742
10743static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10744{
10745 int i;
10746
10747 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10748 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10749 tp->pdev->subsystem_vendor) &&
10750 (subsys_id_to_phy_id[i].subsys_devid ==
10751 tp->pdev->subsystem_device))
10752 return &subsys_id_to_phy_id[i];
10753 }
10754 return NULL;
10755}
10756
/* Pull the board configuration that the bootcode left behind: PHY ID,
 * copper vs. SerDes, LED mode, write-protect, ASF/APE and WOL flags.
 * On the 5906 the data comes from the VCPU shadow register; on all
 * other chips it is read from NIC SRAM, and only trusted when the
 * SRAM signature magic is present — otherwise the defaults set at the
 * top of the function remain in effect.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults used when no valid config is found below. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 carries its config in the VCPU shadow register,
		 * not NIC SRAM.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only present on newer bootcode versions. */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two packed SRAM fields. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Certain Arima boards report WP but must not use it. */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		/* SerDes boards lose WOL capability unless explicitly set. */
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
		    nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}
	}
}
10947
b2a5c19c
MC
10948static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10949{
10950 int i;
10951 u32 val;
10952
10953 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10954 tw32(OTP_CTRL, cmd);
10955
10956 /* Wait for up to 1 ms for command to execute. */
10957 for (i = 0; i < 100; i++) {
10958 val = tr32(OTP_STATUS);
10959 if (val & OTP_STATUS_CMD_DONE)
10960 break;
10961 udelay(10);
10962 }
10963
10964 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10965}
10966
10967/* Read the gphy configuration from the OTP region of the chip. The gphy
10968 * configuration is a 32-bit value that straddles the alignment boundary.
10969 * We do two 32-bit reads and then shift and merge the results.
10970 */
10971static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
10972{
10973 u32 bhalf_otp, thalf_otp;
10974
10975 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
10976
10977 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
10978 return 0;
10979
10980 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
10981
10982 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10983 return 0;
10984
10985 thalf_otp = tr32(OTP_READ_DATA);
10986
10987 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
10988
10989 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10990 return 0;
10991
10992 bhalf_otp = tr32(OTP_READ_DATA);
10993
10994 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
10995}
10996
/* Determine which PHY is attached and record it in tp->phy_id.
 * Probes the MII ID registers when ASF/APE firmware is not using the
 * PHY; falls back first to the ID read from the EEPROM in
 * tg3_get_eeprom_hw_cfg(), then to the hard-coded subsystem-ID table.
 * For copper PHYs with no current link, also resets the PHY and sets
 * up default autonegotiation advertisement.  Returns 0 on success or
 * a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* Table entries with phy_id 0 denote SerDes boards. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice; the second read reflects the
		 * current link state (status bits latch events).
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* 5401 DSP setup is deliberately run a second time when the
	 * first pass succeeded.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11124
/* Extract the board part number from the VPD data and store it in
 * tp->board_part_number.  The VPD image is taken from NVRAM offset
 * 0x100 when a valid EEPROM magic is present, otherwise it is read
 * through the PCI VPD capability.  On any failure a fallback string
 * ("BCM95906" or "none") is used instead.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Copy the VPD image out of NVRAM, a word at a time. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >> 0) & 0xff);
			vpd_data[i + 1] = ((tmp >> 8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No NVRAM image: fetch the VPD via the PCI capability,
		 * polling bit 15 of VPD_ADDR for each dword's completion.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip identifier-string (0x82) and read-write (0x91)
		 * VPD resources; a 16-bit length follows the tag byte.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Only a read-only resource (0x90) can hold 'PN'. */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword list looking for 'PN'. */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
11225
9c8a620e
MC
11226static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11227{
11228 u32 val;
11229
11230 if (tg3_nvram_read_swab(tp, offset, &val) ||
11231 (val & 0xfc000000) != 0x0c000000 ||
11232 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11233 val != 0)
11234 return 0;
11235
11236 return 1;
11237}
11238
/* Build the firmware version string in tp->fw_ver: up to 16 bytes of
 * bootcode version pulled from the firmware image referenced by the
 * NVRAM header, followed (when ASF is enabled and APE is not) by
 * ", <ASF version>" located through the NVRAM directory.  Silently
 * returns on any read failure or missing/invalid image.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: image pointer; word 0x4: image load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Copy the 16-byte version string out of the image. */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* ASF firmware version is only appended when ASF is active
	 * and the APE does not own the firmware interface.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVRAM directory for the ASF init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	/* Append up to 16 bytes of ASF version, truncating to fit. */
	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Ensure NUL termination even after a truncated copy. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11322
7544b097
MC
11323static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11324
1da177e4
LT
/* One-time probe of chip revision, bus type (PCI/PCI-X/PCIe), and errata.
 * Populates tp->tg3_flags / tg3_flags2 / tg3_flags3 workaround bits, selects
 * the register/mailbox access methods, and leaves the chip in D0.
 * Returns 0 on success or a negative errno; note that a tg3_phy_probe()
 * failure is reported via the return value but does NOT abort the probe.
 */
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	/* Host bridges known to reorder posted writes; presence of any of
	 * these forces readback-after-write on mailbox registers below.
	 */
	static struct pci_device_id write_reorder_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_8131_BRIDGE) },
		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
			     PCI_DEVICE_ID_VIA_8385_0) },
		{ },
	};
	u32 misc_ctrl_reg;
	u32 cacheline_sz_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err, pcie_cap;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers. It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		/* Newer chips report their revision in a dedicated
		 * product-ID register instead of MISC_HOST_CTRL.
		 */
		u32 prod_id_asic_rev;

		pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
				      &prod_id_asic_rev);
		tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {

				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* 5701 behind an Intel PXH bridge needs a DMA workaround;
	 * flag it if this device sits anywhere under such a bridge.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	}
	else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
			      &cacheline_sz_reg);

	tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
	tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
	tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
	tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;

	/* Dual-port devices: find the function sharing this NIC. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	/* Classify the chip generation; later workarounds key off these. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
		/* MSI is unreliable on 5750 AX/BX and on 5714 <= A2 when
		 * this function is the only one on the device.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
		} else {
			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
				ASIC_REV_5750 &&
	     		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
				tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;

	pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap != 0) {
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

		pcie_set_readrq(tp->pdev, 4096);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u16 lnkctl;

			/* 5906 cannot do HW TSO v2 when CLKREQ is enabled
			 * on the PCIe link.
			 */
			pci_read_config_word(tp->pdev,
					     pcie_cap + PCI_EXP_LNKCTL,
					     &lnkctl);
			if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
		}
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

	/* 5703 wants a minimum PCI latency timer of 64. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;

		cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
		cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
		cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
		cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);

		pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
				       cacheline_sz_reg);
	}

	/* Pre-5705 (and 5780-class) parts expose a PCI-X capability;
	 * its absence here indicates a broken config space.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			printk(KERN_ERR PFX "Cannot find PCI-X "
					    "capability, aborting.\n");
			return -EIO;
		}
	}

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;

		/* If this is a 5700 BX chipset, and we are in PCI-X
		 * mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
			u32 pm_reg;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	/* 5700 BX chips need to have their TX producer index mailboxes
	 * written twice to workaround a bug.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}


	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
		/* All register access goes through config cycles; MMIO is
		 * unmapped and memory decoding disabled (see ICH note above).
		 */
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLG2_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
			tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
	}

	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
		       pci_name(tp->pdev));
		return err;
	}

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

	/* Per-chip PHY errata flags. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Initialize MAC MI mode, polling disabled. */
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
		       pci_name(tp->pdev), err);
		/* ... but do not return immediately ... */
	}

	tg3_read_partno(tp);
	tg3_read_fw_ver(tp);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
		else
			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
				  TG3_FLAG_USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	/* All chips before 5787 can get confused if TX buffers
	 * straddle the 4GB address boundary in some cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->dev->hard_start_xmit = tg3_start_xmit;
	else
		tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;

	tp->rx_offset = 2;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;

	tp->rx_std_max_post = TG3_RX_RING_SIZE;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	/* Propagate any (non-fatal) tg3_phy_probe() failure. */
	return err;
}
12009
49b6e95f 12010#ifdef CONFIG_SPARC
1da177e4
LT
/* SPARC only: fetch the MAC address from the OpenFirmware
 * "local-mac-address" property of this PCI device's device node.
 * Copies it into both dev_addr and perm_addr on success.
 * Returns 0 on success, -ENODEV if the property is absent or malformed.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}
12027
/* SPARC only: last-resort fallback that uses the system IDPROM
 * ethernet address for both dev_addr and perm_addr.  Always succeeds.
 */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
12036#endif
12037
/* Discover the device MAC address, trying sources in priority order:
 * OpenFirmware (SPARC), the bootcode's SRAM mailbox, NVRAM, and finally
 * the live MAC address registers.  The SRAM mailbox value is only
 * trusted when tagged with the 'HK' (0x484b) signature AND it decodes
 * to a valid unicast address.  Fills dev->dev_addr and dev->perm_addr.
 * Returns 0 on success, -EINVAL if no valid address was found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Pick the NVRAM offset of the MAC address for this chip/function. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Second port of a dual-MAC device stores its address
		 * at a different offset.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12112
59e6b434
DM
12113#define BOUNDARY_SINGLE_CACHELINE 1
12114#define BOUNDARY_MULTI_CACHELINE 2
12115
12116static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12117{
12118 int cacheline_size;
12119 u8 byte;
12120 int goal;
12121
12122 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12123 if (byte == 0)
12124 cacheline_size = 1024;
12125 else
12126 cacheline_size = (int) byte * 4;
12127
12128 /* On 5703 and later chips, the boundary bits have no
12129 * effect.
12130 */
12131 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12132 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12133 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12134 goto out;
12135
12136#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12137 goal = BOUNDARY_MULTI_CACHELINE;
12138#else
12139#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12140 goal = BOUNDARY_SINGLE_CACHELINE;
12141#else
12142 goal = 0;
12143#endif
12144#endif
12145
12146 if (!goal)
12147 goto out;
12148
12149 /* PCI controllers on most RISC systems tend to disconnect
12150 * when a device tries to burst across a cache-line boundary.
12151 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12152 *
12153 * Unfortunately, for PCI-E there are only limited
12154 * write-side controls for this, and thus for reads
12155 * we will still get the disconnects. We'll also waste
12156 * these PCI cycles for both read and write for chips
12157 * other than 5700 and 5701 which do not implement the
12158 * boundary bits.
12159 */
12160 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12161 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12162 switch (cacheline_size) {
12163 case 16:
12164 case 32:
12165 case 64:
12166 case 128:
12167 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12168 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12169 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12170 } else {
12171 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12172 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12173 }
12174 break;
12175
12176 case 256:
12177 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12178 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12179 break;
12180
12181 default:
12182 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12183 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12184 break;
12185 };
12186 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12187 switch (cacheline_size) {
12188 case 16:
12189 case 32:
12190 case 64:
12191 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12192 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12193 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12194 break;
12195 }
12196 /* fallthrough */
12197 case 128:
12198 default:
12199 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12200 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12201 break;
12202 };
12203 } else {
12204 switch (cacheline_size) {
12205 case 16:
12206 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12207 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12208 DMA_RWCTRL_WRITE_BNDRY_16);
12209 break;
12210 }
12211 /* fallthrough */
12212 case 32:
12213 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12214 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12215 DMA_RWCTRL_WRITE_BNDRY_32);
12216 break;
12217 }
12218 /* fallthrough */
12219 case 64:
12220 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12221 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12222 DMA_RWCTRL_WRITE_BNDRY_64);
12223 break;
12224 }
12225 /* fallthrough */
12226 case 128:
12227 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12228 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12229 DMA_RWCTRL_WRITE_BNDRY_128);
12230 break;
12231 }
12232 /* fallthrough */
12233 case 256:
12234 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12235 DMA_RWCTRL_WRITE_BNDRY_256);
12236 break;
12237 case 512:
12238 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12239 DMA_RWCTRL_WRITE_BNDRY_512);
12240 break;
12241 case 1024:
12242 default:
12243 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12244 DMA_RWCTRL_WRITE_BNDRY_1024);
12245 break;
12246 };
12247 }
12248
12249out:
12250 return val;
12251}
12252
1da177e4
LT
12253static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12254{
12255 struct tg3_internal_buffer_desc test_desc;
12256 u32 sram_dma_descs;
12257 int i, ret;
12258
12259 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12260
12261 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12262 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12263 tw32(RDMAC_STATUS, 0);
12264 tw32(WDMAC_STATUS, 0);
12265
12266 tw32(BUFMGR_MODE, 0);
12267 tw32(FTQ_RESET, 0);
12268
12269 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12270 test_desc.addr_lo = buf_dma & 0xffffffff;
12271 test_desc.nic_mbuf = 0x00002100;
12272 test_desc.len = size;
12273
12274 /*
12275 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12276 * the *second* time the tg3 driver was getting loaded after an
12277 * initial scan.
12278 *
12279 * Broadcom tells me:
12280 * ...the DMA engine is connected to the GRC block and a DMA
12281 * reset may affect the GRC block in some unpredictable way...
12282 * The behavior of resets to individual blocks has not been tested.
12283 *
12284 * Broadcom noted the GRC reset will also reset all sub-components.
12285 */
12286 if (to_device) {
12287 test_desc.cqid_sqid = (13 << 8) | 2;
12288
12289 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12290 udelay(40);
12291 } else {
12292 test_desc.cqid_sqid = (16 << 8) | 7;
12293
12294 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12295 udelay(40);
12296 }
12297 test_desc.flags = 0x00000005;
12298
12299 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12300 u32 val;
12301
12302 val = *(((u32 *)&test_desc) + i);
12303 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12304 sram_dma_descs + (i * sizeof(u32)));
12305 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12306 }
12307 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12308
12309 if (to_device) {
12310 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12311 } else {
12312 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12313 }
12314
12315 ret = -ENODEV;
12316 for (i = 0; i < 40; i++) {
12317 u32 val;
12318
12319 if (to_device)
12320 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12321 else
12322 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12323 if ((val & 0xffff) == sram_dma_descs) {
12324 ret = 0;
12325 break;
12326 }
12327
12328 udelay(100);
12329 }
12330
12331 return ret;
12332}
12333
ded7340d 12334#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
12335
12336static int __devinit tg3_test_dma(struct tg3 *tp)
12337{
12338 dma_addr_t buf_dma;
59e6b434 12339 u32 *buf, saved_dma_rwctrl;
1da177e4
LT
12340 int ret;
12341
12342 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12343 if (!buf) {
12344 ret = -ENOMEM;
12345 goto out_nofree;
12346 }
12347
12348 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12349 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12350
59e6b434 12351 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
1da177e4
LT
12352
12353 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12354 /* DMA read watermark not used on PCIE */
12355 tp->dma_rwctrl |= 0x00180000;
12356 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
85e94ced
MC
12357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
1da177e4
LT
12359 tp->dma_rwctrl |= 0x003f0000;
12360 else
12361 tp->dma_rwctrl |= 0x003f000f;
12362 } else {
12363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12365 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
49afdeb6 12366 u32 read_water = 0x7;
1da177e4 12367
4a29cc2e
MC
12368 /* If the 5704 is behind the EPB bridge, we can
12369 * do the less restrictive ONE_DMA workaround for
12370 * better performance.
12371 */
12372 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12373 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12374 tp->dma_rwctrl |= 0x8000;
12375 else if (ccval == 0x6 || ccval == 0x7)
1da177e4
LT
12376 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12377
49afdeb6
MC
12378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12379 read_water = 4;
59e6b434 12380 /* Set bit 23 to enable PCIX hw bug fix */
49afdeb6
MC
12381 tp->dma_rwctrl |=
12382 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12383 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12384 (1 << 23);
4cf78e4f
MC
12385 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12386 /* 5780 always in PCIX mode */
12387 tp->dma_rwctrl |= 0x00144000;
a4e2b347
MC
12388 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12389 /* 5714 always in PCIX mode */
12390 tp->dma_rwctrl |= 0x00148000;
1da177e4
LT
12391 } else {
12392 tp->dma_rwctrl |= 0x001b000f;
12393 }
12394 }
12395
12396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12398 tp->dma_rwctrl &= 0xfffffff0;
12399
12400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12401 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12402 /* Remove this if it causes problems for some boards. */
12403 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12404
12405 /* On 5700/5701 chips, we need to set this bit.
12406 * Otherwise the chip will issue cacheline transactions
12407 * to streamable DMA memory with not all the byte
12408 * enables turned on. This is an error on several
12409 * RISC PCI controllers, in particular sparc64.
12410 *
12411 * On 5703/5704 chips, this bit has been reassigned
12412 * a different meaning. In particular, it is used
12413 * on those chips to enable a PCI-X workaround.
12414 */
12415 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12416 }
12417
12418 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12419
12420#if 0
12421 /* Unneeded, already done by tg3_get_invariants. */
12422 tg3_switch_clocks(tp);
12423#endif
12424
12425 ret = 0;
12426 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12427 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12428 goto out;
12429
59e6b434
DM
12430 /* It is best to perform DMA test with maximum write burst size
12431 * to expose the 5700/5701 write DMA bug.
12432 */
12433 saved_dma_rwctrl = tp->dma_rwctrl;
12434 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12435 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12436
1da177e4
LT
12437 while (1) {
12438 u32 *p = buf, i;
12439
12440 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12441 p[i] = i;
12442
12443 /* Send the buffer to the chip. */
12444 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12445 if (ret) {
12446 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12447 break;
12448 }
12449
12450#if 0
12451 /* validate data reached card RAM correctly. */
12452 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12453 u32 val;
12454 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12455 if (le32_to_cpu(val) != p[i]) {
12456 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12457 /* ret = -ENODEV here? */
12458 }
12459 p[i] = 0;
12460 }
12461#endif
12462 /* Now read it back. */
12463 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12464 if (ret) {
12465 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12466
12467 break;
12468 }
12469
12470 /* Verify it. */
12471 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12472 if (p[i] == i)
12473 continue;
12474
59e6b434
DM
12475 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12476 DMA_RWCTRL_WRITE_BNDRY_16) {
12477 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
1da177e4
LT
12478 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12479 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12480 break;
12481 } else {
12482 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12483 ret = -ENODEV;
12484 goto out;
12485 }
12486 }
12487
12488 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12489 /* Success. */
12490 ret = 0;
12491 break;
12492 }
12493 }
59e6b434
DM
12494 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12495 DMA_RWCTRL_WRITE_BNDRY_16) {
6d1cfbab
MC
12496 static struct pci_device_id dma_wait_state_chipsets[] = {
12497 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12498 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12499 { },
12500 };
12501
59e6b434 12502 /* DMA test passed without adjusting DMA boundary,
6d1cfbab
MC
12503 * now look for chipsets that are known to expose the
12504 * DMA bug without failing the test.
59e6b434 12505 */
6d1cfbab
MC
12506 if (pci_dev_present(dma_wait_state_chipsets)) {
12507 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12508 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12509 }
12510 else
12511 /* Safe to use the calculated DMA boundary. */
12512 tp->dma_rwctrl = saved_dma_rwctrl;
12513
59e6b434
DM
12514 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12515 }
1da177e4
LT
12516
12517out:
12518 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12519out_nofree:
12520 return ret;
12521}
12522
12523static void __devinit tg3_init_link_config(struct tg3 *tp)
12524{
12525 tp->link_config.advertising =
12526 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12527 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12528 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12529 ADVERTISED_Autoneg | ADVERTISED_MII);
12530 tp->link_config.speed = SPEED_INVALID;
12531 tp->link_config.duplex = DUPLEX_INVALID;
12532 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
12533 tp->link_config.active_speed = SPEED_INVALID;
12534 tp->link_config.active_duplex = DUPLEX_INVALID;
12535 tp->link_config.phy_is_low_power = 0;
12536 tp->link_config.orig_speed = SPEED_INVALID;
12537 tp->link_config.orig_duplex = DUPLEX_INVALID;
12538 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12539}
12540
12541static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12542{
fdfec172
MC
12543 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12544 tp->bufmgr_config.mbuf_read_dma_low_water =
12545 DEFAULT_MB_RDMA_LOW_WATER_5705;
12546 tp->bufmgr_config.mbuf_mac_rx_low_water =
12547 DEFAULT_MB_MACRX_LOW_WATER_5705;
12548 tp->bufmgr_config.mbuf_high_water =
12549 DEFAULT_MB_HIGH_WATER_5705;
b5d3772c
MC
12550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12551 tp->bufmgr_config.mbuf_mac_rx_low_water =
12552 DEFAULT_MB_MACRX_LOW_WATER_5906;
12553 tp->bufmgr_config.mbuf_high_water =
12554 DEFAULT_MB_HIGH_WATER_5906;
12555 }
fdfec172
MC
12556
12557 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12558 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12559 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12560 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12561 tp->bufmgr_config.mbuf_high_water_jumbo =
12562 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12563 } else {
12564 tp->bufmgr_config.mbuf_read_dma_low_water =
12565 DEFAULT_MB_RDMA_LOW_WATER;
12566 tp->bufmgr_config.mbuf_mac_rx_low_water =
12567 DEFAULT_MB_MACRX_LOW_WATER;
12568 tp->bufmgr_config.mbuf_high_water =
12569 DEFAULT_MB_HIGH_WATER;
12570
12571 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12572 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12573 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12574 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12575 tp->bufmgr_config.mbuf_high_water_jumbo =
12576 DEFAULT_MB_HIGH_WATER_JUMBO;
12577 }
1da177e4
LT
12578
12579 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12580 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12581}
12582
12583static char * __devinit tg3_phy_string(struct tg3 *tp)
12584{
12585 switch (tp->phy_id & PHY_ID_MASK) {
12586 case PHY_ID_BCM5400: return "5400";
12587 case PHY_ID_BCM5401: return "5401";
12588 case PHY_ID_BCM5411: return "5411";
12589 case PHY_ID_BCM5701: return "5701";
12590 case PHY_ID_BCM5703: return "5703";
12591 case PHY_ID_BCM5704: return "5704";
12592 case PHY_ID_BCM5705: return "5705";
12593 case PHY_ID_BCM5750: return "5750";
85e94ced 12594 case PHY_ID_BCM5752: return "5752";
a4e2b347 12595 case PHY_ID_BCM5714: return "5714";
4cf78e4f 12596 case PHY_ID_BCM5780: return "5780";
af36e6b6 12597 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 12598 case PHY_ID_BCM5787: return "5787";
d30cdd28 12599 case PHY_ID_BCM5784: return "5784";
126a3368 12600 case PHY_ID_BCM5756: return "5722/5756";
b5d3772c 12601 case PHY_ID_BCM5906: return "5906";
9936bcf6 12602 case PHY_ID_BCM5761: return "5761";
1da177e4
LT
12603 case PHY_ID_BCM8002: return "8002/serdes";
12604 case 0: return "serdes";
12605 default: return "unknown";
12606 };
12607}
12608
f9804ddb
MC
12609static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12610{
12611 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12612 strcpy(str, "PCI Express");
12613 return str;
12614 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12615 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12616
12617 strcpy(str, "PCIX:");
12618
12619 if ((clock_ctrl == 7) ||
12620 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12621 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12622 strcat(str, "133MHz");
12623 else if (clock_ctrl == 0)
12624 strcat(str, "33MHz");
12625 else if (clock_ctrl == 2)
12626 strcat(str, "50MHz");
12627 else if (clock_ctrl == 4)
12628 strcat(str, "66MHz");
12629 else if (clock_ctrl == 6)
12630 strcat(str, "100MHz");
f9804ddb
MC
12631 } else {
12632 strcpy(str, "PCI:");
12633 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12634 strcat(str, "66MHz");
12635 else
12636 strcat(str, "33MHz");
12637 }
12638 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12639 strcat(str, ":32-bit");
12640 else
12641 strcat(str, ":64-bit");
12642 return str;
12643}
12644
8c2dc7e1 12645static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
1da177e4
LT
12646{
12647 struct pci_dev *peer;
12648 unsigned int func, devnr = tp->pdev->devfn & ~7;
12649
12650 for (func = 0; func < 8; func++) {
12651 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12652 if (peer && peer != tp->pdev)
12653 break;
12654 pci_dev_put(peer);
12655 }
16fe9d74
MC
12656 /* 5704 can be configured in single-port mode, set peer to
12657 * tp->pdev in that case.
12658 */
12659 if (!peer) {
12660 peer = tp->pdev;
12661 return peer;
12662 }
1da177e4
LT
12663
12664 /*
12665 * We don't need to keep the refcount elevated; there's no way
12666 * to remove one half of this device without removing the other
12667 */
12668 pci_dev_put(peer);
12669
12670 return peer;
12671}
12672
15f9850d
DM
12673static void __devinit tg3_init_coal(struct tg3 *tp)
12674{
12675 struct ethtool_coalesce *ec = &tp->coal;
12676
12677 memset(ec, 0, sizeof(*ec));
12678 ec->cmd = ETHTOOL_GCOALESCE;
12679 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12680 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12681 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12682 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12683 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12684 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12685 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12686 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12687 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12688
12689 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12690 HOSTCC_MODE_CLRTICK_TXBD)) {
12691 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12692 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12693 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12694 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12695 }
d244c892
MC
12696
12697 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12698 ec->rx_coalesce_usecs_irq = 0;
12699 ec->tx_coalesce_usecs_irq = 0;
12700 ec->stats_block_coalesce_usecs = 0;
12701 }
15f9850d
DM
12702}
12703
1da177e4
LT
12704static int __devinit tg3_init_one(struct pci_dev *pdev,
12705 const struct pci_device_id *ent)
12706{
12707 static int tg3_version_printed = 0;
2de58e30
SS
12708 resource_size_t tg3reg_base;
12709 unsigned long tg3reg_len;
1da177e4
LT
12710 struct net_device *dev;
12711 struct tg3 *tp;
d6645372 12712 int err, pm_cap;
f9804ddb 12713 char str[40];
72f2afb8 12714 u64 dma_mask, persist_dma_mask;
d6645372 12715 DECLARE_MAC_BUF(mac);
1da177e4
LT
12716
12717 if (tg3_version_printed++ == 0)
12718 printk(KERN_INFO "%s", version);
12719
12720 err = pci_enable_device(pdev);
12721 if (err) {
12722 printk(KERN_ERR PFX "Cannot enable PCI device, "
12723 "aborting.\n");
12724 return err;
12725 }
12726
12727 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12728 printk(KERN_ERR PFX "Cannot find proper PCI device "
12729 "base address, aborting.\n");
12730 err = -ENODEV;
12731 goto err_out_disable_pdev;
12732 }
12733
12734 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12735 if (err) {
12736 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12737 "aborting.\n");
12738 goto err_out_disable_pdev;
12739 }
12740
12741 pci_set_master(pdev);
12742
12743 /* Find power-management capability. */
12744 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12745 if (pm_cap == 0) {
12746 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12747 "aborting.\n");
12748 err = -EIO;
12749 goto err_out_free_res;
12750 }
12751
1da177e4
LT
12752 tg3reg_base = pci_resource_start(pdev, 0);
12753 tg3reg_len = pci_resource_len(pdev, 0);
12754
12755 dev = alloc_etherdev(sizeof(*tp));
12756 if (!dev) {
12757 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12758 err = -ENOMEM;
12759 goto err_out_free_res;
12760 }
12761
1da177e4
LT
12762 SET_NETDEV_DEV(dev, &pdev->dev);
12763
1da177e4
LT
12764#if TG3_VLAN_TAG_USED
12765 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12766 dev->vlan_rx_register = tg3_vlan_rx_register;
1da177e4
LT
12767#endif
12768
12769 tp = netdev_priv(dev);
12770 tp->pdev = pdev;
12771 tp->dev = dev;
12772 tp->pm_cap = pm_cap;
12773 tp->mac_mode = TG3_DEF_MAC_MODE;
12774 tp->rx_mode = TG3_DEF_RX_MODE;
12775 tp->tx_mode = TG3_DEF_TX_MODE;
8ef21428 12776
1da177e4
LT
12777 if (tg3_debug > 0)
12778 tp->msg_enable = tg3_debug;
12779 else
12780 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12781
12782 /* The word/byte swap controls here control register access byte
12783 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12784 * setting below.
12785 */
12786 tp->misc_host_ctrl =
12787 MISC_HOST_CTRL_MASK_PCI_INT |
12788 MISC_HOST_CTRL_WORD_SWAP |
12789 MISC_HOST_CTRL_INDIR_ACCESS |
12790 MISC_HOST_CTRL_PCISTATE_RW;
12791
12792 /* The NONFRM (non-frame) byte/word swap controls take effect
12793 * on descriptor entries, anything which isn't packet data.
12794 *
12795 * The StrongARM chips on the board (one for tx, one for rx)
12796 * are running in big-endian mode.
12797 */
12798 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12799 GRC_MODE_WSWAP_NONFRM_DATA);
12800#ifdef __BIG_ENDIAN
12801 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12802#endif
12803 spin_lock_init(&tp->lock);
1da177e4 12804 spin_lock_init(&tp->indirect_lock);
c4028958 12805 INIT_WORK(&tp->reset_task, tg3_reset_task);
1da177e4
LT
12806
12807 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
ab0049b4 12808 if (!tp->regs) {
1da177e4
LT
12809 printk(KERN_ERR PFX "Cannot map device registers, "
12810 "aborting.\n");
12811 err = -ENOMEM;
12812 goto err_out_free_dev;
12813 }
12814
12815 tg3_init_link_config(tp);
12816
1da177e4
LT
12817 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12818 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12819 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12820
12821 dev->open = tg3_open;
12822 dev->stop = tg3_close;
12823 dev->get_stats = tg3_get_stats;
12824 dev->set_multicast_list = tg3_set_rx_mode;
12825 dev->set_mac_address = tg3_set_mac_addr;
12826 dev->do_ioctl = tg3_ioctl;
12827 dev->tx_timeout = tg3_tx_timeout;
bea3348e 12828 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
1da177e4 12829 dev->ethtool_ops = &tg3_ethtool_ops;
1da177e4
LT
12830 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12831 dev->change_mtu = tg3_change_mtu;
12832 dev->irq = pdev->irq;
12833#ifdef CONFIG_NET_POLL_CONTROLLER
12834 dev->poll_controller = tg3_poll_controller;
12835#endif
12836
12837 err = tg3_get_invariants(tp);
12838 if (err) {
12839 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12840 "aborting.\n");
12841 goto err_out_iounmap;
12842 }
12843
4a29cc2e
MC
12844 /* The EPB bridge inside 5714, 5715, and 5780 and any
12845 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
12846 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12847 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12848 * do DMA address check in tg3_start_xmit().
12849 */
4a29cc2e
MC
12850 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12851 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12852 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
72f2afb8
MC
12853 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12854#ifdef CONFIG_HIGHMEM
12855 dma_mask = DMA_64BIT_MASK;
12856#endif
4a29cc2e 12857 } else
72f2afb8
MC
12858 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12859
12860 /* Configure DMA attributes. */
12861 if (dma_mask > DMA_32BIT_MASK) {
12862 err = pci_set_dma_mask(pdev, dma_mask);
12863 if (!err) {
12864 dev->features |= NETIF_F_HIGHDMA;
12865 err = pci_set_consistent_dma_mask(pdev,
12866 persist_dma_mask);
12867 if (err < 0) {
12868 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12869 "DMA for consistent allocations\n");
12870 goto err_out_iounmap;
12871 }
12872 }
12873 }
12874 if (err || dma_mask == DMA_32BIT_MASK) {
12875 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12876 if (err) {
12877 printk(KERN_ERR PFX "No usable DMA configuration, "
12878 "aborting.\n");
12879 goto err_out_iounmap;
12880 }
12881 }
12882
fdfec172 12883 tg3_init_bufmgr_config(tp);
1da177e4 12884
1da177e4
LT
12885 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12886 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12887 }
12888 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12890 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
c7835a77 12891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
1da177e4
LT
12892 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12893 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12894 } else {
7f62ad5d 12895 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
1da177e4
LT
12896 }
12897
4e3a7aaa
MC
12898 /* TSO is on by default on chips that support hardware TSO.
12899 * Firmware TSO on older chips gives lower performance, so it
12900 * is off by default, but can be enabled using ethtool.
12901 */
b0026624 12902 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
1da177e4 12903 dev->features |= NETIF_F_TSO;
b5d3772c
MC
12904 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12905 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
b0026624 12906 dev->features |= NETIF_F_TSO6;
9936bcf6
MC
12907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12908 dev->features |= NETIF_F_TSO_ECN;
b0026624 12909 }
1da177e4 12910
1da177e4
LT
12911
12912 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12913 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12914 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12915 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12916 tp->rx_pending = 63;
12917 }
12918
1da177e4
LT
12919 err = tg3_get_device_address(tp);
12920 if (err) {
12921 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12922 "aborting.\n");
12923 goto err_out_iounmap;
12924 }
12925
c88864df
MC
12926 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12927 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12928 printk(KERN_ERR PFX "Cannot find proper PCI device "
12929 "base address for APE, aborting.\n");
12930 err = -ENODEV;
12931 goto err_out_iounmap;
12932 }
12933
12934 tg3reg_base = pci_resource_start(pdev, 2);
12935 tg3reg_len = pci_resource_len(pdev, 2);
12936
12937 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
79ea13ce 12938 if (!tp->aperegs) {
c88864df
MC
12939 printk(KERN_ERR PFX "Cannot map APE registers, "
12940 "aborting.\n");
12941 err = -ENOMEM;
12942 goto err_out_iounmap;
12943 }
12944
12945 tg3_ape_lock_init(tp);
12946 }
12947
1da177e4
LT
12948 /*
12949 * Reset chip in case UNDI or EFI driver did not shutdown
12950 * DMA self test will enable WDMAC and we'll see (spurious)
12951 * pending DMA on the PCI bus at that point.
12952 */
12953 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12954 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
1da177e4 12955 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 12956 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
12957 }
12958
12959 err = tg3_test_dma(tp);
12960 if (err) {
12961 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
c88864df 12962 goto err_out_apeunmap;
1da177e4
LT
12963 }
12964
12965 /* Tigon3 can do ipv4 only... and some chips have buggy
12966 * checksumming.
12967 */
12968 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
d212f87b 12969 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
af36e6b6 12970 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 12971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
12972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d212f87b
SH
12974 dev->features |= NETIF_F_IPV6_CSUM;
12975
1da177e4
LT
12976 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12977 } else
12978 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12979
1da177e4
LT
12980 /* flow control autonegotiation is default behavior */
12981 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8d018621 12982 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1da177e4 12983
15f9850d
DM
12984 tg3_init_coal(tp);
12985
c49a1561
MC
12986 pci_set_drvdata(pdev, dev);
12987
1da177e4
LT
12988 err = register_netdev(dev);
12989 if (err) {
12990 printk(KERN_ERR PFX "Cannot register net device, "
12991 "aborting.\n");
0d3031d9 12992 goto err_out_apeunmap;
1da177e4
LT
12993 }
12994
d6645372
JP
12995 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
12996 "(%s) %s Ethernet %s\n",
1da177e4
LT
12997 dev->name,
12998 tp->board_part_number,
12999 tp->pci_chip_rev_id,
13000 tg3_phy_string(tp),
f9804ddb 13001 tg3_bus_string(tp, str),
cbb45d21
MC
13002 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13003 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
d6645372
JP
13004 "10/100/1000Base-T")),
13005 print_mac(mac, dev->dev_addr));
1da177e4
LT
13006
13007 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
1c46ae05 13008 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
1da177e4
LT
13009 dev->name,
13010 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13011 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13012 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13013 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
1da177e4
LT
13014 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13015 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
4a29cc2e
MC
13016 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13017 dev->name, tp->dma_rwctrl,
13018 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13019 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
1da177e4
LT
13020
13021 return 0;
13022
0d3031d9
MC
13023err_out_apeunmap:
13024 if (tp->aperegs) {
13025 iounmap(tp->aperegs);
13026 tp->aperegs = NULL;
13027 }
13028
1da177e4 13029err_out_iounmap:
6892914f
MC
13030 if (tp->regs) {
13031 iounmap(tp->regs);
22abe310 13032 tp->regs = NULL;
6892914f 13033 }
1da177e4
LT
13034
13035err_out_free_dev:
13036 free_netdev(dev);
13037
13038err_out_free_res:
13039 pci_release_regions(pdev);
13040
13041err_out_disable_pdev:
13042 pci_disable_device(pdev);
13043 pci_set_drvdata(pdev, NULL);
13044 return err;
13045}
13046
13047static void __devexit tg3_remove_one(struct pci_dev *pdev)
13048{
13049 struct net_device *dev = pci_get_drvdata(pdev);
13050
13051 if (dev) {
13052 struct tg3 *tp = netdev_priv(dev);
13053
7faa006f 13054 flush_scheduled_work();
1da177e4 13055 unregister_netdev(dev);
0d3031d9
MC
13056 if (tp->aperegs) {
13057 iounmap(tp->aperegs);
13058 tp->aperegs = NULL;
13059 }
6892914f
MC
13060 if (tp->regs) {
13061 iounmap(tp->regs);
22abe310 13062 tp->regs = NULL;
6892914f 13063 }
1da177e4
LT
13064 free_netdev(dev);
13065 pci_release_regions(pdev);
13066 pci_disable_device(pdev);
13067 pci_set_drvdata(pdev, NULL);
13068 }
13069}
13070
13071static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13072{
13073 struct net_device *dev = pci_get_drvdata(pdev);
13074 struct tg3 *tp = netdev_priv(dev);
13075 int err;
13076
3e0c95fd
MC
13077 /* PCI register 4 needs to be saved whether netif_running() or not.
13078 * MSI address and data need to be saved if using MSI and
13079 * netif_running().
13080 */
13081 pci_save_state(pdev);
13082
1da177e4
LT
13083 if (!netif_running(dev))
13084 return 0;
13085
7faa006f 13086 flush_scheduled_work();
1da177e4
LT
13087 tg3_netif_stop(tp);
13088
13089 del_timer_sync(&tp->timer);
13090
f47c11ee 13091 tg3_full_lock(tp, 1);
1da177e4 13092 tg3_disable_ints(tp);
f47c11ee 13093 tg3_full_unlock(tp);
1da177e4
LT
13094
13095 netif_device_detach(dev);
13096
f47c11ee 13097 tg3_full_lock(tp, 0);
944d980e 13098 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6a9eba15 13099 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
f47c11ee 13100 tg3_full_unlock(tp);
1da177e4
LT
13101
13102 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
13103 if (err) {
f47c11ee 13104 tg3_full_lock(tp, 0);
1da177e4 13105
6a9eba15 13106 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
b9ec6c1b
MC
13107 if (tg3_restart_hw(tp, 1))
13108 goto out;
1da177e4
LT
13109
13110 tp->timer.expires = jiffies + tp->timer_offset;
13111 add_timer(&tp->timer);
13112
13113 netif_device_attach(dev);
13114 tg3_netif_start(tp);
13115
b9ec6c1b 13116out:
f47c11ee 13117 tg3_full_unlock(tp);
1da177e4
LT
13118 }
13119
13120 return err;
13121}
13122
13123static int tg3_resume(struct pci_dev *pdev)
13124{
13125 struct net_device *dev = pci_get_drvdata(pdev);
13126 struct tg3 *tp = netdev_priv(dev);
13127 int err;
13128
3e0c95fd
MC
13129 pci_restore_state(tp->pdev);
13130
1da177e4
LT
13131 if (!netif_running(dev))
13132 return 0;
13133
bc1c7567 13134 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
13135 if (err)
13136 return err;
13137
13138 netif_device_attach(dev);
13139
f47c11ee 13140 tg3_full_lock(tp, 0);
1da177e4 13141
6a9eba15 13142 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
b9ec6c1b
MC
13143 err = tg3_restart_hw(tp, 1);
13144 if (err)
13145 goto out;
1da177e4
LT
13146
13147 tp->timer.expires = jiffies + tp->timer_offset;
13148 add_timer(&tp->timer);
13149
1da177e4
LT
13150 tg3_netif_start(tp);
13151
b9ec6c1b 13152out:
f47c11ee 13153 tg3_full_unlock(tp);
1da177e4 13154
b9ec6c1b 13155 return err;
1da177e4
LT
13156}
13157
13158static struct pci_driver tg3_driver = {
13159 .name = DRV_MODULE_NAME,
13160 .id_table = tg3_pci_tbl,
13161 .probe = tg3_init_one,
13162 .remove = __devexit_p(tg3_remove_one),
13163 .suspend = tg3_suspend,
13164 .resume = tg3_resume
13165};
13166
13167static int __init tg3_init(void)
13168{
29917620 13169 return pci_register_driver(&tg3_driver);
1da177e4
LT
13170}
13171
13172static void __exit tg3_cleanup(void)
13173{
13174 pci_unregister_driver(&tg3_driver);
13175}
13176
13177module_init(tg3_init);
13178module_exit(tg3_cleanup);