1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
16 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/in.h>
28#include <linux/init.h>
29#include <linux/ioport.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/if_vlan.h>
37#include <linux/ip.h>
38#include <linux/tcp.h>
39#include <linux/workqueue.h>
40#include <linux/prefetch.h>
41#include <linux/dma-mapping.h>
42
43#include <net/checksum.h>
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
50#ifdef CONFIG_SPARC64
51#include <asm/idprom.h>
52#include <asm/oplib.h>
53#include <asm/pbm.h>
54#endif
55
56#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57#define TG3_VLAN_TAG_USED 1
58#else
59#define TG3_VLAN_TAG_USED 0
60#endif
61
62#ifdef NETIF_F_TSO
63#define TG3_TSO_SUPPORT 1
64#else
65#define TG3_TSO_SUPPORT 0
66#endif
67
68#include "tg3.h"
69
70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.51"
73#define DRV_MODULE_RELDATE "Feb 21, 2006"
74
75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0
77#define TG3_DEF_TX_MODE 0
78#define TG3_DEF_MSG_ENABLE \
79 (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | \
83 NETIF_MSG_IFDOWN | \
84 NETIF_MSG_IFUP | \
85 NETIF_MSG_RX_ERR | \
86 NETIF_MSG_TX_ERR)
87
88/* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
90 */
91#define TG3_TX_TIMEOUT (5 * HZ)
92
93/* hardware minimum and maximum for a single frame's data payload */
94#define TG3_MIN_MTU 60
95#define TG3_MAX_MTU(tp) \
96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98/* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
101 */
102#define TG3_RX_RING_SIZE 512
103#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JUMBO_RING_SIZE 256
105#define TG3_DEF_RX_JUMBO_RING_PENDING 100
106
107/* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
112 */
113#define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115
116#define TG3_TX_RING_SIZE 512
117#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118
119#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RING_SIZE)
121#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE)
127#define TX_BUFFS_AVAIL(TP) \
128 ((TP)->tx_pending - \
129 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134
135/* minimum number of free TX descriptors required to wake up TX process */
136#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137
138/* number of ETHTOOL_GSTATS u64's */
139#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141#define TG3_NUM_TEST 6
142
143static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148MODULE_LICENSE("GPL");
149MODULE_VERSION(DRV_MODULE_VERSION);
150
151static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152module_param(tg3_debug, int, 0);
153MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155static struct pci_device_id tg3_pci_tbl[] = {
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
245 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
247 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
249 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
251 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252 { 0, }
253};
254
255MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
256
257static struct {
258 const char string[ETH_GSTRING_LEN];
259} ethtool_stats_keys[TG3_NUM_STATS] = {
260 { "rx_octets" },
261 { "rx_fragments" },
262 { "rx_ucast_packets" },
263 { "rx_mcast_packets" },
264 { "rx_bcast_packets" },
265 { "rx_fcs_errors" },
266 { "rx_align_errors" },
267 { "rx_xon_pause_rcvd" },
268 { "rx_xoff_pause_rcvd" },
269 { "rx_mac_ctrl_rcvd" },
270 { "rx_xoff_entered" },
271 { "rx_frame_too_long_errors" },
272 { "rx_jabbers" },
273 { "rx_undersize_packets" },
274 { "rx_in_length_errors" },
275 { "rx_out_length_errors" },
276 { "rx_64_or_less_octet_packets" },
277 { "rx_65_to_127_octet_packets" },
278 { "rx_128_to_255_octet_packets" },
279 { "rx_256_to_511_octet_packets" },
280 { "rx_512_to_1023_octet_packets" },
281 { "rx_1024_to_1522_octet_packets" },
282 { "rx_1523_to_2047_octet_packets" },
283 { "rx_2048_to_4095_octet_packets" },
284 { "rx_4096_to_8191_octet_packets" },
285 { "rx_8192_to_9022_octet_packets" },
286
287 { "tx_octets" },
288 { "tx_collisions" },
289
290 { "tx_xon_sent" },
291 { "tx_xoff_sent" },
292 { "tx_flow_control" },
293 { "tx_mac_errors" },
294 { "tx_single_collisions" },
295 { "tx_mult_collisions" },
296 { "tx_deferred" },
297 { "tx_excessive_collisions" },
298 { "tx_late_collisions" },
299 { "tx_collide_2times" },
300 { "tx_collide_3times" },
301 { "tx_collide_4times" },
302 { "tx_collide_5times" },
303 { "tx_collide_6times" },
304 { "tx_collide_7times" },
305 { "tx_collide_8times" },
306 { "tx_collide_9times" },
307 { "tx_collide_10times" },
308 { "tx_collide_11times" },
309 { "tx_collide_12times" },
310 { "tx_collide_13times" },
311 { "tx_collide_14times" },
312 { "tx_collide_15times" },
313 { "tx_ucast_packets" },
314 { "tx_mcast_packets" },
315 { "tx_bcast_packets" },
316 { "tx_carrier_sense_errors" },
317 { "tx_discards" },
318 { "tx_errors" },
319
320 { "dma_writeq_full" },
321 { "dma_write_prioq_full" },
322 { "rxbds_empty" },
323 { "rx_discards" },
324 { "rx_errors" },
325 { "rx_threshold_hit" },
326
327 { "dma_readq_full" },
328 { "dma_read_prioq_full" },
329 { "tx_comp_queue_full" },
330
331 { "ring_set_send_prod_index" },
332 { "ring_status_update" },
333 { "nic_irqs" },
334 { "nic_avoided_irqs" },
335 { "nic_tx_threshold_hit" }
336};
337
338static struct {
339 const char string[ETH_GSTRING_LEN];
340} ethtool_test_keys[TG3_NUM_TEST] = {
341 { "nvram test (online) " },
342 { "link test (online) " },
343 { "register test (offline)" },
344 { "memory test (offline)" },
345 { "loopback test (offline)" },
346 { "interrupt test (offline)" },
347};
348
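/* Register accessors. tp->read32/tp->write32 and the mailbox variants are
 * function pointers, so the same call sites work whether the chip is
 * programmed through direct MMIO or through the indirect PCI config-space
 * window used as a workaround on some chips.
 */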
349static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
350{
351 writel(val, tp->regs + off);
352}
353
354static u32 tg3_read32(struct tg3 *tp, u32 off)
355{
356 return (readl(tp->regs + off));
357}
358
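/* Indirect register access: the target offset is loaded into the
 * TG3PCI_REG_BASE_ADDR window and the data is transferred through
 * TG3PCI_REG_DATA, both in PCI config space.  indirect_lock serializes
 * use of the shared window.
 */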
359static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
360{
361 unsigned long flags;
362
363 spin_lock_irqsave(&tp->indirect_lock, flags);
364 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
365 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
366 spin_unlock_irqrestore(&tp->indirect_lock, flags);
367}
368
369static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
370{
371 writel(val, tp->regs + off);
372 readl(tp->regs + off);
373}
374
375static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
376{
377 unsigned long flags;
378 u32 val;
379
380 spin_lock_irqsave(&tp->indirect_lock, flags);
381 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
382 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
383 spin_unlock_irqrestore(&tp->indirect_lock, flags);
384 return val;
385}
386
387static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
388{
389 unsigned long flags;
390
391 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
392 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
393 TG3_64BIT_REG_LOW, val);
394 return;
395 }
396 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
397 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
398 TG3_64BIT_REG_LOW, val);
399 return;
400 }
401
402 spin_lock_irqsave(&tp->indirect_lock, flags);
403 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
404 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
405 spin_unlock_irqrestore(&tp->indirect_lock, flags);
406
407 /* In indirect mode when disabling interrupts, we also need
408 * to clear the interrupt bit in the GRC local ctrl register.
409 */
410 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
411 (val == 0x1)) {
412 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
413 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
414 }
415}
416
417static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
418{
419 unsigned long flags;
420 u32 val;
421
422 spin_lock_irqsave(&tp->indirect_lock, flags);
423 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
424 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
425 spin_unlock_irqrestore(&tp->indirect_lock, flags);
426 return val;
427}
428
429/* usec_wait specifies the wait time in usec when writing to certain registers
430 * where it is unsafe to read back the register without some delay.
431 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
432 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
433 */
434static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
435{
436 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
437 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
438 /* Non-posted methods */
439 tp->write32(tp, off, val);
440 else {
441 /* Posted method */
442 tg3_write32(tp, off, val);
443 if (usec_wait)
444 udelay(usec_wait);
445 tp->read32(tp, off);
446 }
447 /* Wait again after the read for the posted method to guarantee that
448 * the wait time is met.
449 */
450 if (usec_wait)
451 udelay(usec_wait);
452}
453
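/* Write a mailbox register, then read it back to flush the posted write
 * unless the chip is flagged for the write-reorder or ICH workarounds.
 */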
454static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
455{
456 tp->write32_mbox(tp, off, val);
457 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
458 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
459 tp->read32_mbox(tp, off);
460}
461
462static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
463{
464 void __iomem *mbox = tp->regs + off;
465 writel(val, mbox);
466 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
467 writel(val, mbox);
468 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
469 readl(mbox);
470}
471
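/* Shorthand register access macros used throughout the driver; they all
 * dispatch through the accessor routines above, so callers do not care
 * which access method the chip needs.
 */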
472#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
473#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
474#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
475#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
476#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
477
478#define tw32(reg,val) tp->write32(tp, reg, val)
479#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
480#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
481#define tr32(reg) tp->read32(tp, reg)
482
483static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
484{
485 unsigned long flags;
486
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
490
491 /* Always leave this as zero. */
492 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
493 spin_unlock_irqrestore(&tp->indirect_lock, flags);
494}
495
496static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
497{
498 /* If no workaround is needed, write to mem space directly */
499 if (tp->write32 != tg3_write_indirect_reg32)
500 tw32(NIC_SRAM_WIN_BASE + off, val);
501 else
502 tg3_write_mem(tp, off, val);
503}
504
505static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
506{
507 unsigned long flags;
508
509 spin_lock_irqsave(&tp->indirect_lock, flags);
510 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
511 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
512
513 /* Always leave this as zero. */
514 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
515 spin_unlock_irqrestore(&tp->indirect_lock, flags);
516}
517
518static void tg3_disable_ints(struct tg3 *tp)
519{
520 tw32(TG3PCI_MISC_HOST_CTRL,
521 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
522 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
523}
524
525static inline void tg3_cond_int(struct tg3 *tp)
526{
527 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
528 (tp->hw_status->status & SD_STATUS_UPDATED))
529 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
530}
531
532static void tg3_enable_ints(struct tg3 *tp)
533{
534 tp->irq_sync = 0;
535 wmb();
536
537 tw32(TG3PCI_MISC_HOST_CTRL,
538 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
539 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
540 (tp->last_tag << 24));
541 tg3_cond_int(tp);
542}
543
544static inline unsigned int tg3_has_work(struct tg3 *tp)
545{
546 struct tg3_hw_status *sblk = tp->hw_status;
547 unsigned int work_exists = 0;
548
549 /* check for phy events */
550 if (!(tp->tg3_flags &
551 (TG3_FLAG_USE_LINKCHG_REG |
552 TG3_FLAG_POLL_SERDES))) {
553 if (sblk->status & SD_STATUS_LINK_CHG)
554 work_exists = 1;
555 }
556 /* check for RX/TX work to do */
557 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
558 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
559 work_exists = 1;
560
561 return work_exists;
562}
563
564/* tg3_restart_ints
565 * similar to tg3_enable_ints, but it accurately determines whether there
566 * is new work pending and can return without flushing the PIO write
567 * which reenables interrupts
568 */
569static void tg3_restart_ints(struct tg3 *tp)
570{
571 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
572 tp->last_tag << 24);
573 mmiowb();
574
575 /* When doing tagged status, this work check is unnecessary.
576 * The last_tag we write above tells the chip which piece of
577 * work we've completed.
578 */
579 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
580 tg3_has_work(tp))
581 tw32(HOSTCC_MODE, tp->coalesce_mode |
582 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
583}
584
585static inline void tg3_netif_stop(struct tg3 *tp)
586{
587 tp->dev->trans_start = jiffies; /* prevent tx timeout */
588 netif_poll_disable(tp->dev);
589 netif_tx_disable(tp->dev);
590}
591
592static inline void tg3_netif_start(struct tg3 *tp)
593{
594 netif_wake_queue(tp->dev);
595 /* NOTE: unconditional netif_wake_queue is only appropriate
596 * so long as all callers are assured to have free tx slots
597 * (such as after tg3_init_hw)
598 */
599 netif_poll_enable(tp->dev);
600 tp->hw_status->status |= SD_STATUS_UPDATED;
601 tg3_enable_ints(tp);
602}
603
604static void tg3_switch_clocks(struct tg3 *tp)
605{
606 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
607 u32 orig_clock_ctrl;
608
609 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
610 return;
611
612 orig_clock_ctrl = clock_ctrl;
613 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
614 CLOCK_CTRL_CLKRUN_OENABLE |
615 0x1f);
616 tp->pci_clock_ctrl = clock_ctrl;
617
618 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
619 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
620 tw32_wait_f(TG3PCI_CLOCK_CTRL,
621 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
622 }
623 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
624 tw32_wait_f(TG3PCI_CLOCK_CTRL,
625 clock_ctrl |
626 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
627 40);
628 tw32_wait_f(TG3PCI_CLOCK_CTRL,
629 clock_ctrl | (CLOCK_CTRL_ALTCLK),
630 40);
631 }
632 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
633}
634
635#define PHY_BUSY_LOOPS 5000
636
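/* MII PHY access: a read or write frame is written to MAC_MI_COM and the
 * MI_COM_BUSY bit is polled until the transaction completes.  Auto-polling
 * is temporarily disabled around the access and restored afterwards.
 */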
637static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
638{
639 u32 frame_val;
640 unsigned int loops;
641 int ret;
642
643 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
644 tw32_f(MAC_MI_MODE,
645 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
646 udelay(80);
647 }
648
649 *val = 0x0;
650
651 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
652 MI_COM_PHY_ADDR_MASK);
653 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
654 MI_COM_REG_ADDR_MASK);
655 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
656
657 tw32_f(MAC_MI_COM, frame_val);
658
659 loops = PHY_BUSY_LOOPS;
660 while (loops != 0) {
661 udelay(10);
662 frame_val = tr32(MAC_MI_COM);
663
664 if ((frame_val & MI_COM_BUSY) == 0) {
665 udelay(5);
666 frame_val = tr32(MAC_MI_COM);
667 break;
668 }
669 loops -= 1;
670 }
671
672 ret = -EBUSY;
673 if (loops != 0) {
674 *val = frame_val & MI_COM_DATA_MASK;
675 ret = 0;
676 }
677
678 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
679 tw32_f(MAC_MI_MODE, tp->mi_mode);
680 udelay(80);
681 }
682
683 return ret;
684}
685
686static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
687{
688 u32 frame_val;
689 unsigned int loops;
690 int ret;
691
692 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
693 tw32_f(MAC_MI_MODE,
694 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
695 udelay(80);
696 }
697
698 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
699 MI_COM_PHY_ADDR_MASK);
700 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
701 MI_COM_REG_ADDR_MASK);
702 frame_val |= (val & MI_COM_DATA_MASK);
703 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
704
705 tw32_f(MAC_MI_COM, frame_val);
706
707 loops = PHY_BUSY_LOOPS;
708 while (loops != 0) {
709 udelay(10);
710 frame_val = tr32(MAC_MI_COM);
711 if ((frame_val & MI_COM_BUSY) == 0) {
712 udelay(5);
713 frame_val = tr32(MAC_MI_COM);
714 break;
715 }
716 loops -= 1;
717 }
718
719 ret = -EBUSY;
720 if (loops != 0)
721 ret = 0;
722
723 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
724 tw32_f(MAC_MI_MODE, tp->mi_mode);
725 udelay(80);
726 }
727
728 return ret;
729}
730
731static void tg3_phy_set_wirespeed(struct tg3 *tp)
732{
733 u32 val;
734
735 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
736 return;
737
738 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
739 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
740 tg3_writephy(tp, MII_TG3_AUX_CTRL,
741 (val | (1 << 15) | (1 << 4)));
742}
743
744static int tg3_bmcr_reset(struct tg3 *tp)
745{
746 u32 phy_control;
747 int limit, err;
748
749 /* OK, reset it, and poll the BMCR_RESET bit until it
750 * clears or we time out.
751 */
752 phy_control = BMCR_RESET;
753 err = tg3_writephy(tp, MII_BMCR, phy_control);
754 if (err != 0)
755 return -EBUSY;
756
757 limit = 5000;
758 while (limit--) {
759 err = tg3_readphy(tp, MII_BMCR, &phy_control);
760 if (err != 0)
761 return -EBUSY;
762
763 if ((phy_control & BMCR_RESET) == 0) {
764 udelay(40);
765 break;
766 }
767 udelay(10);
768 }
769 if (limit <= 0)
770 return -EBUSY;
771
772 return 0;
773}
774
775static int tg3_wait_macro_done(struct tg3 *tp)
776{
777 int limit = 100;
778
779 while (limit--) {
780 u32 tmp32;
781
782 if (!tg3_readphy(tp, 0x16, &tmp32)) {
783 if ((tmp32 & 0x1000) == 0)
784 break;
785 }
786 }
787 if (limit <= 0)
788 return -EBUSY;
789
790 return 0;
791}
792
793static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
794{
795 static const u32 test_pat[4][6] = {
796 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
797 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
798 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
799 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
800 };
801 int chan;
802
803 for (chan = 0; chan < 4; chan++) {
804 int i;
805
806 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
807 (chan * 0x2000) | 0x0200);
808 tg3_writephy(tp, 0x16, 0x0002);
809
810 for (i = 0; i < 6; i++)
811 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
812 test_pat[chan][i]);
813
814 tg3_writephy(tp, 0x16, 0x0202);
815 if (tg3_wait_macro_done(tp)) {
816 *resetp = 1;
817 return -EBUSY;
818 }
819
820 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
821 (chan * 0x2000) | 0x0200);
822 tg3_writephy(tp, 0x16, 0x0082);
823 if (tg3_wait_macro_done(tp)) {
824 *resetp = 1;
825 return -EBUSY;
826 }
827
828 tg3_writephy(tp, 0x16, 0x0802);
829 if (tg3_wait_macro_done(tp)) {
830 *resetp = 1;
831 return -EBUSY;
832 }
833
834 for (i = 0; i < 6; i += 2) {
835 u32 low, high;
836
837 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
838 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
839 tg3_wait_macro_done(tp)) {
840 *resetp = 1;
841 return -EBUSY;
842 }
843 low &= 0x7fff;
844 high &= 0x000f;
845 if (low != test_pat[chan][i] ||
846 high != test_pat[chan][i+1]) {
847 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
848 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
849 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
850
851 return -EBUSY;
852 }
853 }
854 }
855
856 return 0;
857}
858
859static int tg3_phy_reset_chanpat(struct tg3 *tp)
860{
861 int chan;
862
863 for (chan = 0; chan < 4; chan++) {
864 int i;
865
866 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
867 (chan * 0x2000) | 0x0200);
868 tg3_writephy(tp, 0x16, 0x0002);
869 for (i = 0; i < 6; i++)
870 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
871 tg3_writephy(tp, 0x16, 0x0202);
872 if (tg3_wait_macro_done(tp))
873 return -EBUSY;
874 }
875
876 return 0;
877}
878
879static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
880{
881 u32 reg32, phy9_orig;
882 int retries, do_phy_reset, err;
883
884 retries = 10;
885 do_phy_reset = 1;
886 do {
887 if (do_phy_reset) {
888 err = tg3_bmcr_reset(tp);
889 if (err)
890 return err;
891 do_phy_reset = 0;
892 }
893
894 /* Disable transmitter and interrupt. */
895 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
896 continue;
897
898 reg32 |= 0x3000;
899 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
900
901 /* Set full-duplex, 1000 mbps. */
902 tg3_writephy(tp, MII_BMCR,
903 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
904
905 /* Set to master mode. */
906 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
907 continue;
908
909 tg3_writephy(tp, MII_TG3_CTRL,
910 (MII_TG3_CTRL_AS_MASTER |
911 MII_TG3_CTRL_ENABLE_AS_MASTER));
912
913 /* Enable SM_DSP_CLOCK and 6dB. */
914 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
915
916 /* Block the PHY control access. */
917 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
918 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
919
920 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
921 if (!err)
922 break;
923 } while (--retries);
924
925 err = tg3_phy_reset_chanpat(tp);
926 if (err)
927 return err;
928
929 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
930 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
931
932 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
933 tg3_writephy(tp, 0x16, 0x0000);
934
935 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
937 /* Set Extended packet length bit for jumbo frames */
938 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
939 }
940 else {
941 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
942 }
943
944 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
945
946 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
947 reg32 &= ~0x3000;
948 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
949 } else if (!err)
950 err = -EBUSY;
951
952 return err;
953}
954
955/* This will reset the tigon3 PHY if there is no valid
956 * link unless the FORCE argument is non-zero.
957 */
958static int tg3_phy_reset(struct tg3 *tp)
959{
960 u32 phy_status;
961 int err;
962
963 err = tg3_readphy(tp, MII_BMSR, &phy_status);
964 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
965 if (err != 0)
966 return -EBUSY;
967
968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
971 err = tg3_phy_reset_5703_4_5(tp);
972 if (err)
973 return err;
974 goto out;
975 }
976
977 err = tg3_bmcr_reset(tp);
978 if (err)
979 return err;
980
981out:
982 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
983 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
984 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
985 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
986 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
987 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
988 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
989 }
990 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
991 tg3_writephy(tp, 0x1c, 0x8d68);
992 tg3_writephy(tp, 0x1c, 0x8d68);
993 }
994 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
995 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
996 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
997 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
998 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
999 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1000 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1001 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1002 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1003 }
1004 /* Set Extended packet length bit (bit 14) on all chips that */
1005 /* support jumbo frames */
1006 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1007 /* Cannot do read-modify-write on 5401 */
1008 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1009 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1010 u32 phy_reg;
1011
1012 /* Set bit 14 with read-modify-write to preserve other bits */
1013 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1014 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1015 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1016 }
1017
1018 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1019 * jumbo frames transmission.
1020 */
1021 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1022 u32 phy_reg;
1023
1024 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1025 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1026 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1027 }
1028
1029 tg3_phy_set_wirespeed(tp);
1030 return 0;
1031}
1032
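/* Adjust the GPIOs in GRC_LOCAL_CTRL that control auxiliary power.  On
 * dual-port devices (5704/5714) the peer port's WOL/ASF flags are taken
 * into account as well.
 */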
1033static void tg3_frob_aux_power(struct tg3 *tp)
1034{
1035 struct tg3 *tp_peer = tp;
1036
1037 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1038 return;
1039
1040 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1041 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1042 struct net_device *dev_peer;
1043
1044 dev_peer = pci_get_drvdata(tp->pdev_peer);
1045 /* remove_one() may have been run on the peer. */
1046 if (!dev_peer)
1047 tp_peer = tp;
1048 else
1049 tp_peer = netdev_priv(dev_peer);
1050 }
1051
1052 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1053 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1054 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1055 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1058 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1059 (GRC_LCLCTRL_GPIO_OE0 |
1060 GRC_LCLCTRL_GPIO_OE1 |
1061 GRC_LCLCTRL_GPIO_OE2 |
1062 GRC_LCLCTRL_GPIO_OUTPUT0 |
1063 GRC_LCLCTRL_GPIO_OUTPUT1),
1064 100);
1065 } else {
1066 u32 no_gpio2;
1067 u32 grc_local_ctrl = 0;
1068
1069 if (tp_peer != tp &&
1070 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1071 return;
1072
1073 /* Workaround to prevent overdrawing Amps. */
1074 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1075 ASIC_REV_5714) {
1076 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1077 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1078 grc_local_ctrl, 100);
1079 }
1080
1081 /* On 5753 and variants, GPIO2 cannot be used. */
1082 no_gpio2 = tp->nic_sram_data_cfg &
1083 NIC_SRAM_DATA_CFG_NO_GPIO2;
1084
1085 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1086 GRC_LCLCTRL_GPIO_OE1 |
1087 GRC_LCLCTRL_GPIO_OE2 |
1088 GRC_LCLCTRL_GPIO_OUTPUT1 |
1089 GRC_LCLCTRL_GPIO_OUTPUT2;
1090 if (no_gpio2) {
1091 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1092 GRC_LCLCTRL_GPIO_OUTPUT2);
1093 }
1094 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1095 grc_local_ctrl, 100);
1096
1097 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1098
1099 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1100 grc_local_ctrl, 100);
1101
1102 if (!no_gpio2) {
1103 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1104 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1105 grc_local_ctrl, 100);
1106 }
1107 }
1108 } else {
1109 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1110 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1111 if (tp_peer != tp &&
1112 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1113 return;
1114
1115 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1116 (GRC_LCLCTRL_GPIO_OE1 |
1117 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1118
1119 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1120 GRC_LCLCTRL_GPIO_OE1, 100);
1121
1122 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1123 (GRC_LCLCTRL_GPIO_OE1 |
1124 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1125 }
1126 }
1127}
1128
1129static int tg3_setup_phy(struct tg3 *, int);
1130
1131#define RESET_KIND_SHUTDOWN 0
1132#define RESET_KIND_INIT 1
1133#define RESET_KIND_SUSPEND 2
1134
1135static void tg3_write_sig_post_reset(struct tg3 *, int);
1136static int tg3_halt_cpu(struct tg3 *, u32);
1137static int tg3_nvram_lock(struct tg3 *);
1138static void tg3_nvram_unlock(struct tg3 *);
1139
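/* Transition the device to the requested PCI power state.  For the
 * low-power states this drops the copper link to 10Mb, writes the WOL
 * mailbox for the firmware, optionally arms magic-packet wakeup,
 * switches clocks, powers down the PHY when WOL/ASF are not in use,
 * and finally writes the PM control register.
 */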
1140static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1141{
1142 u32 misc_host_ctrl;
1143 u16 power_control, power_caps;
1144 int pm = tp->pm_cap;
1145
1146 /* Make sure register accesses (indirect or otherwise)
1147 * will function correctly.
1148 */
1149 pci_write_config_dword(tp->pdev,
1150 TG3PCI_MISC_HOST_CTRL,
1151 tp->misc_host_ctrl);
1152
1153 pci_read_config_word(tp->pdev,
1154 pm + PCI_PM_CTRL,
1155 &power_control);
1156 power_control |= PCI_PM_CTRL_PME_STATUS;
1157 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1158 switch (state) {
1159 case PCI_D0:
1160 power_control |= 0;
1161 pci_write_config_word(tp->pdev,
1162 pm + PCI_PM_CTRL,
1163 power_control);
1164 udelay(100); /* Delay after power state change */
1165
1166 /* Switch out of Vaux if it is not a LOM */
1167 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1168 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1169
1170 return 0;
1171
1172 case PCI_D1:
1173 power_control |= 1;
1174 break;
1175
1176 case PCI_D2:
1177 power_control |= 2;
1178 break;
1179
1180 case PCI_D3hot:
1181 power_control |= 3;
1182 break;
1183
1184 default:
1185 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1186 "requested.\n",
1187 tp->dev->name, state);
1188 return -EINVAL;
1189 };
1190
1191 power_control |= PCI_PM_CTRL_PME_ENABLE;
1192
1193 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1194 tw32(TG3PCI_MISC_HOST_CTRL,
1195 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1196
1197 if (tp->link_config.phy_is_low_power == 0) {
1198 tp->link_config.phy_is_low_power = 1;
1199 tp->link_config.orig_speed = tp->link_config.speed;
1200 tp->link_config.orig_duplex = tp->link_config.duplex;
1201 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1202 }
1203
1204 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1205 tp->link_config.speed = SPEED_10;
1206 tp->link_config.duplex = DUPLEX_HALF;
1207 tp->link_config.autoneg = AUTONEG_ENABLE;
1208 tg3_setup_phy(tp, 0);
1209 }
1210
1211 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1212 int i;
1213 u32 val;
1214
1215 for (i = 0; i < 200; i++) {
1216 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1217 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1218 break;
1219 msleep(1);
1220 }
1221 }
1222 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1223 WOL_DRV_STATE_SHUTDOWN |
1224 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1225
1226 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1227
1228 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1229 u32 mac_mode;
1230
1231 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1232 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1233 udelay(40);
1234
1235 mac_mode = MAC_MODE_PORT_MODE_MII;
1236
1237 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1238 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1239 mac_mode |= MAC_MODE_LINK_POLARITY;
1240 } else {
1241 mac_mode = MAC_MODE_PORT_MODE_TBI;
1242 }
1243
1244 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1245 tw32(MAC_LED_CTRL, tp->led_ctrl);
1246
1247 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1248 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1249 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1250
1251 tw32_f(MAC_MODE, mac_mode);
1252 udelay(100);
1253
1254 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1255 udelay(10);
1256 }
1257
1258 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1259 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1261 u32 base_val;
1262
1263 base_val = tp->pci_clock_ctrl;
1264 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1265 CLOCK_CTRL_TXCLK_DISABLE);
1266
1267 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1268 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1269 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1270 /* do nothing */
1271 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1272 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1273 u32 newbits1, newbits2;
1274
1275 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1276 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1277 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1278 CLOCK_CTRL_TXCLK_DISABLE |
1279 CLOCK_CTRL_ALTCLK);
1280 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1281 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1282 newbits1 = CLOCK_CTRL_625_CORE;
1283 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1284 } else {
1285 newbits1 = CLOCK_CTRL_ALTCLK;
1286 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1287 }
1288
1289 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1290 40);
1291
1292 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1293 40);
1294
1295 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1296 u32 newbits3;
1297
1298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1299 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1300 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1301 CLOCK_CTRL_TXCLK_DISABLE |
1302 CLOCK_CTRL_44MHZ_CORE);
1303 } else {
1304 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1305 }
1306
1307 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1308 tp->pci_clock_ctrl | newbits3, 40);
1309 }
1310 }
1311
1312 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1313 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1314 /* Turn off the PHY */
1315 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1316 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1317 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1318 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1319 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1320 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1321 }
1322 }
1323
1324 tg3_frob_aux_power(tp);
1325
1326 /* Workaround for unstable PLL clock */
1327 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1328 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1329 u32 val = tr32(0x7d00);
1330
1331 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1332 tw32(0x7d00, val);
1333 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1334 int err;
1335
1336 err = tg3_nvram_lock(tp);
1337 tg3_halt_cpu(tp, RX_CPU_BASE);
1338 if (!err)
1339 tg3_nvram_unlock(tp);
1340 }
1341 }
1342
1343 /* Finally, set the new power state. */
1344 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1345 udelay(100); /* Delay after power state change */
1346
1347 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1348
1349 return 0;
1350}
1351
1352static void tg3_link_report(struct tg3 *tp)
1353{
1354 if (!netif_carrier_ok(tp->dev)) {
1355 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1356 } else {
1357 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1358 tp->dev->name,
1359 (tp->link_config.active_speed == SPEED_1000 ?
1360 1000 :
1361 (tp->link_config.active_speed == SPEED_100 ?
1362 100 : 10)),
1363 (tp->link_config.active_duplex == DUPLEX_FULL ?
1364 "full" : "half"));
1365
1366 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1367 "%s for RX.\n",
1368 tp->dev->name,
1369 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1370 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1371 }
1372}
1373
1374static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1375{
1376 u32 new_tg3_flags = 0;
1377 u32 old_rx_mode = tp->rx_mode;
1378 u32 old_tx_mode = tp->tx_mode;
1379
1380 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1381
1382 /* Convert 1000BaseX flow control bits to 1000BaseT
1383 * bits before resolving flow control.
1384 */
1385 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1386 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1387 ADVERTISE_PAUSE_ASYM);
1388 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1389
1390 if (local_adv & ADVERTISE_1000XPAUSE)
1391 local_adv |= ADVERTISE_PAUSE_CAP;
1392 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1393 local_adv |= ADVERTISE_PAUSE_ASYM;
1394 if (remote_adv & LPA_1000XPAUSE)
1395 remote_adv |= LPA_PAUSE_CAP;
1396 if (remote_adv & LPA_1000XPAUSE_ASYM)
1397 remote_adv |= LPA_PAUSE_ASYM;
1398 }
1399
1400 if (local_adv & ADVERTISE_PAUSE_CAP) {
1401 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1402 if (remote_adv & LPA_PAUSE_CAP)
1403 new_tg3_flags |=
1404 (TG3_FLAG_RX_PAUSE |
1405 TG3_FLAG_TX_PAUSE);
1406 else if (remote_adv & LPA_PAUSE_ASYM)
1407 new_tg3_flags |=
1408 (TG3_FLAG_RX_PAUSE);
1409 } else {
1410 if (remote_adv & LPA_PAUSE_CAP)
1411 new_tg3_flags |=
1412 (TG3_FLAG_RX_PAUSE |
1413 TG3_FLAG_TX_PAUSE);
1414 }
1415 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1416 if ((remote_adv & LPA_PAUSE_CAP) &&
1417 (remote_adv & LPA_PAUSE_ASYM))
1418 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1419 }
1420
1421 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1422 tp->tg3_flags |= new_tg3_flags;
1423 } else {
1424 new_tg3_flags = tp->tg3_flags;
1425 }
1426
1427 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1428 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1429 else
1430 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1431
1432 if (old_rx_mode != tp->rx_mode) {
1433 tw32_f(MAC_RX_MODE, tp->rx_mode);
1434 }
1435
1436 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1437 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1438 else
1439 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1440
1441 if (old_tx_mode != tp->tx_mode) {
1442 tw32_f(MAC_TX_MODE, tp->tx_mode);
1443 }
1444}
1445
1446static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1447{
1448 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1449 case MII_TG3_AUX_STAT_10HALF:
1450 *speed = SPEED_10;
1451 *duplex = DUPLEX_HALF;
1452 break;
1453
1454 case MII_TG3_AUX_STAT_10FULL:
1455 *speed = SPEED_10;
1456 *duplex = DUPLEX_FULL;
1457 break;
1458
1459 case MII_TG3_AUX_STAT_100HALF:
1460 *speed = SPEED_100;
1461 *duplex = DUPLEX_HALF;
1462 break;
1463
1464 case MII_TG3_AUX_STAT_100FULL:
1465 *speed = SPEED_100;
1466 *duplex = DUPLEX_FULL;
1467 break;
1468
1469 case MII_TG3_AUX_STAT_1000HALF:
1470 *speed = SPEED_1000;
1471 *duplex = DUPLEX_HALF;
1472 break;
1473
1474 case MII_TG3_AUX_STAT_1000FULL:
1475 *speed = SPEED_1000;
1476 *duplex = DUPLEX_FULL;
1477 break;
1478
1479 default:
1480 *speed = SPEED_INVALID;
1481 *duplex = DUPLEX_INVALID;
1482 break;
1483 };
1484}
1485
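/* Program the copper PHY advertisement registers from link_config and
 * restart autonegotiation, or force a fixed speed/duplex when autoneg
 * is disabled.
 */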
1486static void tg3_phy_copper_begin(struct tg3 *tp)
1487{
1488 u32 new_adv;
1489 int i;
1490
1491 if (tp->link_config.phy_is_low_power) {
1492 /* Entering low power mode. Disable gigabit and
1493 * 100baseT advertisements.
1494 */
1495 tg3_writephy(tp, MII_TG3_CTRL, 0);
1496
1497 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1498 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1499 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1500 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1501
1502 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1503 } else if (tp->link_config.speed == SPEED_INVALID) {
1504 tp->link_config.advertising =
1505 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1506 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1507 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1508 ADVERTISED_Autoneg | ADVERTISED_MII);
1509
1510 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1511 tp->link_config.advertising &=
1512 ~(ADVERTISED_1000baseT_Half |
1513 ADVERTISED_1000baseT_Full);
1514
1515 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1516 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1517 new_adv |= ADVERTISE_10HALF;
1518 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1519 new_adv |= ADVERTISE_10FULL;
1520 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1521 new_adv |= ADVERTISE_100HALF;
1522 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1523 new_adv |= ADVERTISE_100FULL;
1524 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1525
1526 if (tp->link_config.advertising &
1527 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1528 new_adv = 0;
1529 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1530 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1531 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1532 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1533 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1534 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1535 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1536 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1537 MII_TG3_CTRL_ENABLE_AS_MASTER);
1538 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1539 } else {
1540 tg3_writephy(tp, MII_TG3_CTRL, 0);
1541 }
1542 } else {
1543 /* Asking for a specific link mode. */
1544 if (tp->link_config.speed == SPEED_1000) {
1545 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1546 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1547
1548 if (tp->link_config.duplex == DUPLEX_FULL)
1549 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1550 else
1551 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1552 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1553 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1554 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1555 MII_TG3_CTRL_ENABLE_AS_MASTER);
1556 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1557 } else {
1558 tg3_writephy(tp, MII_TG3_CTRL, 0);
1559
1560 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1561 if (tp->link_config.speed == SPEED_100) {
1562 if (tp->link_config.duplex == DUPLEX_FULL)
1563 new_adv |= ADVERTISE_100FULL;
1564 else
1565 new_adv |= ADVERTISE_100HALF;
1566 } else {
1567 if (tp->link_config.duplex == DUPLEX_FULL)
1568 new_adv |= ADVERTISE_10FULL;
1569 else
1570 new_adv |= ADVERTISE_10HALF;
1571 }
1572 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1573 }
1574 }
1575
1576 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1577 tp->link_config.speed != SPEED_INVALID) {
1578 u32 bmcr, orig_bmcr;
1579
1580 tp->link_config.active_speed = tp->link_config.speed;
1581 tp->link_config.active_duplex = tp->link_config.duplex;
1582
1583 bmcr = 0;
1584 switch (tp->link_config.speed) {
1585 default:
1586 case SPEED_10:
1587 break;
1588
1589 case SPEED_100:
1590 bmcr |= BMCR_SPEED100;
1591 break;
1592
1593 case SPEED_1000:
1594 bmcr |= TG3_BMCR_SPEED1000;
1595 break;
1596 };
1597
1598 if (tp->link_config.duplex == DUPLEX_FULL)
1599 bmcr |= BMCR_FULLDPLX;
1600
1601 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1602 (bmcr != orig_bmcr)) {
1603 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1604 for (i = 0; i < 1500; i++) {
1605 u32 tmp;
1606
1607 udelay(10);
1608 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1609 tg3_readphy(tp, MII_BMSR, &tmp))
1610 continue;
1611 if (!(tmp & BMSR_LSTATUS)) {
1612 udelay(40);
1613 break;
1614 }
1615 }
1616 tg3_writephy(tp, MII_BMCR, bmcr);
1617 udelay(40);
1618 }
1619 } else {
1620 tg3_writephy(tp, MII_BMCR,
1621 BMCR_ANENABLE | BMCR_ANRESTART);
1622 }
1623}
1624
1625static int tg3_init_5401phy_dsp(struct tg3 *tp)
1626{
1627 int err;
1628
1629 /* Turn off tap power management. */
1630 /* Set Extended packet length bit */
1631 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1632
1633 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1634 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1635
1636 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1637 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1638
1639 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1640 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1641
1642 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1643 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1644
1645 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1646 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1647
1648 udelay(40);
1649
1650 return err;
1651}
1652
1653static int tg3_copper_is_advertising_all(struct tg3 *tp)
1654{
1655 u32 adv_reg, all_mask;
1656
1657 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1658 return 0;
1659
1660 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1661 ADVERTISE_100HALF | ADVERTISE_100FULL);
1662 if ((adv_reg & all_mask) != all_mask)
1663 return 0;
1664 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1665 u32 tg3_ctrl;
1666
1667 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1668 return 0;
1669
1670 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1671 MII_TG3_CTRL_ADV_1000_FULL);
1672 if ((tg3_ctrl & all_mask) != all_mask)
1673 return 0;
1674 }
1675 return 1;
1676}
1677
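/* Establish or re-check the copper link: reset the PHY if needed, poll
 * BMSR and the aux status register for speed/duplex, resolve flow control
 * from the advertisement registers, and program MAC_MODE to match.
 */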
1678static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1679{
1680 int current_link_up;
1681 u32 bmsr, dummy;
1682 u16 current_speed;
1683 u8 current_duplex;
1684 int i, err;
1685
1686 tw32(MAC_EVENT, 0);
1687
1688 tw32_f(MAC_STATUS,
1689 (MAC_STATUS_SYNC_CHANGED |
1690 MAC_STATUS_CFG_CHANGED |
1691 MAC_STATUS_MI_COMPLETION |
1692 MAC_STATUS_LNKSTATE_CHANGED));
1693 udelay(40);
1694
1695 tp->mi_mode = MAC_MI_MODE_BASE;
1696 tw32_f(MAC_MI_MODE, tp->mi_mode);
1697 udelay(80);
1698
1699 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1700
1701 /* Some third-party PHYs need to be reset on link going
1702 * down.
1703 */
1704 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1705 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1707 netif_carrier_ok(tp->dev)) {
1708 tg3_readphy(tp, MII_BMSR, &bmsr);
1709 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1710 !(bmsr & BMSR_LSTATUS))
1711 force_reset = 1;
1712 }
1713 if (force_reset)
1714 tg3_phy_reset(tp);
1715
1716 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1717 tg3_readphy(tp, MII_BMSR, &bmsr);
1718 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1719 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1720 bmsr = 0;
1721
1722 if (!(bmsr & BMSR_LSTATUS)) {
1723 err = tg3_init_5401phy_dsp(tp);
1724 if (err)
1725 return err;
1726
1727 tg3_readphy(tp, MII_BMSR, &bmsr);
1728 for (i = 0; i < 1000; i++) {
1729 udelay(10);
1730 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1731 (bmsr & BMSR_LSTATUS)) {
1732 udelay(40);
1733 break;
1734 }
1735 }
1736
1737 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1738 !(bmsr & BMSR_LSTATUS) &&
1739 tp->link_config.active_speed == SPEED_1000) {
1740 err = tg3_phy_reset(tp);
1741 if (!err)
1742 err = tg3_init_5401phy_dsp(tp);
1743 if (err)
1744 return err;
1745 }
1746 }
1747 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1748 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1749 /* 5701 {A0,B0} CRC bug workaround */
1750 tg3_writephy(tp, 0x15, 0x0a75);
1751 tg3_writephy(tp, 0x1c, 0x8c68);
1752 tg3_writephy(tp, 0x1c, 0x8d68);
1753 tg3_writephy(tp, 0x1c, 0x8c68);
1754 }
1755
1756 /* Clear pending interrupts... */
1757 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1758 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1759
1760 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1761 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1762 else
1763 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1764
1765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1766 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1767 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1768 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1769 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1770 else
1771 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1772 }
1773
1774 current_link_up = 0;
1775 current_speed = SPEED_INVALID;
1776 current_duplex = DUPLEX_INVALID;
1777
1778 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1779 u32 val;
1780
1781 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1782 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1783 if (!(val & (1 << 10))) {
1784 val |= (1 << 10);
1785 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1786 goto relink;
1787 }
1788 }
1789
1790 bmsr = 0;
1791 for (i = 0; i < 100; i++) {
1792 tg3_readphy(tp, MII_BMSR, &bmsr);
1793 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1794 (bmsr & BMSR_LSTATUS))
1795 break;
1796 udelay(40);
1797 }
1798
1799 if (bmsr & BMSR_LSTATUS) {
1800 u32 aux_stat, bmcr;
1801
1802 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1803 for (i = 0; i < 2000; i++) {
1804 udelay(10);
1805 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1806 aux_stat)
1807 break;
1808 }
1809
1810 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1811 &current_speed,
1812 &current_duplex);
1813
1814 bmcr = 0;
1815 for (i = 0; i < 200; i++) {
1816 tg3_readphy(tp, MII_BMCR, &bmcr);
1817 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1818 continue;
1819 if (bmcr && bmcr != 0x7fff)
1820 break;
1821 udelay(10);
1822 }
1823
1824 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1825 if (bmcr & BMCR_ANENABLE) {
1826 current_link_up = 1;
1827
1828 /* Force autoneg restart if we are exiting
1829 * low power mode.
1830 */
1831 if (!tg3_copper_is_advertising_all(tp))
1832 current_link_up = 0;
1833 } else {
1834 current_link_up = 0;
1835 }
1836 } else {
1837 if (!(bmcr & BMCR_ANENABLE) &&
1838 tp->link_config.speed == current_speed &&
1839 tp->link_config.duplex == current_duplex) {
1840 current_link_up = 1;
1841 } else {
1842 current_link_up = 0;
1843 }
1844 }
1845
1846 tp->link_config.active_speed = current_speed;
1847 tp->link_config.active_duplex = current_duplex;
1848 }
1849
1850 if (current_link_up == 1 &&
1851 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1852 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1853 u32 local_adv, remote_adv;
1854
1855 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1856 local_adv = 0;
1857 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1858
1859 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1860 remote_adv = 0;
1861
1862 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1863
1864 /* If we are not advertising full pause capability,
1865 * something is wrong. Bring the link down and reconfigure.
1866 */
1867 if (local_adv != ADVERTISE_PAUSE_CAP) {
1868 current_link_up = 0;
1869 } else {
1870 tg3_setup_flow_control(tp, local_adv, remote_adv);
1871 }
1872 }
1873relink:
6921d201 1874 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1da177e4
LT
1875 u32 tmp;
1876
1877 tg3_phy_copper_begin(tp);
1878
1879 tg3_readphy(tp, MII_BMSR, &tmp);
1880 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1881 (tmp & BMSR_LSTATUS))
1882 current_link_up = 1;
1883 }
1884
1885 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1886 if (current_link_up == 1) {
1887 if (tp->link_config.active_speed == SPEED_100 ||
1888 tp->link_config.active_speed == SPEED_10)
1889 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1890 else
1891 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1892 } else
1893 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1894
1895 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1896 if (tp->link_config.active_duplex == DUPLEX_HALF)
1897 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1898
1899 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1901 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1902 (current_link_up == 1 &&
1903 tp->link_config.active_speed == SPEED_10))
1904 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1905 } else {
1906 if (current_link_up == 1)
1907 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1908 }
1909
1910 /* ??? Without this setting Netgear GA302T PHY does not
1911 * ??? send/receive packets...
1912 */
1913 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1914 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1915 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1916 tw32_f(MAC_MI_MODE, tp->mi_mode);
1917 udelay(80);
1918 }
1919
1920 tw32_f(MAC_MODE, tp->mac_mode);
1921 udelay(40);
1922
1923 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1924 /* Polled via timer. */
1925 tw32_f(MAC_EVENT, 0);
1926 } else {
1927 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1928 }
1929 udelay(40);
1930
1931 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1932 current_link_up == 1 &&
1933 tp->link_config.active_speed == SPEED_1000 &&
1934 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1935 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1936 udelay(120);
1937 tw32_f(MAC_STATUS,
1938 (MAC_STATUS_SYNC_CHANGED |
1939 MAC_STATUS_CFG_CHANGED));
1940 udelay(40);
1941 tg3_write_mem(tp,
1942 NIC_SRAM_FIRMWARE_MBOX,
1943 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1944 }
1945
1946 if (current_link_up != netif_carrier_ok(tp->dev)) {
1947 if (current_link_up)
1948 netif_carrier_on(tp->dev);
1949 else
1950 netif_carrier_off(tp->dev);
1951 tg3_link_report(tp);
1952 }
1953
1954 return 0;
1955}
1956
1957struct tg3_fiber_aneginfo {
1958 int state;
1959#define ANEG_STATE_UNKNOWN 0
1960#define ANEG_STATE_AN_ENABLE 1
1961#define ANEG_STATE_RESTART_INIT 2
1962#define ANEG_STATE_RESTART 3
1963#define ANEG_STATE_DISABLE_LINK_OK 4
1964#define ANEG_STATE_ABILITY_DETECT_INIT 5
1965#define ANEG_STATE_ABILITY_DETECT 6
1966#define ANEG_STATE_ACK_DETECT_INIT 7
1967#define ANEG_STATE_ACK_DETECT 8
1968#define ANEG_STATE_COMPLETE_ACK_INIT 9
1969#define ANEG_STATE_COMPLETE_ACK 10
1970#define ANEG_STATE_IDLE_DETECT_INIT 11
1971#define ANEG_STATE_IDLE_DETECT 12
1972#define ANEG_STATE_LINK_OK 13
1973#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1974#define ANEG_STATE_NEXT_PAGE_WAIT 15
1975
1976 u32 flags;
1977#define MR_AN_ENABLE 0x00000001
1978#define MR_RESTART_AN 0x00000002
1979#define MR_AN_COMPLETE 0x00000004
1980#define MR_PAGE_RX 0x00000008
1981#define MR_NP_LOADED 0x00000010
1982#define MR_TOGGLE_TX 0x00000020
1983#define MR_LP_ADV_FULL_DUPLEX 0x00000040
1984#define MR_LP_ADV_HALF_DUPLEX 0x00000080
1985#define MR_LP_ADV_SYM_PAUSE 0x00000100
1986#define MR_LP_ADV_ASYM_PAUSE 0x00000200
1987#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1988#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1989#define MR_LP_ADV_NEXT_PAGE 0x00001000
1990#define MR_TOGGLE_RX 0x00002000
1991#define MR_NP_RX 0x00004000
1992
1993#define MR_LINK_OK 0x80000000
1994
1995 unsigned long link_time, cur_time;
1996
1997 u32 ability_match_cfg;
1998 int ability_match_count;
1999
2000 char ability_match, idle_match, ack_match;
2001
2002 u32 txconfig, rxconfig;
2003#define ANEG_CFG_NP 0x00000080
2004#define ANEG_CFG_ACK 0x00000040
2005#define ANEG_CFG_RF2 0x00000020
2006#define ANEG_CFG_RF1 0x00000010
2007#define ANEG_CFG_PS2 0x00000001
2008#define ANEG_CFG_PS1 0x00008000
2009#define ANEG_CFG_HD 0x00004000
2010#define ANEG_CFG_FD 0x00002000
2011#define ANEG_CFG_INVAL 0x00001f06
2012
2013};
2014#define ANEG_OK 0
2015#define ANEG_DONE 1
2016#define ANEG_TIMER_ENAB 2
2017#define ANEG_FAILED -1
2018
2019#define ANEG_STATE_SETTLE_TIME 10000
2020
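/* Descriptive note: software autonegotiation state machine for the fiber
 * (1000BASE-X) link. Each call advances one tick through the ability-detect,
 * ack-detect, complete-ack and idle-detect phases, in the spirit of the
 * IEEE 802.3 clause 37 arbitration process; it is driven from
 * fiber_autoneg() below.
 */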
2021static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2022 struct tg3_fiber_aneginfo *ap)
2023{
2024 unsigned long delta;
2025 u32 rx_cfg_reg;
2026 int ret;
2027
2028 if (ap->state == ANEG_STATE_UNKNOWN) {
2029 ap->rxconfig = 0;
2030 ap->link_time = 0;
2031 ap->cur_time = 0;
2032 ap->ability_match_cfg = 0;
2033 ap->ability_match_count = 0;
2034 ap->ability_match = 0;
2035 ap->idle_match = 0;
2036 ap->ack_match = 0;
2037 }
2038 ap->cur_time++;
2039
2040 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2041 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2042
2043 if (rx_cfg_reg != ap->ability_match_cfg) {
2044 ap->ability_match_cfg = rx_cfg_reg;
2045 ap->ability_match = 0;
2046 ap->ability_match_count = 0;
2047 } else {
2048 if (++ap->ability_match_count > 1) {
2049 ap->ability_match = 1;
2050 ap->ability_match_cfg = rx_cfg_reg;
2051 }
2052 }
2053 if (rx_cfg_reg & ANEG_CFG_ACK)
2054 ap->ack_match = 1;
2055 else
2056 ap->ack_match = 0;
2057
2058 ap->idle_match = 0;
2059 } else {
2060 ap->idle_match = 1;
2061 ap->ability_match_cfg = 0;
2062 ap->ability_match_count = 0;
2063 ap->ability_match = 0;
2064 ap->ack_match = 0;
2065
2066 rx_cfg_reg = 0;
2067 }
2068
2069 ap->rxconfig = rx_cfg_reg;
2070 ret = ANEG_OK;
2071
2072 switch(ap->state) {
2073 case ANEG_STATE_UNKNOWN:
2074 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2075 ap->state = ANEG_STATE_AN_ENABLE;
2076
2077 /* fallthru */
2078 case ANEG_STATE_AN_ENABLE:
2079 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2080 if (ap->flags & MR_AN_ENABLE) {
2081 ap->link_time = 0;
2082 ap->cur_time = 0;
2083 ap->ability_match_cfg = 0;
2084 ap->ability_match_count = 0;
2085 ap->ability_match = 0;
2086 ap->idle_match = 0;
2087 ap->ack_match = 0;
2088
2089 ap->state = ANEG_STATE_RESTART_INIT;
2090 } else {
2091 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2092 }
2093 break;
2094
2095 case ANEG_STATE_RESTART_INIT:
2096 ap->link_time = ap->cur_time;
2097 ap->flags &= ~(MR_NP_LOADED);
2098 ap->txconfig = 0;
2099 tw32(MAC_TX_AUTO_NEG, 0);
2100 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2101 tw32_f(MAC_MODE, tp->mac_mode);
2102 udelay(40);
2103
2104 ret = ANEG_TIMER_ENAB;
2105 ap->state = ANEG_STATE_RESTART;
2106
2107 /* fallthru */
2108 case ANEG_STATE_RESTART:
2109 delta = ap->cur_time - ap->link_time;
2110 if (delta > ANEG_STATE_SETTLE_TIME) {
2111 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2112 } else {
2113 ret = ANEG_TIMER_ENAB;
2114 }
2115 break;
2116
2117 case ANEG_STATE_DISABLE_LINK_OK:
2118 ret = ANEG_DONE;
2119 break;
2120
2121 case ANEG_STATE_ABILITY_DETECT_INIT:
2122 ap->flags &= ~(MR_TOGGLE_TX);
2123 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2124 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2125 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2126 tw32_f(MAC_MODE, tp->mac_mode);
2127 udelay(40);
2128
2129 ap->state = ANEG_STATE_ABILITY_DETECT;
2130 break;
2131
2132 case ANEG_STATE_ABILITY_DETECT:
2133 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2134 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2135 }
2136 break;
2137
2138 case ANEG_STATE_ACK_DETECT_INIT:
2139 ap->txconfig |= ANEG_CFG_ACK;
2140 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2141 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2142 tw32_f(MAC_MODE, tp->mac_mode);
2143 udelay(40);
2144
2145 ap->state = ANEG_STATE_ACK_DETECT;
2146
2147 /* fallthru */
2148 case ANEG_STATE_ACK_DETECT:
2149 if (ap->ack_match != 0) {
2150 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2151 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2152 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2153 } else {
2154 ap->state = ANEG_STATE_AN_ENABLE;
2155 }
2156 } else if (ap->ability_match != 0 &&
2157 ap->rxconfig == 0) {
2158 ap->state = ANEG_STATE_AN_ENABLE;
2159 }
2160 break;
2161
2162 case ANEG_STATE_COMPLETE_ACK_INIT:
2163 if (ap->rxconfig & ANEG_CFG_INVAL) {
2164 ret = ANEG_FAILED;
2165 break;
2166 }
2167 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2168 MR_LP_ADV_HALF_DUPLEX |
2169 MR_LP_ADV_SYM_PAUSE |
2170 MR_LP_ADV_ASYM_PAUSE |
2171 MR_LP_ADV_REMOTE_FAULT1 |
2172 MR_LP_ADV_REMOTE_FAULT2 |
2173 MR_LP_ADV_NEXT_PAGE |
2174 MR_TOGGLE_RX |
2175 MR_NP_RX);
2176 if (ap->rxconfig & ANEG_CFG_FD)
2177 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2178 if (ap->rxconfig & ANEG_CFG_HD)
2179 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2180 if (ap->rxconfig & ANEG_CFG_PS1)
2181 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2182 if (ap->rxconfig & ANEG_CFG_PS2)
2183 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2184 if (ap->rxconfig & ANEG_CFG_RF1)
2185 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2186 if (ap->rxconfig & ANEG_CFG_RF2)
2187 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2188 if (ap->rxconfig & ANEG_CFG_NP)
2189 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2190
2191 ap->link_time = ap->cur_time;
2192
2193 ap->flags ^= (MR_TOGGLE_TX);
2194 if (ap->rxconfig & 0x0008)
2195 ap->flags |= MR_TOGGLE_RX;
2196 if (ap->rxconfig & ANEG_CFG_NP)
2197 ap->flags |= MR_NP_RX;
2198 ap->flags |= MR_PAGE_RX;
2199
2200 ap->state = ANEG_STATE_COMPLETE_ACK;
2201 ret = ANEG_TIMER_ENAB;
2202 break;
2203
2204 case ANEG_STATE_COMPLETE_ACK:
2205 if (ap->ability_match != 0 &&
2206 ap->rxconfig == 0) {
2207 ap->state = ANEG_STATE_AN_ENABLE;
2208 break;
2209 }
2210 delta = ap->cur_time - ap->link_time;
2211 if (delta > ANEG_STATE_SETTLE_TIME) {
2212 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2213 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2214 } else {
2215 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2216 !(ap->flags & MR_NP_RX)) {
2217 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2218 } else {
2219 ret = ANEG_FAILED;
2220 }
2221 }
2222 }
2223 break;
2224
2225 case ANEG_STATE_IDLE_DETECT_INIT:
2226 ap->link_time = ap->cur_time;
2227 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2228 tw32_f(MAC_MODE, tp->mac_mode);
2229 udelay(40);
2230
2231 ap->state = ANEG_STATE_IDLE_DETECT;
2232 ret = ANEG_TIMER_ENAB;
2233 break;
2234
2235 case ANEG_STATE_IDLE_DETECT:
2236 if (ap->ability_match != 0 &&
2237 ap->rxconfig == 0) {
2238 ap->state = ANEG_STATE_AN_ENABLE;
2239 break;
2240 }
2241 delta = ap->cur_time - ap->link_time;
2242 if (delta > ANEG_STATE_SETTLE_TIME) {
2243 /* XXX another gem from the Broadcom driver :( */
2244 ap->state = ANEG_STATE_LINK_OK;
2245 }
2246 break;
2247
2248 case ANEG_STATE_LINK_OK:
2249 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2250 ret = ANEG_DONE;
2251 break;
2252
2253 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2254 /* ??? unimplemented */
2255 break;
2256
2257 case ANEG_STATE_NEXT_PAGE_WAIT:
2258 /* ??? unimplemented */
2259 break;
2260
2261 default:
2262 ret = ANEG_FAILED;
2263 break;
2264 };
2265
2266 return ret;
2267}
2268
2269static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2270{
2271 int res = 0;
2272 struct tg3_fiber_aneginfo aninfo;
2273 int status = ANEG_FAILED;
2274 unsigned int tick;
2275 u32 tmp;
2276
2277 tw32_f(MAC_TX_AUTO_NEG, 0);
2278
2279 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2280 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2281 udelay(40);
2282
2283 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2284 udelay(40);
2285
2286 memset(&aninfo, 0, sizeof(aninfo));
2287 aninfo.flags |= MR_AN_ENABLE;
2288 aninfo.state = ANEG_STATE_UNKNOWN;
2289 aninfo.cur_time = 0;
2290 tick = 0;
2291 while (++tick < 195000) {
2292 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2293 if (status == ANEG_DONE || status == ANEG_FAILED)
2294 break;
2295
2296 udelay(1);
2297 }
2298
2299 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2300 tw32_f(MAC_MODE, tp->mac_mode);
2301 udelay(40);
2302
2303 *flags = aninfo.flags;
2304
2305 if (status == ANEG_DONE &&
2306 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2307 MR_LP_ADV_FULL_DUPLEX)))
2308 res = 1;
2309
2310 return res;
2311}
2312
2313static void tg3_init_bcm8002(struct tg3 *tp)
2314{
2315 u32 mac_status = tr32(MAC_STATUS);
2316 int i;
2317
 2318 /* Reset when initting for the first time or when we have a link. */
2319 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2320 !(mac_status & MAC_STATUS_PCS_SYNCED))
2321 return;
2322
2323 /* Set PLL lock range. */
2324 tg3_writephy(tp, 0x16, 0x8007);
2325
2326 /* SW reset */
2327 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2328
2329 /* Wait for reset to complete. */
2330 /* XXX schedule_timeout() ... */
2331 for (i = 0; i < 500; i++)
2332 udelay(10);
2333
2334 /* Config mode; select PMA/Ch 1 regs. */
2335 tg3_writephy(tp, 0x10, 0x8411);
2336
2337 /* Enable auto-lock and comdet, select txclk for tx. */
2338 tg3_writephy(tp, 0x11, 0x0a10);
2339
2340 tg3_writephy(tp, 0x18, 0x00a0);
2341 tg3_writephy(tp, 0x16, 0x41ff);
2342
2343 /* Assert and deassert POR. */
2344 tg3_writephy(tp, 0x13, 0x0400);
2345 udelay(40);
2346 tg3_writephy(tp, 0x13, 0x0000);
2347
2348 tg3_writephy(tp, 0x11, 0x0a50);
2349 udelay(40);
2350 tg3_writephy(tp, 0x11, 0x0a10);
2351
2352 /* Wait for signal to stabilize */
2353 /* XXX schedule_timeout() ... */
2354 for (i = 0; i < 15000; i++)
2355 udelay(10);
2356
2357 /* Deselect the channel register so we can read the PHYID
2358 * later.
2359 */
2360 tg3_writephy(tp, 0x10, 0x8011);
2361}
2362
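/* Descriptive note: bring up the fiber link via the SG_DIG hardware autoneg
 * block. When autoneg is disabled the link is forced; otherwise the expected
 * control word is programmed, the peer is given ~200ms to negotiate, and the
 * code falls back to parallel detection when no config words arrive.
 * Returns nonzero when the link is up.
 */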
2363static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2364{
2365 u32 sg_dig_ctrl, sg_dig_status;
2366 u32 serdes_cfg, expected_sg_dig_ctrl;
2367 int workaround, port_a;
2368 int current_link_up;
2369
2370 serdes_cfg = 0;
2371 expected_sg_dig_ctrl = 0;
2372 workaround = 0;
2373 port_a = 1;
2374 current_link_up = 0;
2375
2376 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2377 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2378 workaround = 1;
2379 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2380 port_a = 0;
2381
2382 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2383 /* preserve bits 20-23 for voltage regulator */
2384 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2385 }
2386
2387 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2388
2389 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2390 if (sg_dig_ctrl & (1 << 31)) {
2391 if (workaround) {
2392 u32 val = serdes_cfg;
2393
2394 if (port_a)
2395 val |= 0xc010000;
2396 else
2397 val |= 0x4010000;
2398 tw32_f(MAC_SERDES_CFG, val);
2399 }
2400 tw32_f(SG_DIG_CTRL, 0x01388400);
2401 }
2402 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2403 tg3_setup_flow_control(tp, 0, 0);
2404 current_link_up = 1;
2405 }
2406 goto out;
2407 }
2408
2409 /* Want auto-negotiation. */
2410 expected_sg_dig_ctrl = 0x81388400;
2411
2412 /* Pause capability */
2413 expected_sg_dig_ctrl |= (1 << 11);
2414
 2415 /* Asymmetric pause */
2416 expected_sg_dig_ctrl |= (1 << 12);
2417
2418 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2419 if (workaround)
2420 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2421 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2422 udelay(5);
2423 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2424
2425 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2426 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2427 MAC_STATUS_SIGNAL_DET)) {
2428 int i;
2429
 2430 /* Give it time to negotiate (~200ms) */
2431 for (i = 0; i < 40000; i++) {
2432 sg_dig_status = tr32(SG_DIG_STATUS);
2433 if (sg_dig_status & (0x3))
2434 break;
2435 udelay(5);
2436 }
2437 mac_status = tr32(MAC_STATUS);
2438
2439 if ((sg_dig_status & (1 << 1)) &&
2440 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2441 u32 local_adv, remote_adv;
2442
2443 local_adv = ADVERTISE_PAUSE_CAP;
2444 remote_adv = 0;
2445 if (sg_dig_status & (1 << 19))
2446 remote_adv |= LPA_PAUSE_CAP;
2447 if (sg_dig_status & (1 << 20))
2448 remote_adv |= LPA_PAUSE_ASYM;
2449
2450 tg3_setup_flow_control(tp, local_adv, remote_adv);
2451 current_link_up = 1;
2452 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2453 } else if (!(sg_dig_status & (1 << 1))) {
2454 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2455 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2456 else {
2457 if (workaround) {
2458 u32 val = serdes_cfg;
2459
2460 if (port_a)
2461 val |= 0xc010000;
2462 else
2463 val |= 0x4010000;
2464
2465 tw32_f(MAC_SERDES_CFG, val);
2466 }
2467
2468 tw32_f(SG_DIG_CTRL, 0x01388400);
2469 udelay(40);
2470
2471 /* Link parallel detection - link is up */
2472 /* only if we have PCS_SYNC and not */
2473 /* receiving config code words */
2474 mac_status = tr32(MAC_STATUS);
2475 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2476 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2477 tg3_setup_flow_control(tp, 0, 0);
2478 current_link_up = 1;
2479 }
2480 }
2481 }
2482 }
2483
2484out:
2485 return current_link_up;
2486}
2487
2488static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2489{
2490 int current_link_up = 0;
2491
2492 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2493 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2494 goto out;
2495 }
2496
2497 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2498 u32 flags;
2499 int i;
2500
2501 if (fiber_autoneg(tp, &flags)) {
2502 u32 local_adv, remote_adv;
2503
2504 local_adv = ADVERTISE_PAUSE_CAP;
2505 remote_adv = 0;
2506 if (flags & MR_LP_ADV_SYM_PAUSE)
2507 remote_adv |= LPA_PAUSE_CAP;
2508 if (flags & MR_LP_ADV_ASYM_PAUSE)
2509 remote_adv |= LPA_PAUSE_ASYM;
2510
2511 tg3_setup_flow_control(tp, local_adv, remote_adv);
2512
2513 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2514 current_link_up = 1;
2515 }
2516 for (i = 0; i < 30; i++) {
2517 udelay(20);
2518 tw32_f(MAC_STATUS,
2519 (MAC_STATUS_SYNC_CHANGED |
2520 MAC_STATUS_CFG_CHANGED));
2521 udelay(40);
2522 if ((tr32(MAC_STATUS) &
2523 (MAC_STATUS_SYNC_CHANGED |
2524 MAC_STATUS_CFG_CHANGED)) == 0)
2525 break;
2526 }
2527
2528 mac_status = tr32(MAC_STATUS);
2529 if (current_link_up == 0 &&
2530 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2531 !(mac_status & MAC_STATUS_RCVD_CFG))
2532 current_link_up = 1;
2533 } else {
2534 /* Forcing 1000FD link up. */
2535 current_link_up = 1;
2536 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2537
2538 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2539 udelay(40);
2540 }
2541
2542out:
2543 return current_link_up;
2544}
2545
2546static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2547{
2548 u32 orig_pause_cfg;
2549 u16 orig_active_speed;
2550 u8 orig_active_duplex;
2551 u32 mac_status;
2552 int current_link_up;
2553 int i;
2554
2555 orig_pause_cfg =
2556 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2557 TG3_FLAG_TX_PAUSE));
2558 orig_active_speed = tp->link_config.active_speed;
2559 orig_active_duplex = tp->link_config.active_duplex;
2560
2561 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2562 netif_carrier_ok(tp->dev) &&
2563 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2564 mac_status = tr32(MAC_STATUS);
2565 mac_status &= (MAC_STATUS_PCS_SYNCED |
2566 MAC_STATUS_SIGNAL_DET |
2567 MAC_STATUS_CFG_CHANGED |
2568 MAC_STATUS_RCVD_CFG);
2569 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2570 MAC_STATUS_SIGNAL_DET)) {
2571 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2572 MAC_STATUS_CFG_CHANGED));
2573 return 0;
2574 }
2575 }
2576
2577 tw32_f(MAC_TX_AUTO_NEG, 0);
2578
2579 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2580 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2581 tw32_f(MAC_MODE, tp->mac_mode);
2582 udelay(40);
2583
2584 if (tp->phy_id == PHY_ID_BCM8002)
2585 tg3_init_bcm8002(tp);
2586
2587 /* Enable link change event even when serdes polling. */
2588 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2589 udelay(40);
2590
2591 current_link_up = 0;
2592 mac_status = tr32(MAC_STATUS);
2593
2594 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2595 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2596 else
2597 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2598
2599 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2600 tw32_f(MAC_MODE, tp->mac_mode);
2601 udelay(40);
2602
2603 tp->hw_status->status =
2604 (SD_STATUS_UPDATED |
2605 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2606
2607 for (i = 0; i < 100; i++) {
2608 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2609 MAC_STATUS_CFG_CHANGED));
2610 udelay(5);
2611 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2612 MAC_STATUS_CFG_CHANGED)) == 0)
2613 break;
2614 }
2615
2616 mac_status = tr32(MAC_STATUS);
2617 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2618 current_link_up = 0;
2619 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2620 tw32_f(MAC_MODE, (tp->mac_mode |
2621 MAC_MODE_SEND_CONFIGS));
2622 udelay(1);
2623 tw32_f(MAC_MODE, tp->mac_mode);
2624 }
2625 }
2626
2627 if (current_link_up == 1) {
2628 tp->link_config.active_speed = SPEED_1000;
2629 tp->link_config.active_duplex = DUPLEX_FULL;
2630 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2631 LED_CTRL_LNKLED_OVERRIDE |
2632 LED_CTRL_1000MBPS_ON));
2633 } else {
2634 tp->link_config.active_speed = SPEED_INVALID;
2635 tp->link_config.active_duplex = DUPLEX_INVALID;
2636 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2637 LED_CTRL_LNKLED_OVERRIDE |
2638 LED_CTRL_TRAFFIC_OVERRIDE));
2639 }
2640
2641 if (current_link_up != netif_carrier_ok(tp->dev)) {
2642 if (current_link_up)
2643 netif_carrier_on(tp->dev);
2644 else
2645 netif_carrier_off(tp->dev);
2646 tg3_link_report(tp);
2647 } else {
2648 u32 now_pause_cfg =
2649 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2650 TG3_FLAG_TX_PAUSE);
2651 if (orig_pause_cfg != now_pause_cfg ||
2652 orig_active_speed != tp->link_config.active_speed ||
2653 orig_active_duplex != tp->link_config.active_duplex)
2654 tg3_link_report(tp);
2655 }
2656
2657 return 0;
2658}
2659
747e8f8b
MC
2660static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2661{
2662 int current_link_up, err = 0;
2663 u32 bmsr, bmcr;
2664 u16 current_speed;
2665 u8 current_duplex;
2666
2667 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2668 tw32_f(MAC_MODE, tp->mac_mode);
2669 udelay(40);
2670
2671 tw32(MAC_EVENT, 0);
2672
2673 tw32_f(MAC_STATUS,
2674 (MAC_STATUS_SYNC_CHANGED |
2675 MAC_STATUS_CFG_CHANGED |
2676 MAC_STATUS_MI_COMPLETION |
2677 MAC_STATUS_LNKSTATE_CHANGED));
2678 udelay(40);
2679
2680 if (force_reset)
2681 tg3_phy_reset(tp);
2682
2683 current_link_up = 0;
2684 current_speed = SPEED_INVALID;
2685 current_duplex = DUPLEX_INVALID;
2686
2687 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2688 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2689 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2690 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2691 bmsr |= BMSR_LSTATUS;
2692 else
2693 bmsr &= ~BMSR_LSTATUS;
2694 }
747e8f8b
MC
2695
2696 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2697
2698 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2699 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2700 /* do nothing, just check for link up at the end */
2701 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2702 u32 adv, new_adv;
2703
2704 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2705 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2706 ADVERTISE_1000XPAUSE |
2707 ADVERTISE_1000XPSE_ASYM |
2708 ADVERTISE_SLCT);
2709
2710 /* Always advertise symmetric PAUSE just like copper */
2711 new_adv |= ADVERTISE_1000XPAUSE;
2712
2713 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2714 new_adv |= ADVERTISE_1000XHALF;
2715 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2716 new_adv |= ADVERTISE_1000XFULL;
2717
2718 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2719 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2720 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2721 tg3_writephy(tp, MII_BMCR, bmcr);
2722
2723 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2724 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2725 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2726
2727 return err;
2728 }
2729 } else {
2730 u32 new_bmcr;
2731
2732 bmcr &= ~BMCR_SPEED1000;
2733 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2734
2735 if (tp->link_config.duplex == DUPLEX_FULL)
2736 new_bmcr |= BMCR_FULLDPLX;
2737
2738 if (new_bmcr != bmcr) {
2739 /* BMCR_SPEED1000 is a reserved bit that needs
2740 * to be set on write.
2741 */
2742 new_bmcr |= BMCR_SPEED1000;
2743
2744 /* Force a linkdown */
2745 if (netif_carrier_ok(tp->dev)) {
2746 u32 adv;
2747
2748 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2749 adv &= ~(ADVERTISE_1000XFULL |
2750 ADVERTISE_1000XHALF |
2751 ADVERTISE_SLCT);
2752 tg3_writephy(tp, MII_ADVERTISE, adv);
2753 tg3_writephy(tp, MII_BMCR, bmcr |
2754 BMCR_ANRESTART |
2755 BMCR_ANENABLE);
2756 udelay(10);
2757 netif_carrier_off(tp->dev);
2758 }
2759 tg3_writephy(tp, MII_BMCR, new_bmcr);
2760 bmcr = new_bmcr;
2761 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2762 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2763 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2764 ASIC_REV_5714) {
2765 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2766 bmsr |= BMSR_LSTATUS;
2767 else
2768 bmsr &= ~BMSR_LSTATUS;
2769 }
747e8f8b
MC
2770 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2771 }
2772 }
2773
2774 if (bmsr & BMSR_LSTATUS) {
2775 current_speed = SPEED_1000;
2776 current_link_up = 1;
2777 if (bmcr & BMCR_FULLDPLX)
2778 current_duplex = DUPLEX_FULL;
2779 else
2780 current_duplex = DUPLEX_HALF;
2781
2782 if (bmcr & BMCR_ANENABLE) {
2783 u32 local_adv, remote_adv, common;
2784
2785 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2786 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2787 common = local_adv & remote_adv;
2788 if (common & (ADVERTISE_1000XHALF |
2789 ADVERTISE_1000XFULL)) {
2790 if (common & ADVERTISE_1000XFULL)
2791 current_duplex = DUPLEX_FULL;
2792 else
2793 current_duplex = DUPLEX_HALF;
2794
2795 tg3_setup_flow_control(tp, local_adv,
2796 remote_adv);
2797 }
2798 else
2799 current_link_up = 0;
2800 }
2801 }
2802
2803 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2804 if (tp->link_config.active_duplex == DUPLEX_HALF)
2805 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2806
2807 tw32_f(MAC_MODE, tp->mac_mode);
2808 udelay(40);
2809
2810 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2811
2812 tp->link_config.active_speed = current_speed;
2813 tp->link_config.active_duplex = current_duplex;
2814
2815 if (current_link_up != netif_carrier_ok(tp->dev)) {
2816 if (current_link_up)
2817 netif_carrier_on(tp->dev);
2818 else {
2819 netif_carrier_off(tp->dev);
2820 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2821 }
2822 tg3_link_report(tp);
2823 }
2824 return err;
2825}
2826
2827static void tg3_serdes_parallel_detect(struct tg3 *tp)
2828{
2829 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2830 /* Give autoneg time to complete. */
2831 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2832 return;
2833 }
2834 if (!netif_carrier_ok(tp->dev) &&
2835 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2836 u32 bmcr;
2837
2838 tg3_readphy(tp, MII_BMCR, &bmcr);
2839 if (bmcr & BMCR_ANENABLE) {
2840 u32 phy1, phy2;
2841
2842 /* Select shadow register 0x1f */
2843 tg3_writephy(tp, 0x1c, 0x7c00);
2844 tg3_readphy(tp, 0x1c, &phy1);
2845
2846 /* Select expansion interrupt status register */
2847 tg3_writephy(tp, 0x17, 0x0f01);
2848 tg3_readphy(tp, 0x15, &phy2);
2849 tg3_readphy(tp, 0x15, &phy2);
2850
2851 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2852 /* We have signal detect and not receiving
2853 * config code words, link is up by parallel
2854 * detection.
2855 */
2856
2857 bmcr &= ~BMCR_ANENABLE;
2858 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2859 tg3_writephy(tp, MII_BMCR, bmcr);
2860 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2861 }
2862 }
2863 }
2864 else if (netif_carrier_ok(tp->dev) &&
2865 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2866 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2867 u32 phy2;
2868
2869 /* Select expansion interrupt status register */
2870 tg3_writephy(tp, 0x17, 0x0f01);
2871 tg3_readphy(tp, 0x15, &phy2);
2872 if (phy2 & 0x20) {
2873 u32 bmcr;
2874
2875 /* Config code words received, turn on autoneg. */
2876 tg3_readphy(tp, MII_BMCR, &bmcr);
2877 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2878
2879 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2880
2881 }
2882 }
2883}
2884
1da177e4
LT
2885static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2886{
2887 int err;
2888
2889 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2890 err = tg3_setup_fiber_phy(tp, force_reset);
747e8f8b
MC
2891 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2892 err = tg3_setup_fiber_mii_phy(tp, force_reset);
1da177e4
LT
2893 } else {
2894 err = tg3_setup_copper_phy(tp, force_reset);
2895 }
2896
2897 if (tp->link_config.active_speed == SPEED_1000 &&
2898 tp->link_config.active_duplex == DUPLEX_HALF)
2899 tw32(MAC_TX_LENGTHS,
2900 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2901 (6 << TX_LENGTHS_IPG_SHIFT) |
2902 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2903 else
2904 tw32(MAC_TX_LENGTHS,
2905 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2906 (6 << TX_LENGTHS_IPG_SHIFT) |
2907 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2908
2909 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2910 if (netif_carrier_ok(tp->dev)) {
2911 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 2912 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
2913 } else {
2914 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2915 }
2916 }
2917
2918 return err;
2919}
2920
2921/* Tigon3 never reports partial packet sends. So we do not
2922 * need special logic to handle SKBs that have not had all
2923 * of their frags sent yet, like SunGEM does.
2924 */
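/* Descriptive note: reclaim completed TX descriptors by walking from the
 * software consumer index up to the hardware consumer index, unmapping the
 * head and fragment buffers and freeing each skb, then waking the queue if
 * it was stopped and space is available again.
 */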
2925static void tg3_tx(struct tg3 *tp)
2926{
2927 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2928 u32 sw_idx = tp->tx_cons;
2929
2930 while (sw_idx != hw_idx) {
2931 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2932 struct sk_buff *skb = ri->skb;
2933 int i;
2934
2935 if (unlikely(skb == NULL))
2936 BUG();
2937
2938 pci_unmap_single(tp->pdev,
2939 pci_unmap_addr(ri, mapping),
2940 skb_headlen(skb),
2941 PCI_DMA_TODEVICE);
2942
2943 ri->skb = NULL;
2944
2945 sw_idx = NEXT_TX(sw_idx);
2946
2947 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2948 if (unlikely(sw_idx == hw_idx))
2949 BUG();
2950
2951 ri = &tp->tx_buffers[sw_idx];
2952 if (unlikely(ri->skb != NULL))
2953 BUG();
2954
2955 pci_unmap_page(tp->pdev,
2956 pci_unmap_addr(ri, mapping),
2957 skb_shinfo(skb)->frags[i].size,
2958 PCI_DMA_TODEVICE);
2959
2960 sw_idx = NEXT_TX(sw_idx);
2961 }
2962
f47c11ee 2963 dev_kfree_skb(skb);
1da177e4
LT
2964 }
2965
2966 tp->tx_cons = sw_idx;
2967
51b91468
MC
2968 if (unlikely(netif_queue_stopped(tp->dev))) {
2969 spin_lock(&tp->tx_lock);
2970 if (netif_queue_stopped(tp->dev) &&
2971 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2972 netif_wake_queue(tp->dev);
2973 spin_unlock(&tp->tx_lock);
2974 }
1da177e4
LT
2975}
2976
2977/* Returns size of skb allocated or < 0 on error.
2978 *
2979 * We only need to fill in the address because the other members
2980 * of the RX descriptor are invariant, see tg3_init_rings.
2981 *
 2982 * Note the purposeful asymmetry of cpu vs. chip accesses. For
2983 * posting buffers we only dirty the first cache line of the RX
2984 * descriptor (containing the address). Whereas for the RX status
2985 * buffers the cpu only reads the last cacheline of the RX descriptor
2986 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2987 */
2988static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2989 int src_idx, u32 dest_idx_unmasked)
2990{
2991 struct tg3_rx_buffer_desc *desc;
2992 struct ring_info *map, *src_map;
2993 struct sk_buff *skb;
2994 dma_addr_t mapping;
2995 int skb_size, dest_idx;
2996
2997 src_map = NULL;
2998 switch (opaque_key) {
2999 case RXD_OPAQUE_RING_STD:
3000 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3001 desc = &tp->rx_std[dest_idx];
3002 map = &tp->rx_std_buffers[dest_idx];
3003 if (src_idx >= 0)
3004 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 3005 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
3006 break;
3007
3008 case RXD_OPAQUE_RING_JUMBO:
3009 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3010 desc = &tp->rx_jumbo[dest_idx];
3011 map = &tp->rx_jumbo_buffers[dest_idx];
3012 if (src_idx >= 0)
3013 src_map = &tp->rx_jumbo_buffers[src_idx];
3014 skb_size = RX_JUMBO_PKT_BUF_SZ;
3015 break;
3016
3017 default:
3018 return -EINVAL;
3019 };
3020
3021 /* Do not overwrite any of the map or rp information
3022 * until we are sure we can commit to a new buffer.
3023 *
3024 * Callers depend upon this behavior and assume that
3025 * we leave everything unchanged if we fail.
3026 */
3027 skb = dev_alloc_skb(skb_size);
3028 if (skb == NULL)
3029 return -ENOMEM;
3030
3031 skb->dev = tp->dev;
3032 skb_reserve(skb, tp->rx_offset);
3033
3034 mapping = pci_map_single(tp->pdev, skb->data,
3035 skb_size - tp->rx_offset,
3036 PCI_DMA_FROMDEVICE);
3037
3038 map->skb = skb;
3039 pci_unmap_addr_set(map, mapping, mapping);
3040
3041 if (src_map != NULL)
3042 src_map->skb = NULL;
3043
3044 desc->addr_hi = ((u64)mapping >> 32);
3045 desc->addr_lo = ((u64)mapping & 0xffffffff);
3046
3047 return skb_size;
3048}
3049
 3050/* We only need to copy the address over because the other
3051 * members of the RX descriptor are invariant. See notes above
3052 * tg3_alloc_rx_skb for full details.
3053 */
3054static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3055 int src_idx, u32 dest_idx_unmasked)
3056{
3057 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3058 struct ring_info *src_map, *dest_map;
3059 int dest_idx;
3060
3061 switch (opaque_key) {
3062 case RXD_OPAQUE_RING_STD:
3063 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3064 dest_desc = &tp->rx_std[dest_idx];
3065 dest_map = &tp->rx_std_buffers[dest_idx];
3066 src_desc = &tp->rx_std[src_idx];
3067 src_map = &tp->rx_std_buffers[src_idx];
3068 break;
3069
3070 case RXD_OPAQUE_RING_JUMBO:
3071 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3072 dest_desc = &tp->rx_jumbo[dest_idx];
3073 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3074 src_desc = &tp->rx_jumbo[src_idx];
3075 src_map = &tp->rx_jumbo_buffers[src_idx];
3076 break;
3077
3078 default:
3079 return;
3080 };
3081
3082 dest_map->skb = src_map->skb;
3083 pci_unmap_addr_set(dest_map, mapping,
3084 pci_unmap_addr(src_map, mapping));
3085 dest_desc->addr_hi = src_desc->addr_hi;
3086 dest_desc->addr_lo = src_desc->addr_lo;
3087
3088 src_map->skb = NULL;
3089}
3090
3091#if TG3_VLAN_TAG_USED
3092static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3093{
3094 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3095}
3096#endif
3097
3098/* The RX ring scheme is composed of multiple rings which post fresh
3099 * buffers to the chip, and one special ring the chip uses to report
3100 * status back to the host.
3101 *
3102 * The special ring reports the status of received packets to the
3103 * host. The chip does not write into the original descriptor the
3104 * RX buffer was obtained from. The chip simply takes the original
3105 * descriptor as provided by the host, updates the status and length
3106 * field, then writes this into the next status ring entry.
3107 *
3108 * Each ring the host uses to post buffers to the chip is described
3109 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3110 * it is first placed into the on-chip ram. When the packet's length
3111 * is known, it walks down the TG3_BDINFO entries to select the ring.
 3112 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
 3113 * whose MAXLEN covers the new packet's length is chosen.
3114 *
3115 * The "separate ring for rx status" scheme may sound queer, but it makes
3116 * sense from a cache coherency perspective. If only the host writes
3117 * to the buffer post rings, and only the chip writes to the rx status
3118 * rings, then cache lines never move beyond shared-modified state.
3119 * If both the host and chip were to write into the same ring, cache line
3120 * eviction could occur since both entities want it in an exclusive state.
3121 */
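/* Descriptive note: process up to 'budget' entries from the RX status ring,
 * ACK the ring, and refill whichever producer rings (standard/jumbo) were
 * drained; returns the number of packets passed up the stack.
 */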
3122static int tg3_rx(struct tg3 *tp, int budget)
3123{
3124 u32 work_mask;
483ba50b
MC
3125 u32 sw_idx = tp->rx_rcb_ptr;
3126 u16 hw_idx;
1da177e4
LT
3127 int received;
3128
3129 hw_idx = tp->hw_status->idx[0].rx_producer;
3130 /*
3131 * We need to order the read of hw_idx and the read of
3132 * the opaque cookie.
3133 */
3134 rmb();
1da177e4
LT
3135 work_mask = 0;
3136 received = 0;
3137 while (sw_idx != hw_idx && budget > 0) {
3138 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3139 unsigned int len;
3140 struct sk_buff *skb;
3141 dma_addr_t dma_addr;
3142 u32 opaque_key, desc_idx, *post_ptr;
3143
3144 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3145 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3146 if (opaque_key == RXD_OPAQUE_RING_STD) {
3147 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3148 mapping);
3149 skb = tp->rx_std_buffers[desc_idx].skb;
3150 post_ptr = &tp->rx_std_ptr;
3151 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3152 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3153 mapping);
3154 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3155 post_ptr = &tp->rx_jumbo_ptr;
3156 }
3157 else {
3158 goto next_pkt_nopost;
3159 }
3160
3161 work_mask |= opaque_key;
3162
3163 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3164 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3165 drop_it:
3166 tg3_recycle_rx(tp, opaque_key,
3167 desc_idx, *post_ptr);
3168 drop_it_no_recycle:
3169 /* Other statistics kept track of by card. */
3170 tp->net_stats.rx_dropped++;
3171 goto next_pkt;
3172 }
3173
3174 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3175
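		/* Copy-break: large packets keep the mapped DMA buffer and a
		 * fresh one is posted in its place; small packets are copied
		 * into a new skb so the original buffer can simply be recycled.
		 */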
3176 if (len > RX_COPY_THRESHOLD
3177 && tp->rx_offset == 2
3178 /* rx_offset != 2 iff this is a 5701 card running
3179 * in PCI-X mode [see tg3_get_invariants()] */
3180 ) {
3181 int skb_size;
3182
3183 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3184 desc_idx, *post_ptr);
3185 if (skb_size < 0)
3186 goto drop_it;
3187
3188 pci_unmap_single(tp->pdev, dma_addr,
3189 skb_size - tp->rx_offset,
3190 PCI_DMA_FROMDEVICE);
3191
3192 skb_put(skb, len);
3193 } else {
3194 struct sk_buff *copy_skb;
3195
3196 tg3_recycle_rx(tp, opaque_key,
3197 desc_idx, *post_ptr);
3198
3199 copy_skb = dev_alloc_skb(len + 2);
3200 if (copy_skb == NULL)
3201 goto drop_it_no_recycle;
3202
3203 copy_skb->dev = tp->dev;
3204 skb_reserve(copy_skb, 2);
3205 skb_put(copy_skb, len);
3206 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3207 memcpy(copy_skb->data, skb->data, len);
3208 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3209
3210 /* We'll reuse the original ring buffer. */
3211 skb = copy_skb;
3212 }
3213
3214 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3215 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3216 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3217 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3218 skb->ip_summed = CHECKSUM_UNNECESSARY;
3219 else
3220 skb->ip_summed = CHECKSUM_NONE;
3221
3222 skb->protocol = eth_type_trans(skb, tp->dev);
3223#if TG3_VLAN_TAG_USED
3224 if (tp->vlgrp != NULL &&
3225 desc->type_flags & RXD_FLAG_VLAN) {
3226 tg3_vlan_rx(tp, skb,
3227 desc->err_vlan & RXD_VLAN_MASK);
3228 } else
3229#endif
3230 netif_receive_skb(skb);
3231
3232 tp->dev->last_rx = jiffies;
3233 received++;
3234 budget--;
3235
3236next_pkt:
3237 (*post_ptr)++;
3238next_pkt_nopost:
483ba50b
MC
3239 sw_idx++;
3240 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
52f6d697
MC
3241
3242 /* Refresh hw_idx to see if there is new work */
3243 if (sw_idx == hw_idx) {
3244 hw_idx = tp->hw_status->idx[0].rx_producer;
3245 rmb();
3246 }
1da177e4
LT
3247 }
3248
3249 /* ACK the status ring. */
483ba50b
MC
3250 tp->rx_rcb_ptr = sw_idx;
3251 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
1da177e4
LT
3252
3253 /* Refill RX ring(s). */
3254 if (work_mask & RXD_OPAQUE_RING_STD) {
3255 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3256 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3257 sw_idx);
3258 }
3259 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3260 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3261 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3262 sw_idx);
3263 }
3264 mmiowb();
3265
3266 return received;
3267}
3268
3269static int tg3_poll(struct net_device *netdev, int *budget)
3270{
3271 struct tg3 *tp = netdev_priv(netdev);
3272 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3273 int done;
3274
1da177e4
LT
3275 /* handle link change and other phy events */
3276 if (!(tp->tg3_flags &
3277 (TG3_FLAG_USE_LINKCHG_REG |
3278 TG3_FLAG_POLL_SERDES))) {
3279 if (sblk->status & SD_STATUS_LINK_CHG) {
3280 sblk->status = SD_STATUS_UPDATED |
3281 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 3282 spin_lock(&tp->lock);
1da177e4 3283 tg3_setup_phy(tp, 0);
f47c11ee 3284 spin_unlock(&tp->lock);
1da177e4
LT
3285 }
3286 }
3287
3288 /* run TX completion thread */
3289 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
1da177e4 3290 tg3_tx(tp);
1da177e4
LT
3291 }
3292
1da177e4
LT
3293 /* run RX thread, within the bounds set by NAPI.
3294 * All RX "locking" is done by ensuring outside
3295 * code synchronizes with dev->poll()
3296 */
1da177e4
LT
3297 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3298 int orig_budget = *budget;
3299 int work_done;
3300
3301 if (orig_budget > netdev->quota)
3302 orig_budget = netdev->quota;
3303
3304 work_done = tg3_rx(tp, orig_budget);
3305
3306 *budget -= work_done;
3307 netdev->quota -= work_done;
1da177e4
LT
3308 }
3309
38f3843e 3310 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
f7383c22 3311 tp->last_tag = sblk->status_tag;
38f3843e
MC
3312 rmb();
3313 } else
3314 sblk->status &= ~SD_STATUS_UPDATED;
f7383c22 3315
1da177e4 3316 /* if no more work, tell net stack and NIC we're done */
f7383c22 3317 done = !tg3_has_work(tp);
1da177e4 3318 if (done) {
f47c11ee 3319 netif_rx_complete(netdev);
1da177e4 3320 tg3_restart_ints(tp);
1da177e4
LT
3321 }
3322
3323 return (done ? 0 : 1);
3324}
3325
f47c11ee
DM
3326static void tg3_irq_quiesce(struct tg3 *tp)
3327{
3328 BUG_ON(tp->irq_sync);
3329
3330 tp->irq_sync = 1;
3331 smp_mb();
3332
3333 synchronize_irq(tp->pdev->irq);
3334}
3335
3336static inline int tg3_irq_sync(struct tg3 *tp)
3337{
3338 return tp->irq_sync;
3339}
3340
3341/* Fully shutdown all tg3 driver activity elsewhere in the system.
3342 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3343 * with as well. Most of the time, this is not necessary except when
3344 * shutting down the device.
3345 */
3346static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3347{
3348 if (irq_sync)
3349 tg3_irq_quiesce(tp);
3350 spin_lock_bh(&tp->lock);
3351 spin_lock(&tp->tx_lock);
3352}
3353
3354static inline void tg3_full_unlock(struct tg3 *tp)
3355{
3356 spin_unlock(&tp->tx_lock);
3357 spin_unlock_bh(&tp->lock);
3358}
3359
88b06bc2
MC
3360/* MSI ISR - No need to check for interrupt sharing and no need to
3361 * flush status block and interrupt mailbox. PCI ordering rules
3362 * guarantee that MSI will arrive after the status block.
3363 */
3364static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3365{
3366 struct net_device *dev = dev_id;
3367 struct tg3 *tp = netdev_priv(dev);
88b06bc2 3368
61487480
MC
3369 prefetch(tp->hw_status);
3370 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
88b06bc2 3371 /*
fac9b83e 3372 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 3373 * chip-internal interrupt pending events.
fac9b83e 3374 * Writing non-zero to intr-mbox-0 additionally tells the
88b06bc2
MC
3375 * NIC to stop sending us irqs, engaging "in-intr-handler"
3376 * event coalescing.
3377 */
3378 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
61487480 3379 if (likely(!tg3_irq_sync(tp)))
88b06bc2 3380 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3381
88b06bc2
MC
3382 return IRQ_RETVAL(1);
3383}
3384
1da177e4
LT
3385static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3386{
3387 struct net_device *dev = dev_id;
3388 struct tg3 *tp = netdev_priv(dev);
3389 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3390 unsigned int handled = 1;
3391
1da177e4
LT
3392 /* In INTx mode, it is possible for the interrupt to arrive at
3393 * the CPU before the status block posted prior to the interrupt.
3394 * Reading the PCI State register will confirm whether the
3395 * interrupt is ours and will flush the status block.
3396 */
3397 if ((sblk->status & SD_STATUS_UPDATED) ||
3398 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3399 /*
fac9b83e 3400 * Writing any value to intr-mbox-0 clears PCI INTA# and
1da177e4 3401 * chip-internal interrupt pending events.
fac9b83e 3402 * Writing non-zero to intr-mbox-0 additionally tells the
1da177e4
LT
3403 * NIC to stop sending us irqs, engaging "in-intr-handler"
3404 * event coalescing.
3405 */
3406 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3407 0x00000001);
f47c11ee
DM
3408 if (tg3_irq_sync(tp))
3409 goto out;
fac9b83e 3410 sblk->status &= ~SD_STATUS_UPDATED;
61487480
MC
3411 if (likely(tg3_has_work(tp))) {
3412 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
fac9b83e 3413 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3414 } else {
fac9b83e
DM
3415 /* No work, shared interrupt perhaps? re-enable
3416 * interrupts, and flush that PCI write
3417 */
09ee929c 3418 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
fac9b83e 3419 0x00000000);
fac9b83e
DM
3420 }
3421 } else { /* shared interrupt */
3422 handled = 0;
3423 }
f47c11ee 3424out:
fac9b83e
DM
3425 return IRQ_RETVAL(handled);
3426}
3427
3428static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3429{
3430 struct net_device *dev = dev_id;
3431 struct tg3 *tp = netdev_priv(dev);
3432 struct tg3_hw_status *sblk = tp->hw_status;
fac9b83e
DM
3433 unsigned int handled = 1;
3434
fac9b83e
DM
3435 /* In INTx mode, it is possible for the interrupt to arrive at
3436 * the CPU before the status block posted prior to the interrupt.
3437 * Reading the PCI State register will confirm whether the
3438 * interrupt is ours and will flush the status block.
3439 */
38f3843e 3440 if ((sblk->status_tag != tp->last_tag) ||
fac9b83e 3441 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1da177e4 3442 /*
fac9b83e
DM
3443 * writing any value to intr-mbox-0 clears PCI INTA# and
3444 * chip-internal interrupt pending events.
 3445 * writing non-zero to intr-mbox-0 additionally tells the
3446 * NIC to stop sending us irqs, engaging "in-intr-handler"
3447 * event coalescing.
1da177e4 3448 */
fac9b83e
DM
3449 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3450 0x00000001);
f47c11ee
DM
3451 if (tg3_irq_sync(tp))
3452 goto out;
38f3843e 3453 if (netif_rx_schedule_prep(dev)) {
61487480 3454 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
38f3843e
MC
3455 /* Update last_tag to mark that this status has been
3456 * seen. Because interrupt may be shared, we may be
3457 * racing with tg3_poll(), so only update last_tag
3458 * if tg3_poll() is not scheduled.
1da177e4 3459 */
38f3843e
MC
3460 tp->last_tag = sblk->status_tag;
3461 __netif_rx_schedule(dev);
1da177e4
LT
3462 }
3463 } else { /* shared interrupt */
3464 handled = 0;
3465 }
f47c11ee 3466out:
1da177e4
LT
3467 return IRQ_RETVAL(handled);
3468}
3469
7938109f
MC
3470/* ISR for interrupt test */
3471static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3472 struct pt_regs *regs)
3473{
3474 struct net_device *dev = dev_id;
3475 struct tg3 *tp = netdev_priv(dev);
3476 struct tg3_hw_status *sblk = tp->hw_status;
3477
f9804ddb
MC
3478 if ((sblk->status & SD_STATUS_UPDATED) ||
3479 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7938109f
MC
3480 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3481 0x00000001);
3482 return IRQ_RETVAL(1);
3483 }
3484 return IRQ_RETVAL(0);
3485}
3486
1da177e4 3487static int tg3_init_hw(struct tg3 *);
944d980e 3488static int tg3_halt(struct tg3 *, int, int);
1da177e4
LT
3489
3490#ifdef CONFIG_NET_POLL_CONTROLLER
3491static void tg3_poll_controller(struct net_device *dev)
3492{
88b06bc2
MC
3493 struct tg3 *tp = netdev_priv(dev);
3494
3495 tg3_interrupt(tp->pdev->irq, dev, NULL);
1da177e4
LT
3496}
3497#endif
3498
3499static void tg3_reset_task(void *_data)
3500{
3501 struct tg3 *tp = _data;
3502 unsigned int restart_timer;
3503
7faa006f
MC
3504 tg3_full_lock(tp, 0);
3505 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3506
3507 if (!netif_running(tp->dev)) {
3508 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3509 tg3_full_unlock(tp);
3510 return;
3511 }
3512
3513 tg3_full_unlock(tp);
3514
1da177e4
LT
3515 tg3_netif_stop(tp);
3516
f47c11ee 3517 tg3_full_lock(tp, 1);
1da177e4
LT
3518
3519 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3520 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3521
944d980e 3522 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
1da177e4
LT
3523 tg3_init_hw(tp);
3524
3525 tg3_netif_start(tp);
3526
1da177e4
LT
3527 if (restart_timer)
3528 mod_timer(&tp->timer, jiffies + 1);
7faa006f
MC
3529
3530 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3531
3532 tg3_full_unlock(tp);
1da177e4
LT
3533}
3534
3535static void tg3_tx_timeout(struct net_device *dev)
3536{
3537 struct tg3 *tp = netdev_priv(dev);
3538
3539 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3540 dev->name);
3541
3542 schedule_work(&tp->reset_task);
3543}
3544
c58ec932
MC
3545/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
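/* Descriptive note: the sum test below flags the case where the low 32 bits
 * of the mapping wrap past zero, i.e. the buffer (plus a small pad) would
 * straddle a 4GB boundary.
 */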
3546static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3547{
3548 u32 base = (u32) mapping & 0xffffffff;
3549
3550 return ((base > 0xffffdcc0) &&
3551 (base + len + 8 < base));
3552}
3553
72f2afb8
MC
3554/* Test for DMA addresses > 40-bit */
3555static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3556 int len)
3557{
3558#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3559 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3560 return (((u64) mapping + len) > DMA_40BIT_MASK);
3561 return 0;
3562#else
3563 return 0;
3564#endif
3565}
3566
1da177e4
LT
3567static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3568
72f2afb8
MC
3569/* Workaround 4GB and 40-bit hardware DMA bugs. */
3570static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
c58ec932
MC
3571 u32 last_plus_one, u32 *start,
3572 u32 base_flags, u32 mss)
1da177e4
LT
3573{
3574 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
c58ec932 3575 dma_addr_t new_addr = 0;
1da177e4 3576 u32 entry = *start;
c58ec932 3577 int i, ret = 0;
1da177e4
LT
3578
3579 if (!new_skb) {
c58ec932
MC
3580 ret = -1;
3581 } else {
3582 /* New SKB is guaranteed to be linear. */
3583 entry = *start;
3584 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3585 PCI_DMA_TODEVICE);
3586 /* Make sure new skb does not cross any 4G boundaries.
3587 * Drop the packet if it does.
3588 */
3589 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3590 ret = -1;
3591 dev_kfree_skb(new_skb);
3592 new_skb = NULL;
3593 } else {
3594 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3595 base_flags, 1 | (mss << 1));
3596 *start = NEXT_TX(entry);
3597 }
1da177e4
LT
3598 }
3599
1da177e4
LT
3600 /* Now clean up the sw ring entries. */
3601 i = 0;
3602 while (entry != last_plus_one) {
3603 int len;
3604
3605 if (i == 0)
3606 len = skb_headlen(skb);
3607 else
3608 len = skb_shinfo(skb)->frags[i-1].size;
3609 pci_unmap_single(tp->pdev,
3610 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3611 len, PCI_DMA_TODEVICE);
3612 if (i == 0) {
3613 tp->tx_buffers[entry].skb = new_skb;
3614 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3615 } else {
3616 tp->tx_buffers[entry].skb = NULL;
3617 }
3618 entry = NEXT_TX(entry);
3619 i++;
3620 }
3621
3622 dev_kfree_skb(skb);
3623
c58ec932 3624 return ret;
1da177e4
LT
3625}
3626
3627static void tg3_set_txd(struct tg3 *tp, int entry,
3628 dma_addr_t mapping, int len, u32 flags,
3629 u32 mss_and_is_end)
3630{
3631 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3632 int is_end = (mss_and_is_end & 0x1);
3633 u32 mss = (mss_and_is_end >> 1);
3634 u32 vlan_tag = 0;
3635
3636 if (is_end)
3637 flags |= TXD_FLAG_END;
3638 if (flags & TXD_FLAG_VLAN) {
3639 vlan_tag = flags >> 16;
3640 flags &= 0xffff;
3641 }
3642 vlan_tag |= (mss << TXD_MSS_SHIFT);
3643
3644 txd->addr_hi = ((u64) mapping >> 32);
3645 txd->addr_lo = ((u64) mapping & 0xffffffff);
3646 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3647 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3648}
3649
1da177e4
LT
3650static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3651{
3652 struct tg3 *tp = netdev_priv(dev);
3653 dma_addr_t mapping;
1da177e4
LT
3654 u32 len, entry, base_flags, mss;
3655 int would_hit_hwbug;
1da177e4
LT
3656
3657 len = skb_headlen(skb);
3658
3659 /* No BH disabling for tx_lock here. We are running in BH disabled
3660 * context and TX reclaim runs via tp->poll inside of a software
f47c11ee
DM
3661 * interrupt. Furthermore, IRQ processing runs lockless so we have
3662 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 3663 */
f47c11ee 3664 if (!spin_trylock(&tp->tx_lock))
1da177e4 3665 return NETDEV_TX_LOCKED;
1da177e4 3666
1da177e4 3667 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1f064a87
SH
3668 if (!netif_queue_stopped(dev)) {
3669 netif_stop_queue(dev);
3670
3671 /* This is a hard error, log it. */
3672 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3673 "queue awake!\n", dev->name);
3674 }
f47c11ee 3675 spin_unlock(&tp->tx_lock);
1da177e4
LT
3676 return NETDEV_TX_BUSY;
3677 }
3678
3679 entry = tp->tx_prod;
3680 base_flags = 0;
3681 if (skb->ip_summed == CHECKSUM_HW)
3682 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3683#if TG3_TSO_SUPPORT != 0
3684 mss = 0;
3685 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3686 (mss = skb_shinfo(skb)->tso_size) != 0) {
3687 int tcp_opt_len, ip_tcp_len;
3688
3689 if (skb_header_cloned(skb) &&
3690 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3691 dev_kfree_skb(skb);
3692 goto out_unlock;
3693 }
3694
3695 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3696 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3697
3698 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3699 TXD_FLAG_CPU_POST_DMA);
3700
3701 skb->nh.iph->check = 0;
fd30333d 3702 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
1da177e4
LT
3703 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3704 skb->h.th->check = 0;
3705 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3706 }
3707 else {
3708 skb->h.th->check =
3709 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3710 skb->nh.iph->daddr,
3711 0, IPPROTO_TCP, 0);
3712 }
3713
3714 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3715 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3716 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3717 int tsflags;
3718
3719 tsflags = ((skb->nh.iph->ihl - 5) +
3720 (tcp_opt_len >> 2));
3721 mss |= (tsflags << 11);
3722 }
3723 } else {
3724 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3725 int tsflags;
3726
3727 tsflags = ((skb->nh.iph->ihl - 5) +
3728 (tcp_opt_len >> 2));
3729 base_flags |= tsflags << 12;
3730 }
3731 }
3732 }
3733#else
3734 mss = 0;
3735#endif
3736#if TG3_VLAN_TAG_USED
3737 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3738 base_flags |= (TXD_FLAG_VLAN |
3739 (vlan_tx_tag_get(skb) << 16));
3740#endif
3741
3742 /* Queue skb data, a.k.a. the main skb fragment. */
3743 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3744
3745 tp->tx_buffers[entry].skb = skb;
3746 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3747
3748 would_hit_hwbug = 0;
3749
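	/* Descriptive note: would_hit_hwbug is set when any mapping trips the
	 * 4GB-crossing or 40-bit DMA errata checks; the packet is then
	 * re-queued through tigon3_dma_hwbug_workaround() further down.
	 */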
3750 if (tg3_4g_overflow_test(mapping, len))
c58ec932 3751 would_hit_hwbug = 1;
1da177e4
LT
3752
3753 tg3_set_txd(tp, entry, mapping, len, base_flags,
3754 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3755
3756 entry = NEXT_TX(entry);
3757
3758 /* Now loop through additional data fragments, and queue them. */
3759 if (skb_shinfo(skb)->nr_frags > 0) {
3760 unsigned int i, last;
3761
3762 last = skb_shinfo(skb)->nr_frags - 1;
3763 for (i = 0; i <= last; i++) {
3764 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3765
3766 len = frag->size;
3767 mapping = pci_map_page(tp->pdev,
3768 frag->page,
3769 frag->page_offset,
3770 len, PCI_DMA_TODEVICE);
3771
3772 tp->tx_buffers[entry].skb = NULL;
3773 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3774
3775 if (tg3_4g_overflow_test(mapping, len))
3776 would_hit_hwbug = 1;
1da177e4 3777
3778 if (tg3_40bit_overflow_test(tp, mapping, len))
3779 would_hit_hwbug = 1;
3780
3781 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3782 tg3_set_txd(tp, entry, mapping, len,
3783 base_flags, (i == last)|(mss << 1));
3784 else
3785 tg3_set_txd(tp, entry, mapping, len,
3786 base_flags, (i == last));
3787
3788 entry = NEXT_TX(entry);
3789 }
3790 }
3791
3792 if (would_hit_hwbug) {
3793 u32 last_plus_one = entry;
3794 u32 start;
1da177e4 3795
3796 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3797 start &= (TG3_TX_RING_SIZE - 1);
3798
3799 /* If the workaround fails due to memory/mapping
3800 * failure, silently drop this packet.
3801 */
72f2afb8 3802 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
c58ec932 3803 &start, base_flags, mss))
3804 goto out_unlock;
3805
3806 entry = start;
3807 }
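/* The workaround above (tigon3_dma_hwbug_workaround()) re-copies the
 * offending packet into a freshly mapped linear buffer and rebuilds
 * the descriptors starting at 'start'; on failure the packet is
 * silently dropped, as noted in the comment.
 */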
3808
3809 /* Packets are ready, update Tx producer idx local and on card. */
3810 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3811
3812 tp->tx_prod = entry;
51b91468 3813 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
1da177e4 3814 netif_stop_queue(dev);
3815 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3816 netif_wake_queue(tp->dev);
3817 }
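/* The queue is stopped while fewer than MAX_SKB_FRAGS + 1 descriptors
 * remain, so a maximally fragmented packet is always guaranteed to
 * fit; the immediate recheck against TG3_TX_WAKEUP_THRESH presumably
 * closes the race with the tx completion path freeing entries.
 */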
3818
3819out_unlock:
3820 mmiowb();
f47c11ee 3821 spin_unlock(&tp->tx_lock);
3822
3823 dev->trans_start = jiffies;
3824
3825 return NETDEV_TX_OK;
3826}
3827
3828static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3829 int new_mtu)
3830{
3831 dev->mtu = new_mtu;
3832
ef7f5ec0 3833 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 3834 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3835 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3836 ethtool_op_set_tso(dev, 0);
3837 }
3838 else
3839 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3840 } else {
a4e2b347 3841 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 3842 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 3843 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 3844 }
3845}
3846
3847static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3848{
3849 struct tg3 *tp = netdev_priv(dev);
3850
3851 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3852 return -EINVAL;
3853
3854 if (!netif_running(dev)) {
3855 /* We'll just catch it later when the
3856 * device is up'd.
3857 */
3858 tg3_set_mtu(dev, tp, new_mtu);
3859 return 0;
3860 }
3861
3862 tg3_netif_stop(tp);
3863
3864 tg3_full_lock(tp, 1);
1da177e4 3865
944d980e 3866 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3867
3868 tg3_set_mtu(dev, tp, new_mtu);
3869
3870 tg3_init_hw(tp);
3871
3872 tg3_netif_start(tp);
3873
f47c11ee 3874 tg3_full_unlock(tp);
3875
3876 return 0;
3877}
3878
3879/* Free up pending packets in all rx/tx rings.
3880 *
3881 * The chip has been shut down and the driver detached from
3882 * the networking, so no interrupts or new tx packets will
3883 * end up in the driver. tp->{tx,}lock is not held and we are not
3884 * in an interrupt context and thus may sleep.
3885 */
3886static void tg3_free_rings(struct tg3 *tp)
3887{
3888 struct ring_info *rxp;
3889 int i;
3890
3891 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3892 rxp = &tp->rx_std_buffers[i];
3893
3894 if (rxp->skb == NULL)
3895 continue;
3896 pci_unmap_single(tp->pdev,
3897 pci_unmap_addr(rxp, mapping),
7e72aad4 3898 tp->rx_pkt_buf_sz - tp->rx_offset,
3899 PCI_DMA_FROMDEVICE);
3900 dev_kfree_skb_any(rxp->skb);
3901 rxp->skb = NULL;
3902 }
3903
3904 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3905 rxp = &tp->rx_jumbo_buffers[i];
3906
3907 if (rxp->skb == NULL)
3908 continue;
3909 pci_unmap_single(tp->pdev,
3910 pci_unmap_addr(rxp, mapping),
3911 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3912 PCI_DMA_FROMDEVICE);
3913 dev_kfree_skb_any(rxp->skb);
3914 rxp->skb = NULL;
3915 }
3916
3917 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3918 struct tx_ring_info *txp;
3919 struct sk_buff *skb;
3920 int j;
3921
3922 txp = &tp->tx_buffers[i];
3923 skb = txp->skb;
3924
3925 if (skb == NULL) {
3926 i++;
3927 continue;
3928 }
3929
3930 pci_unmap_single(tp->pdev,
3931 pci_unmap_addr(txp, mapping),
3932 skb_headlen(skb),
3933 PCI_DMA_TODEVICE);
3934 txp->skb = NULL;
3935
3936 i++;
3937
3938 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3939 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3940 pci_unmap_page(tp->pdev,
3941 pci_unmap_addr(txp, mapping),
3942 skb_shinfo(skb)->frags[j].size,
3943 PCI_DMA_TODEVICE);
3944 i++;
3945 }
3946
3947 dev_kfree_skb_any(skb);
3948 }
3949}
3950
3951/* Initialize tx/rx rings for packet processing.
3952 *
3953 * The chip has been shut down and the driver detached from
3954 * the networking, so no interrupts or new tx packets will
3955 * end up in the driver. tp->{tx,}lock are held and thus
3956 * we may not sleep.
3957 */
3958static void tg3_init_rings(struct tg3 *tp)
3959{
3960 u32 i;
3961
3962 /* Free up all the SKBs. */
3963 tg3_free_rings(tp);
3964
3965 /* Zero out all descriptors. */
3966 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3967 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3968 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3969 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3970
7e72aad4 3971 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
a4e2b347 3972 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3973 (tp->dev->mtu > ETH_DATA_LEN))
3974 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
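/* 5780-class chips do not use the separate jumbo ring (see
 * tg3_set_mtu() above), so for a jumbo MTU the standard ring is
 * simply filled with jumbo-sized buffers instead.
 */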
3975
3976	/* Initialize invariants of the rings; we only set this
3977 * stuff once. This works because the card does not
3978 * write into the rx buffer posting rings.
3979 */
3980 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3981 struct tg3_rx_buffer_desc *rxd;
3982
3983 rxd = &tp->rx_std[i];
7e72aad4 3984 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3985 << RXD_LEN_SHIFT;
3986 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3987 rxd->opaque = (RXD_OPAQUE_RING_STD |
3988 (i << RXD_OPAQUE_INDEX_SHIFT));
3989 }
3990
0f893dc6 3991 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3992 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3993 struct tg3_rx_buffer_desc *rxd;
3994
3995 rxd = &tp->rx_jumbo[i];
3996 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3997 << RXD_LEN_SHIFT;
3998 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3999 RXD_FLAG_JUMBO;
4000 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4001 (i << RXD_OPAQUE_INDEX_SHIFT));
4002 }
4003 }
4004
4005 /* Now allocate fresh SKBs for each rx ring. */
4006 for (i = 0; i < tp->rx_pending; i++) {
4007 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4008 -1, i) < 0)
4009 break;
4010 }
4011
0f893dc6 4012 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4013 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4014 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4015 -1, i) < 0)
4016 break;
4017 }
4018 }
4019}
4020
4021/*
4022 * Must not be invoked with interrupt sources disabled and
4023 * the hardware shut down.
4024 */
4025static void tg3_free_consistent(struct tg3 *tp)
4026{
4027 kfree(tp->rx_std_buffers);
4028 tp->rx_std_buffers = NULL;
4029 if (tp->rx_std) {
4030 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4031 tp->rx_std, tp->rx_std_mapping);
4032 tp->rx_std = NULL;
4033 }
4034 if (tp->rx_jumbo) {
4035 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4036 tp->rx_jumbo, tp->rx_jumbo_mapping);
4037 tp->rx_jumbo = NULL;
4038 }
4039 if (tp->rx_rcb) {
4040 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4041 tp->rx_rcb, tp->rx_rcb_mapping);
4042 tp->rx_rcb = NULL;
4043 }
4044 if (tp->tx_ring) {
4045 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4046 tp->tx_ring, tp->tx_desc_mapping);
4047 tp->tx_ring = NULL;
4048 }
4049 if (tp->hw_status) {
4050 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4051 tp->hw_status, tp->status_mapping);
4052 tp->hw_status = NULL;
4053 }
4054 if (tp->hw_stats) {
4055 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4056 tp->hw_stats, tp->stats_mapping);
4057 tp->hw_stats = NULL;
4058 }
4059}
4060
4061/*
4062 * Must not be invoked with interrupt sources disabled and
4063 * the hardware shut down. Can sleep.
4064 */
4065static int tg3_alloc_consistent(struct tg3 *tp)
4066{
4067 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4068 (TG3_RX_RING_SIZE +
4069 TG3_RX_JUMBO_RING_SIZE)) +
4070 (sizeof(struct tx_ring_info) *
4071 TG3_TX_RING_SIZE),
4072 GFP_KERNEL);
4073 if (!tp->rx_std_buffers)
4074 return -ENOMEM;
4075
4076 memset(tp->rx_std_buffers, 0,
4077 (sizeof(struct ring_info) *
4078 (TG3_RX_RING_SIZE +
4079 TG3_RX_JUMBO_RING_SIZE)) +
4080 (sizeof(struct tx_ring_info) *
4081 TG3_TX_RING_SIZE));
4082
4083 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4084 tp->tx_buffers = (struct tx_ring_info *)
4085 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
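/* All three shadow-ring arrays live inside the single kmalloc()
 * block above; rx_jumbo_buffers and tx_buffers are just offsets
 * into it, which is why tg3_free_consistent() only kfree()s
 * rx_std_buffers.
 */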
4086
4087 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4088 &tp->rx_std_mapping);
4089 if (!tp->rx_std)
4090 goto err_out;
4091
4092 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4093 &tp->rx_jumbo_mapping);
4094
4095 if (!tp->rx_jumbo)
4096 goto err_out;
4097
4098 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4099 &tp->rx_rcb_mapping);
4100 if (!tp->rx_rcb)
4101 goto err_out;
4102
4103 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4104 &tp->tx_desc_mapping);
4105 if (!tp->tx_ring)
4106 goto err_out;
4107
4108 tp->hw_status = pci_alloc_consistent(tp->pdev,
4109 TG3_HW_STATUS_SIZE,
4110 &tp->status_mapping);
4111 if (!tp->hw_status)
4112 goto err_out;
4113
4114 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4115 sizeof(struct tg3_hw_stats),
4116 &tp->stats_mapping);
4117 if (!tp->hw_stats)
4118 goto err_out;
4119
4120 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4121 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4122
4123 return 0;
4124
4125err_out:
4126 tg3_free_consistent(tp);
4127 return -ENOMEM;
4128}
4129
4130#define MAX_WAIT_CNT 1000
4131
4132/* To stop a block, clear the enable bit and poll till it
4133 * clears. tp->lock is held.
4134 */
b3b7d6be 4135static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4136{
4137 unsigned int i;
4138 u32 val;
4139
4140 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4141 switch (ofs) {
4142 case RCVLSC_MODE:
4143 case DMAC_MODE:
4144 case MBFREE_MODE:
4145 case BUFMGR_MODE:
4146 case MEMARB_MODE:
4147 /* We can't enable/disable these bits of the
4148 * 5705/5750, just say success.
4149 */
4150 return 0;
4151
4152 default:
4153 break;
4154 };
4155 }
4156
4157 val = tr32(ofs);
4158 val &= ~enable_bit;
4159 tw32_f(ofs, val);
4160
4161 for (i = 0; i < MAX_WAIT_CNT; i++) {
4162 udelay(100);
4163 val = tr32(ofs);
4164 if ((val & enable_bit) == 0)
4165 break;
4166 }
4167
b3b7d6be 4168 if (i == MAX_WAIT_CNT && !silent) {
4169 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4170 "ofs=%lx enable_bit=%x\n",
4171 ofs, enable_bit);
4172 return -ENODEV;
4173 }
4174
4175 return 0;
4176}
4177
4178/* tp->lock is held. */
b3b7d6be 4179static int tg3_abort_hw(struct tg3 *tp, int silent)
4180{
4181 int i, err;
4182
4183 tg3_disable_ints(tp);
4184
4185 tp->rx_mode &= ~RX_MODE_ENABLE;
4186 tw32_f(MAC_RX_MODE, tp->rx_mode);
4187 udelay(10);
4188
4189 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4190 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4191 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4192 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4193 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4194 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4195
4196 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4197 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4198 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4199 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4200 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4201 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4202 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4203
4204 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4205 tw32_f(MAC_MODE, tp->mac_mode);
4206 udelay(40);
4207
4208 tp->tx_mode &= ~TX_MODE_ENABLE;
4209 tw32_f(MAC_TX_MODE, tp->tx_mode);
4210
4211 for (i = 0; i < MAX_WAIT_CNT; i++) {
4212 udelay(100);
4213 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4214 break;
4215 }
4216 if (i >= MAX_WAIT_CNT) {
4217 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4218 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4219 tp->dev->name, tr32(MAC_TX_MODE));
e6de8ad1 4220 err |= -ENODEV;
4221 }
4222
e6de8ad1 4223 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4224 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4225 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4226
4227 tw32(FTQ_RESET, 0xffffffff);
4228 tw32(FTQ_RESET, 0x00000000);
4229
4230 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4231 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4232
4233 if (tp->hw_status)
4234 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4235 if (tp->hw_stats)
4236 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4237
4238 return err;
4239}
4240
4241/* tp->lock is held. */
4242static int tg3_nvram_lock(struct tg3 *tp)
4243{
4244 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4245 int i;
4246
4247 if (tp->nvram_lock_cnt == 0) {
4248 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4249 for (i = 0; i < 8000; i++) {
4250 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4251 break;
4252 udelay(20);
4253 }
4254 if (i == 8000) {
4255 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4256 return -ENODEV;
4257 }
1da177e4 4258 }
ec41c7df 4259 tp->nvram_lock_cnt++;
4260 }
4261 return 0;
4262}
4263
4264/* tp->lock is held. */
4265static void tg3_nvram_unlock(struct tg3 *tp)
4266{
4267 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4268 if (tp->nvram_lock_cnt > 0)
4269 tp->nvram_lock_cnt--;
4270 if (tp->nvram_lock_cnt == 0)
4271 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4272 }
4273}
4274
4275/* tp->lock is held. */
4276static void tg3_enable_nvram_access(struct tg3 *tp)
4277{
4278 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4279 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4280 u32 nvaccess = tr32(NVRAM_ACCESS);
4281
4282 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4283 }
4284}
4285
4286/* tp->lock is held. */
4287static void tg3_disable_nvram_access(struct tg3 *tp)
4288{
4289 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4290 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4291 u32 nvaccess = tr32(NVRAM_ACCESS);
4292
4293 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4294 }
4295}
4296
4297/* tp->lock is held. */
4298static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4299{
4300 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4301 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4302 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4303
4304 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4305 switch (kind) {
4306 case RESET_KIND_INIT:
4307 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4308 DRV_STATE_START);
4309 break;
4310
4311 case RESET_KIND_SHUTDOWN:
4312 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4313 DRV_STATE_UNLOAD);
4314 break;
4315
4316 case RESET_KIND_SUSPEND:
4317 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4318 DRV_STATE_SUSPEND);
4319 break;
4320
4321 default:
4322 break;
4323 };
4324 }
4325}
4326
4327/* tp->lock is held. */
4328static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4329{
4330 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4331 switch (kind) {
4332 case RESET_KIND_INIT:
4333 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4334 DRV_STATE_START_DONE);
4335 break;
4336
4337 case RESET_KIND_SHUTDOWN:
4338 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4339 DRV_STATE_UNLOAD_DONE);
4340 break;
4341
4342 default:
4343 break;
4344 };
4345 }
4346}
4347
4348/* tp->lock is held. */
4349static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4350{
4351 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4352 switch (kind) {
4353 case RESET_KIND_INIT:
4354 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4355 DRV_STATE_START);
4356 break;
4357
4358 case RESET_KIND_SHUTDOWN:
4359 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4360 DRV_STATE_UNLOAD);
4361 break;
4362
4363 case RESET_KIND_SUSPEND:
4364 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4365 DRV_STATE_SUSPEND);
4366 break;
4367
4368 default:
4369 break;
4370 };
4371 }
4372}
4373
4374static void tg3_stop_fw(struct tg3 *);
4375
4376/* tp->lock is held. */
4377static int tg3_chip_reset(struct tg3 *tp)
4378{
4379 u32 val;
1ee582d8 4380 void (*write_op)(struct tg3 *, u32, u32);
4381 int i;
4382
ec41c7df 4383 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
1da177e4 4384 tg3_nvram_lock(tp);
4385 /* No matching tg3_nvram_unlock() after this because
4386 * chip reset below will undo the nvram lock.
4387 */
4388 tp->nvram_lock_cnt = 0;
4389 }
4390
4391 /*
4392 * We must avoid the readl() that normally takes place.
4393 * It locks up machines, causes machine checks, and does other
4394 * fun things. So we temporarily disable the 5701
4395 * hardware workaround while we do the reset.
4396 */
4397 write_op = tp->write32;
4398 if (write_op == tg3_write_flush_reg32)
4399 tp->write32 = tg3_write32;
4400
4401 /* do the reset */
4402 val = GRC_MISC_CFG_CORECLK_RESET;
4403
4404 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4405 if (tr32(0x7e2c) == 0x60) {
4406 tw32(0x7e2c, 0x20);
4407 }
4408 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4409 tw32(GRC_MISC_CFG, (1 << 29));
4410 val |= (1 << 29);
4411 }
4412 }
4413
4414 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4415 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4416 tw32(GRC_MISC_CFG, val);
4417
4418 /* restore 5701 hardware bug workaround write method */
4419 tp->write32 = write_op;
4420
4421 /* Unfortunately, we have to delay before the PCI read back.
4422 * Some 575X chips will not even respond to a PCI cfg access
4423 * when the reset command is given to the chip.
4424 *
4425 * How do these hardware designers expect things to work
4426 * properly if the PCI write is posted for a long period
4427 * of time? It is always necessary to have some method by
4428 * which a register read back can occur to push out the
4429 * write that does the reset.
4430 *
4431 * For most tg3 variants the trick below was working.
4432 * Ho hum...
4433 */
4434 udelay(120);
4435
4436 /* Flush PCI posted writes. The normal MMIO registers
4437 * are inaccessible at this time so this is the only
4438 * way to do this reliably (actually, this is no longer
4439 * the case, see above). I tried to use indirect
4440 * register read/write but this upset some 5701 variants.
4441 */
4442 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4443
4444 udelay(120);
4445
4446 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4447 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4448 int i;
4449 u32 cfg_val;
4450
4451 /* Wait for link training to complete. */
4452 for (i = 0; i < 5000; i++)
4453 udelay(100);
4454
4455 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4456 pci_write_config_dword(tp->pdev, 0xc4,
4457 cfg_val | (1 << 15));
4458 }
4459 /* Set PCIE max payload size and clear error status. */
4460 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4461 }
4462
4463 /* Re-enable indirect register accesses. */
4464 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4465 tp->misc_host_ctrl);
4466
4467 /* Set MAX PCI retry to zero. */
4468 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4469 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4470 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4471 val |= PCISTATE_RETRY_SAME_DMA;
4472 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4473
4474 pci_restore_state(tp->pdev);
4475
4476 /* Make sure PCI-X relaxed ordering bit is clear. */
4477 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4478 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4479 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4480
a4e2b347 4481 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4482 u32 val;
4483
4484 /* Chip reset on 5780 will reset MSI enable bit,
4485 * so need to restore it.
4486 */
4487 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4488 u16 ctrl;
4489
4490 pci_read_config_word(tp->pdev,
4491 tp->msi_cap + PCI_MSI_FLAGS,
4492 &ctrl);
4493 pci_write_config_word(tp->pdev,
4494 tp->msi_cap + PCI_MSI_FLAGS,
4495 ctrl | PCI_MSI_FLAGS_ENABLE);
4496 val = tr32(MSGINT_MODE);
4497 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4498 }
4499
4500 val = tr32(MEMARB_MODE);
4501 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4502
4503 } else
4504 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4505
4506 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4507 tg3_stop_fw(tp);
4508 tw32(0x5000, 0x400);
4509 }
4510
4511 tw32(GRC_MODE, tp->grc_mode);
4512
4513 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4514 u32 val = tr32(0xc4);
4515
4516 tw32(0xc4, val | (1 << 15));
4517 }
4518
4519 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4521 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4522 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4523 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4524 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4525 }
4526
4527 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4528 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4529 tw32_f(MAC_MODE, tp->mac_mode);
4530 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4531 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4532 tw32_f(MAC_MODE, tp->mac_mode);
4533 } else
4534 tw32_f(MAC_MODE, 0);
4535 udelay(40);
4536
4537 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4538 /* Wait for firmware initialization to complete. */
4539 for (i = 0; i < 100000; i++) {
4540 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4541 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4542 break;
4543 udelay(10);
4544 }
4545 if (i >= 100000) {
4546 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4547 "firmware will not restart magic=%08x\n",
4548 tp->dev->name, val);
4549 return -ENODEV;
4550 }
4551 }
4552
4553 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4554 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4555 u32 val = tr32(0x7c00);
4556
4557 tw32(0x7c00, val | (1 << 25));
4558 }
4559
4560 /* Reprobe ASF enable state. */
4561 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4562 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4563 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4564 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4565 u32 nic_cfg;
4566
4567 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4568 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4569 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 4570 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4571 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4572 }
4573 }
4574
4575 return 0;
4576}
4577
4578/* tp->lock is held. */
4579static void tg3_stop_fw(struct tg3 *tp)
4580{
4581 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4582 u32 val;
4583 int i;
4584
4585 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4586 val = tr32(GRC_RX_CPU_EVENT);
4587 val |= (1 << 14);
4588 tw32(GRC_RX_CPU_EVENT, val);
4589
4590 /* Wait for RX cpu to ACK the event. */
4591 for (i = 0; i < 100; i++) {
4592 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4593 break;
4594 udelay(1);
4595 }
4596 }
4597}
4598
4599/* tp->lock is held. */
944d980e 4600static int tg3_halt(struct tg3 *tp, int kind, int silent)
4601{
4602 int err;
4603
4604 tg3_stop_fw(tp);
4605
944d980e 4606 tg3_write_sig_pre_reset(tp, kind);
1da177e4 4607
b3b7d6be 4608 tg3_abort_hw(tp, silent);
4609 err = tg3_chip_reset(tp);
4610
4611 tg3_write_sig_legacy(tp, kind);
4612 tg3_write_sig_post_reset(tp, kind);
4613
4614 if (err)
4615 return err;
4616
4617 return 0;
4618}
4619
4620#define TG3_FW_RELEASE_MAJOR 0x0
4621#define TG3_FW_RELASE_MINOR 0x0
4622#define TG3_FW_RELEASE_FIX 0x0
4623#define TG3_FW_START_ADDR 0x08000000
4624#define TG3_FW_TEXT_ADDR 0x08000000
4625#define TG3_FW_TEXT_LEN 0x9c0
4626#define TG3_FW_RODATA_ADDR 0x080009c0
4627#define TG3_FW_RODATA_LEN 0x60
4628#define TG3_FW_DATA_ADDR 0x08000a40
4629#define TG3_FW_DATA_LEN 0x20
4630#define TG3_FW_SBSS_ADDR 0x08000a60
4631#define TG3_FW_SBSS_LEN 0xc
4632#define TG3_FW_BSS_ADDR 0x08000a70
4633#define TG3_FW_BSS_LEN 0x10
4634
4635static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4636 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4637 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4638 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4639 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4640 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4641 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4642 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4643 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4644 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4645 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4646 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4647 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4648 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4649 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4650 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4651 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4652 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4653 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4654 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4655 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4656 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4657 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4658 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4659 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4660 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4661 0, 0, 0, 0, 0, 0,
4662 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4663 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4664 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4665 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4666 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4667 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4668 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4669 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4670 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4671 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4672 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4673 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4674 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4675 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4676 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4677 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4678 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4679 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4680 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4681 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4682 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4683 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4684 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4685 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4686 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4687 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4688 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4689 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4690 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4691 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4692 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4693 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4694 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4695 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4696 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4697 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4698 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4699 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4700 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4701 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4702 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4703 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4704 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4705 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4706 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4707 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4708 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4709 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4710 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4711 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4712 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4713 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4714 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4715 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4716 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4717 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4718 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4719 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4720 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4721 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4722 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4723 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4724 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4725 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4726 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4727};
4728
4729static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4730 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4731 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4732 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4733 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4734 0x00000000
4735};
4736
4737#if 0 /* All zeros, don't eat up space with it. */
4738u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4739 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4740 0x00000000, 0x00000000, 0x00000000, 0x00000000
4741};
4742#endif
4743
4744#define RX_CPU_SCRATCH_BASE 0x30000
4745#define RX_CPU_SCRATCH_SIZE 0x04000
4746#define TX_CPU_SCRATCH_BASE 0x34000
4747#define TX_CPU_SCRATCH_SIZE 0x04000
4748
4749/* tp->lock is held. */
4750static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4751{
4752 int i;
4753
4754 if (offset == TX_CPU_BASE &&
4755 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4756 BUG();
4757
4758 if (offset == RX_CPU_BASE) {
4759 for (i = 0; i < 10000; i++) {
4760 tw32(offset + CPU_STATE, 0xffffffff);
4761 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4762 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4763 break;
4764 }
4765
4766 tw32(offset + CPU_STATE, 0xffffffff);
4767 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4768 udelay(10);
4769 } else {
4770 for (i = 0; i < 10000; i++) {
4771 tw32(offset + CPU_STATE, 0xffffffff);
4772 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4773 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4774 break;
4775 }
4776 }
4777
4778 if (i >= 10000) {
4779 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4780 "and %s CPU\n",
4781 tp->dev->name,
4782 (offset == RX_CPU_BASE ? "RX" : "TX"));
4783 return -ENODEV;
4784 }
4785
4786 /* Clear firmware's nvram arbitration. */
4787 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4788 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4789 return 0;
4790}
4791
4792struct fw_info {
4793 unsigned int text_base;
4794 unsigned int text_len;
4795 u32 *text_data;
4796 unsigned int rodata_base;
4797 unsigned int rodata_len;
4798 u32 *rodata_data;
4799 unsigned int data_base;
4800 unsigned int data_len;
4801 u32 *data_data;
4802};
4803
4804/* tp->lock is held. */
4805static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4806 int cpu_scratch_size, struct fw_info *info)
4807{
ec41c7df 4808 int err, lock_err, i;
4809 void (*write_op)(struct tg3 *, u32, u32);
4810
4811 if (cpu_base == TX_CPU_BASE &&
4812 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4813 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4814 "TX cpu firmware on %s which is 5705.\n",
4815 tp->dev->name);
4816 return -EINVAL;
4817 }
4818
4819 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4820 write_op = tg3_write_mem;
4821 else
4822 write_op = tg3_write_indirect_reg32;
4823
4824 /* It is possible that bootcode is still loading at this point.
4825 * Get the nvram lock first before halting the cpu.
4826 */
ec41c7df 4827 lock_err = tg3_nvram_lock(tp);
1da177e4 4828 err = tg3_halt_cpu(tp, cpu_base);
4829 if (!lock_err)
4830 tg3_nvram_unlock(tp);
4831 if (err)
4832 goto out;
4833
4834 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4835 write_op(tp, cpu_scratch_base + i, 0);
4836 tw32(cpu_base + CPU_STATE, 0xffffffff);
4837 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4838 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4839 write_op(tp, (cpu_scratch_base +
4840 (info->text_base & 0xffff) +
4841 (i * sizeof(u32))),
4842 (info->text_data ?
4843 info->text_data[i] : 0));
4844 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4845 write_op(tp, (cpu_scratch_base +
4846 (info->rodata_base & 0xffff) +
4847 (i * sizeof(u32))),
4848 (info->rodata_data ?
4849 info->rodata_data[i] : 0));
4850 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4851 write_op(tp, (cpu_scratch_base +
4852 (info->data_base & 0xffff) +
4853 (i * sizeof(u32))),
4854 (info->data_data ?
4855 info->data_data[i] : 0));
4856
4857 err = 0;
4858
4859out:
4860 return err;
4861}
4862
4863/* tp->lock is held. */
4864static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4865{
4866 struct fw_info info;
4867 int err, i;
4868
4869 info.text_base = TG3_FW_TEXT_ADDR;
4870 info.text_len = TG3_FW_TEXT_LEN;
4871 info.text_data = &tg3FwText[0];
4872 info.rodata_base = TG3_FW_RODATA_ADDR;
4873 info.rodata_len = TG3_FW_RODATA_LEN;
4874 info.rodata_data = &tg3FwRodata[0];
4875 info.data_base = TG3_FW_DATA_ADDR;
4876 info.data_len = TG3_FW_DATA_LEN;
4877 info.data_data = NULL;
4878
4879 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4880 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4881 &info);
4882 if (err)
4883 return err;
4884
4885 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4886 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4887 &info);
4888 if (err)
4889 return err;
4890
4891 /* Now startup only the RX cpu. */
4892 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4893 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4894
4895 for (i = 0; i < 5; i++) {
4896 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4897 break;
4898 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4899 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4900 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4901 udelay(1000);
4902 }
4903 if (i >= 5) {
4904 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4905 "to set RX CPU PC, is %08x should be %08x\n",
4906 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4907 TG3_FW_TEXT_ADDR);
4908 return -ENODEV;
4909 }
4910 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4911 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4912
4913 return 0;
4914}
4915
4916#if TG3_TSO_SUPPORT != 0
4917
4918#define TG3_TSO_FW_RELEASE_MAJOR 0x1
4919#define TG3_TSO_FW_RELASE_MINOR 0x6
4920#define TG3_TSO_FW_RELEASE_FIX 0x0
4921#define TG3_TSO_FW_START_ADDR 0x08000000
4922#define TG3_TSO_FW_TEXT_ADDR 0x08000000
4923#define TG3_TSO_FW_TEXT_LEN 0x1aa0
4924#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4925#define TG3_TSO_FW_RODATA_LEN 0x60
4926#define TG3_TSO_FW_DATA_ADDR 0x08001b20
4927#define TG3_TSO_FW_DATA_LEN 0x30
4928#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4929#define TG3_TSO_FW_SBSS_LEN 0x2c
4930#define TG3_TSO_FW_BSS_ADDR 0x08001b80
4931#define TG3_TSO_FW_BSS_LEN 0x894
4932
4933static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4934 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4935 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4936 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4937 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4938 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4939 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4940 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4941 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4942 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4943 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4944 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4945 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4946 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4947 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4948 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4949 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4950 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4951 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4952 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4953 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4954 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4955 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4956 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4957 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4958 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4959 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4960 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4961 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4962 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4963 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4964 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4965 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4966 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4967 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4968 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4969 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4970 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4971 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4972 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4973 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4974 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4975 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4976 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4977 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4978 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4979 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4980 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4981 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4982 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4983 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4984 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4985 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4986 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4987 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4988 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4989 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4990 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4991 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4992 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4993 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4994 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4995 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4996 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4997 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4998 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4999 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5000 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5001 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5002 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5003 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5004 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5005 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5006 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5007 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5008 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5009 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5010 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5011 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5012 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5013 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5014 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5015 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5016 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5017 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5018 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5019 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5020 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5021 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5022 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5023 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5024 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5025 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5026 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5027 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5028 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5029 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5030 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5031 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5032 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5033 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5034 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5035 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5036 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5037 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5038 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5039 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5040 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5041 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5042 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5043 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5044 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5045 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5046 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5047 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5048 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5049 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5050 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5051 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5052 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5053 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5054 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5055 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5056 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5057 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5058 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5059 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5060 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5061 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5062 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5063 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5064 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5065 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5066 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5067 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5068 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5069 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5070 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5071 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5072 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5073 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5074 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5075 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5076 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5077 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5078 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5079 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5080 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5081 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5082 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5083 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5084 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5085 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5086 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5087 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5088 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5089 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5090 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5091 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5092 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5093 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5094 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5095 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5096 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5097 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5098 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5099 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5100 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5101 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5102 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5103 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5104 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5105 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5106 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5107 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5108 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5109 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5110 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5111 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5112 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5113 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5114 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5115 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5116 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5117 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5118 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5119 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5120 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5121 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5122 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5123 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5124 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5125 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5126 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5127 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5128 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5129 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5130 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5131 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5132 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5133 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5134 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5135 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5136 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5137 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5138 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5139 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5140 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5141 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5142 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5143 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5144 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5145 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5146 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5147 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5148 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5149 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5150 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5151 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5152 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5153 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5154 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5155 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5156 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5157 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5158 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5159 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5160 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5161 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5162 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5163 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5164 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5165 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5166 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5167 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5168 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5169 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5170 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5171 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5172 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5173 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5174 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5175 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5176 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5177 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5178 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5179 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5180 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5181 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5182 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5183 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5184 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5185 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5186 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5187 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5188 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5189 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5190 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5191 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5192 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5193 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5194 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5195 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5196 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5197 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5198 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5199 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5200 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5201 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5202 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5203 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5204 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5205 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5206 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5207 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5208 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5209 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5210 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5211 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5212 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5213 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5214 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5215 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5216 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5217 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5218};
5219
5220static u32 tg3TsoFwRodata[] = {
5221 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5222 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5223 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5224 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5225 0x00000000,
5226};
5227
5228static u32 tg3TsoFwData[] = {
5229 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5230 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5231 0x00000000,
5232};
5233
5234/* 5705 needs a special version of the TSO firmware. */
5235#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5236#define TG3_TSO5_FW_RELASE_MINOR 0x2
5237#define TG3_TSO5_FW_RELEASE_FIX 0x0
5238#define TG3_TSO5_FW_START_ADDR 0x00010000
5239#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5240#define TG3_TSO5_FW_TEXT_LEN 0xe90
5241#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5242#define TG3_TSO5_FW_RODATA_LEN 0x50
5243#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5244#define TG3_TSO5_FW_DATA_LEN 0x20
5245#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5246#define TG3_TSO5_FW_SBSS_LEN 0x28
5247#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5248#define TG3_TSO5_FW_BSS_LEN 0x88
5249
5250static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5251 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5252 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5253 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5254 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5255 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5256 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5257 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5258 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5259 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5260 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5261 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5262 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5263 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5264 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5265 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5266 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5267 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5268 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5269 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5270 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5271 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5272 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5273 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5274 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5275 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5276 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5277 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5278 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5279 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5280 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5281 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5282 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5283 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5284 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5285 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5286 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5287 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5288 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5289 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5290 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5291 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5292 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5293 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5294 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5295 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5296 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5297 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5298 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5299 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5300 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5301 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5302 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5303 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5304 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5305 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5306 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5307 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5308 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5309 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5310 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5311 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5312 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5313 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5314 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5315 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5316 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5317 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5318 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5319 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5320 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5321 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5322 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5323 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5324 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5325 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5326 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5327 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5328 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5329 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5330 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5331 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5332 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5333 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5334 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5335 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5336 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5337 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5338 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5339 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5340 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5341 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5342 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5343 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5344 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5345 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5346 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5347 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5348 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5349 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5350 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5351 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5352 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5353 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5354 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5355 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5356 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5357 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5358 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5359 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5360 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5361 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5362 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5363 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5364 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5365 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5366 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5367 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5368 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5369 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5370 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5371 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5372 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5373 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5374 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5375 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5376 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5377 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5378 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5379 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5380 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5381 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5382 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5383 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5384 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5385 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5386 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5387 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5388 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5389 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5390 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5391 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5392 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5393 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5394 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5395 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5396 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5397 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5398 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5399 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5400 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5401 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5402 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5403 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5404 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5405 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5406 0x00000000, 0x00000000, 0x00000000,
5407};
5408
5409static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5410 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5411 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5412 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5413 0x00000000, 0x00000000, 0x00000000,
5414};
5415
5416static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5417 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5418 0x00000000, 0x00000000, 0x00000000,
5419};
5420
5421/* tp->lock is held. */
5422static int tg3_load_tso_firmware(struct tg3 *tp)
5423{
5424 struct fw_info info;
5425 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5426 int err, i;
5427
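	/* Chips with TSO offload in hardware need no firmware download.
	 * The 5705 loads the TSO firmware into the RX CPU and carves its
	 * scratch space out of the mbuf pool SRAM; all other TSO-capable
	 * chips use the TX CPU and its dedicated scratch area.
	 */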
5428 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5429 return 0;
5430
5431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5432 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5433 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5434 info.text_data = &tg3Tso5FwText[0];
5435 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5436 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5437 info.rodata_data = &tg3Tso5FwRodata[0];
5438 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5439 info.data_len = TG3_TSO5_FW_DATA_LEN;
5440 info.data_data = &tg3Tso5FwData[0];
5441 cpu_base = RX_CPU_BASE;
5442 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5443 cpu_scratch_size = (info.text_len +
5444 info.rodata_len +
5445 info.data_len +
5446 TG3_TSO5_FW_SBSS_LEN +
5447 TG3_TSO5_FW_BSS_LEN);
5448 } else {
5449 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5450 info.text_len = TG3_TSO_FW_TEXT_LEN;
5451 info.text_data = &tg3TsoFwText[0];
5452 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5453 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5454 info.rodata_data = &tg3TsoFwRodata[0];
5455 info.data_base = TG3_TSO_FW_DATA_ADDR;
5456 info.data_len = TG3_TSO_FW_DATA_LEN;
5457 info.data_data = &tg3TsoFwData[0];
5458 cpu_base = TX_CPU_BASE;
5459 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5460 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5461 }
5462
5463 err = tg3_load_firmware_cpu(tp, cpu_base,
5464 cpu_scratch_base, cpu_scratch_size,
5465 &info);
5466 if (err)
5467 return err;
5468
5469 /* Now startup the cpu. */
5470 tw32(cpu_base + CPU_STATE, 0xffffffff);
5471 tw32_f(cpu_base + CPU_PC, info.text_base);
5472
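	/* Poll up to five times for the CPU program counter to latch the
	 * firmware entry point, re-halting the CPU and re-writing the PC
	 * between attempts.
	 */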
5473 for (i = 0; i < 5; i++) {
5474 if (tr32(cpu_base + CPU_PC) == info.text_base)
5475 break;
5476 tw32(cpu_base + CPU_STATE, 0xffffffff);
5477 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5478 tw32_f(cpu_base + CPU_PC, info.text_base);
5479 udelay(1000);
5480 }
5481 if (i >= 5) {
5482 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s: "
5483 "CPU PC is %08x, should be %08x\n",
5484 tp->dev->name, tr32(cpu_base + CPU_PC),
5485 info.text_base);
5486 return -ENODEV;
5487 }
5488 tw32(cpu_base + CPU_STATE, 0xffffffff);
5489 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5490 return 0;
5491}
5492
5493#endif /* TG3_TSO_SUPPORT != 0 */
5494
5495/* tp->lock is held. */
5496static void __tg3_set_mac_addr(struct tg3 *tp)
5497{
5498 u32 addr_high, addr_low;
5499 int i;
5500
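	/* The station address is split into a two-byte high word and a
	 * four-byte low word and copied into all four MAC address slots
	 * (plus the twelve extended slots on 5703/5704 below).
	 */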
5501 addr_high = ((tp->dev->dev_addr[0] << 8) |
5502 tp->dev->dev_addr[1]);
5503 addr_low = ((tp->dev->dev_addr[2] << 24) |
5504 (tp->dev->dev_addr[3] << 16) |
5505 (tp->dev->dev_addr[4] << 8) |
5506 (tp->dev->dev_addr[5] << 0));
5507 for (i = 0; i < 4; i++) {
5508 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5509 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5510 }
5511
5512 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5513 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5514 for (i = 0; i < 12; i++) {
5515 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5516 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5517 }
5518 }
5519
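	/* The transmit backoff seed is simply the byte-wise sum of the
	 * station address, masked to TX_BACKOFF_SEED_MASK.
	 */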
5520 addr_high = (tp->dev->dev_addr[0] +
5521 tp->dev->dev_addr[1] +
5522 tp->dev->dev_addr[2] +
5523 tp->dev->dev_addr[3] +
5524 tp->dev->dev_addr[4] +
5525 tp->dev->dev_addr[5]) &
5526 TX_BACKOFF_SEED_MASK;
5527 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5528}
5529
5530static int tg3_set_mac_addr(struct net_device *dev, void *p)
5531{
5532 struct tg3 *tp = netdev_priv(dev);
5533 struct sockaddr *addr = p;
5534
f9804ddb
MC
5535 if (!is_valid_ether_addr(addr->sa_data))
5536 return -EINVAL;
5537
1da177e4
LT
5538 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5539
e75f7c90
MC
5540 if (!netif_running(dev))
5541 return 0;
5542
f47c11ee 5543 spin_lock_bh(&tp->lock);
1da177e4 5544 __tg3_set_mac_addr(tp);
f47c11ee 5545 spin_unlock_bh(&tp->lock);
1da177e4
LT
5546
5547 return 0;
5548}
5549
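/* Program one TG3_BDINFO block in NIC SRAM: the 64-bit host address of
 * the ring (split into high and low words), the maxlen/flags word and,
 * on pre-5705 chips only, the NIC-side descriptor address.
 */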
5550/* tp->lock is held. */
5551static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5552 dma_addr_t mapping, u32 maxlen_flags,
5553 u32 nic_addr)
5554{
5555 tg3_write_mem(tp,
5556 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5557 ((u64) mapping >> 32));
5558 tg3_write_mem(tp,
5559 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5560 ((u64) mapping & 0xffffffff));
5561 tg3_write_mem(tp,
5562 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5563 maxlen_flags);
5564
5565 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5566 tg3_write_mem(tp,
5567 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5568 nic_addr);
5569}
5570
5571static void __tg3_set_rx_mode(struct net_device *);
d244c892 5572static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
5573{
5574 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5575 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5576 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5577 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5578 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5579 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5580 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5581 }
5582 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5583 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5584 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5585 u32 val = ec->stats_block_coalesce_usecs;
5586
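		/* Force the statistics-block coalescing ticks to zero
		 * while the carrier is down.
		 */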
5587 if (!netif_carrier_ok(tp->dev))
5588 val = 0;
5589
5590 tw32(HOSTCC_STAT_COAL_TICKS, val);
5591 }
5592}
1da177e4
LT
5593
5594/* tp->lock is held. */
5595static int tg3_reset_hw(struct tg3 *tp)
5596{
5597 u32 val, rdmac_mode;
5598 int i, err, limit;
5599
5600 tg3_disable_ints(tp);
5601
5602 tg3_stop_fw(tp);
5603
5604 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5605
5606 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 5607 tg3_abort_hw(tp, 1);
1da177e4
LT
5608 }
5609
d4d2c558
MC
5610 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5611 tg3_phy_reset(tp);
5612
1da177e4
LT
5613 err = tg3_chip_reset(tp);
5614 if (err)
5615 return err;
5616
5617 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5618
5619 /* This works around an issue with Athlon chipsets on
5620 * B3 tigon3 silicon. This bit has no effect on any
5621 * other revision. But do not set this on PCI Express
5622 * chips.
5623 */
5624 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5625 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5626 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5627
5628 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5629 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5630 val = tr32(TG3PCI_PCISTATE);
5631 val |= PCISTATE_RETRY_SAME_DMA;
5632 tw32(TG3PCI_PCISTATE, val);
5633 }
5634
5635 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5636 /* Enable some hw fixes. */
5637 val = tr32(TG3PCI_MSI_DATA);
5638 val |= (1 << 26) | (1 << 28) | (1 << 29);
5639 tw32(TG3PCI_MSI_DATA, val);
5640 }
5641
5642 /* Descriptor ring init may make accesses to the
5643 * NIC SRAM area to setup the TX descriptors, so we
5644 * can only do this after the hardware has been
5645 * successfully reset.
5646 */
5647 tg3_init_rings(tp);
5648
5649 /* This value is determined during the probe time DMA
5650 * engine test, tg3_test_dma.
5651 */
5652 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5653
5654 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5655 GRC_MODE_4X_NIC_SEND_RINGS |
5656 GRC_MODE_NO_TX_PHDR_CSUM |
5657 GRC_MODE_NO_RX_PHDR_CSUM);
5658 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5659 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5660 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5661 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5662 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5663
5664 tw32(GRC_MODE,
5665 tp->grc_mode |
5666 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5667
5668 /* Set up the timer prescalar register. Clock is always 66 MHz. */
5669 val = tr32(GRC_MISC_CFG);
5670 val &= ~0xff;
5671 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5672 tw32(GRC_MISC_CFG, val);
5673
5674 /* Initialize MBUF/DESC pool. */
cbf46853 5675 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
5676 /* Do nothing. */
5677 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5678 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5680 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5681 else
5682 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5683 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5684 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5685 }
5686#if TG3_TSO_SUPPORT != 0
5687 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5688 int fw_len;
5689
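		/* Round the firmware footprint up to a 128-byte boundary
		 * and start the mbuf pool past it, shrinking the pool by
		 * the firmware size plus a further 0xa00-byte reserve.
		 */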
5690 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5691 TG3_TSO5_FW_RODATA_LEN +
5692 TG3_TSO5_FW_DATA_LEN +
5693 TG3_TSO5_FW_SBSS_LEN +
5694 TG3_TSO5_FW_BSS_LEN);
5695 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5696 tw32(BUFMGR_MB_POOL_ADDR,
5697 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5698 tw32(BUFMGR_MB_POOL_SIZE,
5699 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5700 }
5701#endif
5702
0f893dc6 5703 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
5704 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5705 tp->bufmgr_config.mbuf_read_dma_low_water);
5706 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5707 tp->bufmgr_config.mbuf_mac_rx_low_water);
5708 tw32(BUFMGR_MB_HIGH_WATER,
5709 tp->bufmgr_config.mbuf_high_water);
5710 } else {
5711 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5712 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5713 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5714 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5715 tw32(BUFMGR_MB_HIGH_WATER,
5716 tp->bufmgr_config.mbuf_high_water_jumbo);
5717 }
5718 tw32(BUFMGR_DMA_LOW_WATER,
5719 tp->bufmgr_config.dma_low_water);
5720 tw32(BUFMGR_DMA_HIGH_WATER,
5721 tp->bufmgr_config.dma_high_water);
5722
5723 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5724 for (i = 0; i < 2000; i++) {
5725 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5726 break;
5727 udelay(10);
5728 }
5729 if (i >= 2000) {
5730 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5731 tp->dev->name);
5732 return -ENODEV;
5733 }
5734
5735 /* Setup replenish threshold. */
5736 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5737
5738 /* Initialize TG3_BDINFO's at:
5739 * RCVDBDI_STD_BD: standard eth size rx ring
5740 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5741 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5742 *
5743 * like so:
5744 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5745 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5746 * ring attribute flags
5747 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5748 *
5749 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5750 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5751 *
5752 * The size of each ring is fixed in the firmware, but the location is
5753 * configurable.
5754 */
5755 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5756 ((u64) tp->rx_std_mapping >> 32));
5757 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5758 ((u64) tp->rx_std_mapping & 0xffffffff));
5759 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5760 NIC_SRAM_RX_BUFFER_DESC);
5761
5762 /* Don't even try to program the JUMBO/MINI buffer descriptor
5763 * configs on 5705.
5764 */
5765 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5766 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5767 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5768 } else {
5769 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5770 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5771
5772 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5773 BDINFO_FLAGS_DISABLED);
5774
5775 /* Setup replenish threshold. */
5776 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5777
0f893dc6 5778 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
5779 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5780 ((u64) tp->rx_jumbo_mapping >> 32));
5781 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5782 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5783 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5784 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5785 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5786 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5787 } else {
5788 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5789 BDINFO_FLAGS_DISABLED);
5790 }
5791
5792 }
5793
5794 /* There is only one send ring on 5705/5750, no need to explicitly
5795 * disable the others.
5796 */
5797 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5798 /* Clear out send RCB ring in SRAM. */
5799 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5800 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5801 BDINFO_FLAGS_DISABLED);
5802 }
5803
5804 tp->tx_prod = 0;
5805 tp->tx_cons = 0;
5806 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5807 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5808
5809 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5810 tp->tx_desc_mapping,
5811 (TG3_TX_RING_SIZE <<
5812 BDINFO_FLAGS_MAXLEN_SHIFT),
5813 NIC_SRAM_TX_BUFFER_DESC);
5814
5815 /* There is only one receive return ring on 5705/5750, no need
5816 * to explicitly disable the others.
5817 */
5818 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5819 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5820 i += TG3_BDINFO_SIZE) {
5821 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5822 BDINFO_FLAGS_DISABLED);
5823 }
5824 }
5825
5826 tp->rx_rcb_ptr = 0;
5827 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5828
5829 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5830 tp->rx_rcb_mapping,
5831 (TG3_RX_RCB_RING_SIZE(tp) <<
5832 BDINFO_FLAGS_MAXLEN_SHIFT),
5833 0);
5834
5835 tp->rx_std_ptr = tp->rx_pending;
5836 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5837 tp->rx_std_ptr);
5838
0f893dc6 5839 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
5840 tp->rx_jumbo_pending : 0;
5841 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5842 tp->rx_jumbo_ptr);
5843
5844 /* Initialize MAC address and backoff seed. */
5845 __tg3_set_mac_addr(tp);
5846
5847 /* MTU + ethernet header + FCS + optional VLAN tag */
5848 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5849
5850 /* The slot time is changed by tg3_setup_phy if we
5851 * run at gigabit with half duplex.
5852 */
5853 tw32(MAC_TX_LENGTHS,
5854 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5855 (6 << TX_LENGTHS_IPG_SHIFT) |
5856 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5857
5858 /* Receive rules. */
5859 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5860 tw32(RCVLPC_CONFIG, 0x0181);
5861
5862 /* Calculate RDMAC_MODE setting early, we need it to determine
5863 * the RCVLPC_STATE_ENABLE mask.
5864 */
5865 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5866 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5867 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5868 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5869 RDMAC_MODE_LNGREAD_ENAB);
5870 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5871 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
5872
5873 /* If statement applies to 5705 and 5750 PCI devices only */
5874 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5875 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5876 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
5877 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5878 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5879 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5880 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5881 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5882 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5883 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5884 }
5885 }
5886
85e94ced
MC
5887 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5888 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5889
1da177e4
LT
5890#if TG3_TSO_SUPPORT != 0
5891 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5892 rdmac_mode |= (1 << 27);
5893#endif
5894
5895 /* Receive/send statistics. */
5896 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5897 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5898 val = tr32(RCVLPC_STATS_ENABLE);
5899 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5900 tw32(RCVLPC_STATS_ENABLE, val);
5901 } else {
5902 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5903 }
5904 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5905 tw32(SNDDATAI_STATSENAB, 0xffffff);
5906 tw32(SNDDATAI_STATSCTRL,
5907 (SNDDATAI_SCTRL_ENABLE |
5908 SNDDATAI_SCTRL_FASTUPD));
5909
5910 /* Setup host coalescing engine. */
5911 tw32(HOSTCC_MODE, 0);
5912 for (i = 0; i < 2000; i++) {
5913 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5914 break;
5915 udelay(10);
5916 }
5917
d244c892 5918 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
5919
5920 /* set status block DMA address */
5921 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5922 ((u64) tp->status_mapping >> 32));
5923 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5924 ((u64) tp->status_mapping & 0xffffffff));
5925
5926 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5927 /* Status/statistics block address. See tg3_timer,
5928 * the tg3_periodic_fetch_stats call there, and
5929 * tg3_get_stats to see how this works for 5705/5750 chips.
5930 */
1da177e4
LT
5931 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5932 ((u64) tp->stats_mapping >> 32));
5933 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5934 ((u64) tp->stats_mapping & 0xffffffff));
5935 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5936 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5937 }
5938
5939 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5940
5941 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5942 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5943 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5944 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5945
5946 /* Clear statistics/status block in chip, and status block in ram. */
5947 for (i = NIC_SRAM_STATS_BLK;
5948 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5949 i += sizeof(u32)) {
5950 tg3_write_mem(tp, i, 0);
5951 udelay(40);
5952 }
5953 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5954
c94e3941
MC
5955 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5956 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5957 /* reset to prevent losing 1st rx packet intermittently */
5958 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5959 udelay(10);
5960 }
5961
1da177e4
LT
5962 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5963 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5964 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5965 udelay(40);
5966
314fba34
MC
5967 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5968 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5969 * register to preserve the GPIO settings for LOMs. The GPIOs,
5970 * whether used as inputs or outputs, are set by boot code after
5971 * reset.
5972 */
5973 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5974 u32 gpio_mask;
5975
5976 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5977 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
5978
5979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5980 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5981 GRC_LCLCTRL_GPIO_OUTPUT3;
5982
314fba34
MC
5983 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5984
5985 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
5986 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5987 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 5988 }
1da177e4
LT
5989 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5990 udelay(100);
5991
09ee929c 5992 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 5993 tp->last_tag = 0;
1da177e4
LT
5994
5995 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5996 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5997 udelay(40);
5998 }
5999
6000 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6001 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6002 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6003 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6004 WDMAC_MODE_LNGREAD_ENAB);
6005
85e94ced
MC
6006 /* If statement applies to 5705 and 5750 PCI devices only */
6007 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6008 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6010 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6011 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6012 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6013 /* nothing */
6014 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6015 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6016 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6017 val |= WDMAC_MODE_RX_ACCEL;
6018 }
6019 }
6020
6021 tw32_f(WDMAC_MODE, val);
6022 udelay(40);
6023
6024 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6025 val = tr32(TG3PCI_X_CAPS);
6026 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6027 val &= ~PCIX_CAPS_BURST_MASK;
6028 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6029 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6030 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6031 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6032 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6033 val |= (tp->split_mode_max_reqs <<
6034 PCIX_CAPS_SPLIT_SHIFT);
6035 }
6036 tw32(TG3PCI_X_CAPS, val);
6037 }
6038
6039 tw32_f(RDMAC_MODE, rdmac_mode);
6040 udelay(40);
6041
6042 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6043 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6044 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6045 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6046 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6047 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6048 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6049 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6050#if TG3_TSO_SUPPORT != 0
6051 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6052 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6053#endif
6054 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6055 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6056
6057 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6058 err = tg3_load_5701_a0_firmware_fix(tp);
6059 if (err)
6060 return err;
6061 }
6062
6063#if TG3_TSO_SUPPORT != 0
6064 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6065 err = tg3_load_tso_firmware(tp);
6066 if (err)
6067 return err;
6068 }
6069#endif
6070
6071 tp->tx_mode = TX_MODE_ENABLE;
6072 tw32_f(MAC_TX_MODE, tp->tx_mode);
6073 udelay(100);
6074
6075 tp->rx_mode = RX_MODE_ENABLE;
6076 tw32_f(MAC_RX_MODE, tp->rx_mode);
6077 udelay(10);
6078
6079 if (tp->link_config.phy_is_low_power) {
6080 tp->link_config.phy_is_low_power = 0;
6081 tp->link_config.speed = tp->link_config.orig_speed;
6082 tp->link_config.duplex = tp->link_config.orig_duplex;
6083 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6084 }
6085
6086 tp->mi_mode = MAC_MI_MODE_BASE;
6087 tw32_f(MAC_MI_MODE, tp->mi_mode);
6088 udelay(80);
6089
6090 tw32(MAC_LED_CTRL, tp->led_ctrl);
6091
6092 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6093 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6094 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6095 udelay(10);
6096 }
6097 tw32_f(MAC_RX_MODE, tp->rx_mode);
6098 udelay(10);
6099
6100 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6101 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6102 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6103 /* Set drive transmission level to 1.2V */
6104 /* only if the signal pre-emphasis bit is not set */
6105 val = tr32(MAC_SERDES_CFG);
6106 val &= 0xfffff000;
6107 val |= 0x880;
6108 tw32(MAC_SERDES_CFG, val);
6109 }
6110 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6111 tw32(MAC_SERDES_CFG, 0x616000);
6112 }
6113
6114 /* Prevent chip from dropping frames when flow control
6115 * is enabled.
6116 */
6117 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6118
6119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6120 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6121 /* Use hardware link auto-negotiation */
6122 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6123 }
6124
d4d2c558
MC
6125 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6126 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6127 u32 tmp;
6128
6129 tmp = tr32(SERDES_RX_CTRL);
6130 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6131 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6132 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6133 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6134 }
6135
1da177e4
LT
6136 err = tg3_setup_phy(tp, 1);
6137 if (err)
6138 return err;
6139
6140 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6141 u32 tmp;
6142
6143 /* Clear CRC stats. */
6144 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6145 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6146 tg3_readphy(tp, 0x14, &tmp);
6147 }
6148 }
6149
6150 __tg3_set_rx_mode(tp->dev);
6151
6152 /* Initialize receive rules. */
6153 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6154 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6155 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6156 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6157
4cf78e4f 6158 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6159 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6160 limit = 8;
6161 else
6162 limit = 16;
6163 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6164 limit -= 4;
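	/* Deliberate fall-through: every receive rule/value pair from slot
	 * (limit - 1) down to slot 4 is cleared; slots 0 and 1 were
	 * programmed above and slots 2 and 3 are left untouched.
	 */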
6165 switch (limit) {
6166 case 16:
6167 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6168 case 15:
6169 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6170 case 14:
6171 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6172 case 13:
6173 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6174 case 12:
6175 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6176 case 11:
6177 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6178 case 10:
6179 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6180 case 9:
6181 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6182 case 8:
6183 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6184 case 7:
6185 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6186 case 6:
6187 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6188 case 5:
6189 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6190 case 4:
6191 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6192 case 3:
6193 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6194 case 2:
6195 case 1:
6196
6197 default:
6198 break;
6199 };
6200
6201 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6202
1da177e4
LT
6203 return 0;
6204}
6205
6206/* Called at device open time to get the chip ready for
6207 * packet processing. Invoked with tp->lock held.
6208 */
6209static int tg3_init_hw(struct tg3 *tp)
6210{
6211 int err;
6212
6213 /* Force the chip into D0. */
bc1c7567 6214 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
6215 if (err)
6216 goto out;
6217
6218 tg3_switch_clocks(tp);
6219
6220 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6221
6222 err = tg3_reset_hw(tp);
6223
6224out:
6225 return err;
6226}
6227
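/* Fold a 32-bit hardware counter into a 64-bit software counter,
 * carrying into the high word whenever the low word wraps.
 */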
6228#define TG3_STAT_ADD32(PSTAT, REG) \
6229do { u32 __val = tr32(REG); \
6230 (PSTAT)->low += __val; \
6231 if ((PSTAT)->low < __val) \
6232 (PSTAT)->high += 1; \
6233} while (0)
6234
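/* 5705 and later chips do not DMA a statistics block to host memory,
 * so the timer periodically folds the chip's 32-bit MAC counters into
 * tp->hw_stats (see tg3_timer below).
 */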
6235static void tg3_periodic_fetch_stats(struct tg3 *tp)
6236{
6237 struct tg3_hw_stats *sp = tp->hw_stats;
6238
6239 if (!netif_carrier_ok(tp->dev))
6240 return;
6241
6242 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6243 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6244 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6245 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6246 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6247 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6248 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6249 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6250 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6251 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6252 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6253 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6254 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6255
6256 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6257 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6258 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6259 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6260 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6261 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6262 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6263 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6264 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6265 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6266 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6267 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6268 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6269 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6270}
6271
6272static void tg3_timer(unsigned long __opaque)
6273{
6274 struct tg3 *tp = (struct tg3 *) __opaque;
1da177e4 6275
f47c11ee 6276 spin_lock(&tp->lock);
1da177e4 6277
fac9b83e
DM
6278 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6279 /* All of this garbage exists because, when using non-tagged
6280 * IRQ status, the mailbox/status_block protocol the chip
6281 * uses with the CPU is race prone.
6282 */
6283 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6284 tw32(GRC_LOCAL_CTRL,
6285 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6286 } else {
6287 tw32(HOSTCC_MODE, tp->coalesce_mode |
6288 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6289 }
1da177e4 6290
fac9b83e
DM
6291 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6292 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
f47c11ee 6293 spin_unlock(&tp->lock);
fac9b83e
DM
6294 schedule_work(&tp->reset_task);
6295 return;
6296 }
1da177e4
LT
6297 }
6298
1da177e4
LT
6299 /* This part only runs once per second. */
6300 if (!--tp->timer_counter) {
fac9b83e
DM
6301 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6302 tg3_periodic_fetch_stats(tp);
6303
1da177e4
LT
6304 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6305 u32 mac_stat;
6306 int phy_event;
6307
6308 mac_stat = tr32(MAC_STATUS);
6309
6310 phy_event = 0;
6311 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6312 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6313 phy_event = 1;
6314 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6315 phy_event = 1;
6316
6317 if (phy_event)
6318 tg3_setup_phy(tp, 0);
6319 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6320 u32 mac_stat = tr32(MAC_STATUS);
6321 int need_setup = 0;
6322
6323 if (netif_carrier_ok(tp->dev) &&
6324 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6325 need_setup = 1;
6326 }
6327 if (! netif_carrier_ok(tp->dev) &&
6328 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6329 MAC_STATUS_SIGNAL_DET))) {
6330 need_setup = 1;
6331 }
6332 if (need_setup) {
6333 tw32_f(MAC_MODE,
6334 (tp->mac_mode &
6335 ~MAC_MODE_PORT_MODE_MASK));
6336 udelay(40);
6337 tw32_f(MAC_MODE, tp->mac_mode);
6338 udelay(40);
6339 tg3_setup_phy(tp, 0);
6340 }
747e8f8b
MC
6341 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6342 tg3_serdes_parallel_detect(tp);
1da177e4
LT
6343
6344 tp->timer_counter = tp->timer_multiplier;
6345 }
6346
28fbef78 6347 /* Heartbeat is only sent once every 2 seconds. */
1da177e4
LT
6348 if (!--tp->asf_counter) {
6349 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6350 u32 val;
6351
28fbef78
MC
6352 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6353 FWCMD_NICDRV_ALIVE2);
6354 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6355 /* 5 seconds timeout */
6356 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
1da177e4
LT
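			/* Setting bit 14 of GRC_RX_CPU_EVENT signals the
			 * firmware that the heartbeat command written to the
			 * mailbox above is ready to be processed.
			 */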
6357 val = tr32(GRC_RX_CPU_EVENT);
6358 val |= (1 << 14);
6359 tw32(GRC_RX_CPU_EVENT, val);
6360 }
6361 tp->asf_counter = tp->asf_multiplier;
6362 }
6363
f47c11ee 6364 spin_unlock(&tp->lock);
1da177e4
LT
6365
6366 tp->timer.expires = jiffies + tp->timer_offset;
6367 add_timer(&tp->timer);
6368}
6369
7938109f
MC
6370static int tg3_test_interrupt(struct tg3 *tp)
6371{
6372 struct net_device *dev = tp->dev;
6373 int err, i;
6374 u32 int_mbox = 0;
6375
d4bc3927
MC
6376 if (!netif_running(dev))
6377 return -ENODEV;
6378
7938109f
MC
6379 tg3_disable_ints(tp);
6380
6381 free_irq(tp->pdev->irq, dev);
6382
6383 err = request_irq(tp->pdev->irq, tg3_test_isr,
f4d0ee98 6384 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
7938109f
MC
6385 if (err)
6386 return err;
6387
38f3843e 6388 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
6389 tg3_enable_ints(tp);
6390
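	/* Force an immediate coalescing interrupt, then poll the interrupt
	 * mailbox up to five times for a non-zero value as evidence that
	 * the interrupt was actually delivered.
	 */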
6391 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6392 HOSTCC_MODE_NOW);
6393
6394 for (i = 0; i < 5; i++) {
09ee929c
MC
6395 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6396 TG3_64BIT_REG_LOW);
7938109f
MC
6397 if (int_mbox != 0)
6398 break;
6399 msleep(10);
6400 }
6401
6402 tg3_disable_ints(tp);
6403
6404 free_irq(tp->pdev->irq, dev);
6405
6406 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6407 err = request_irq(tp->pdev->irq, tg3_msi,
f4d0ee98 6408 SA_SAMPLE_RANDOM, dev->name, dev);
fac9b83e
DM
6409 else {
6410 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6411 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6412 fn = tg3_interrupt_tagged;
6413 err = request_irq(tp->pdev->irq, fn,
f4d0ee98 6414 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
fac9b83e 6415 }
7938109f
MC
6416
6417 if (err)
6418 return err;
6419
6420 if (int_mbox != 0)
6421 return 0;
6422
6423 return -EIO;
6424}
6425
6426/* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
6427 * successfully restored.
6428 */
6429static int tg3_test_msi(struct tg3 *tp)
6430{
6431 struct net_device *dev = tp->dev;
6432 int err;
6433 u16 pci_cmd;
6434
6435 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6436 return 0;
6437
6438 /* Turn off SERR reporting in case MSI terminates with Master
6439 * Abort.
6440 */
6441 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6442 pci_write_config_word(tp->pdev, PCI_COMMAND,
6443 pci_cmd & ~PCI_COMMAND_SERR);
6444
6445 err = tg3_test_interrupt(tp);
6446
6447 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6448
6449 if (!err)
6450 return 0;
6451
6452 /* other failures */
6453 if (err != -EIO)
6454 return err;
6455
6456 /* MSI test failed, go back to INTx mode */
6457 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6458 "switching to INTx mode. Please report this failure to "
6459 "the PCI maintainer and include system chipset information.\n",
6460 tp->dev->name);
6461
6462 free_irq(tp->pdev->irq, dev);
6463 pci_disable_msi(tp->pdev);
6464
6465 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6466
fac9b83e
DM
6467 {
6468 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6469 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6470 fn = tg3_interrupt_tagged;
7938109f 6471
fac9b83e
DM
6472 err = request_irq(tp->pdev->irq, fn,
6473 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6474 }
7938109f
MC
6475 if (err)
6476 return err;
6477
6478 /* Need to reset the chip because the MSI cycle may have terminated
6479 * with Master Abort.
6480 */
f47c11ee 6481 tg3_full_lock(tp, 1);
7938109f 6482
944d980e 6483 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f
MC
6484 err = tg3_init_hw(tp);
6485
f47c11ee 6486 tg3_full_unlock(tp);
7938109f
MC
6487
6488 if (err)
6489 free_irq(tp->pdev->irq, dev);
6490
6491 return err;
6492}
6493
1da177e4
LT
6494static int tg3_open(struct net_device *dev)
6495{
6496 struct tg3 *tp = netdev_priv(dev);
6497 int err;
6498
f47c11ee 6499 tg3_full_lock(tp, 0);
1da177e4 6500
bc1c7567
MC
6501 err = tg3_set_power_state(tp, PCI_D0);
6502 if (err)
6503 return err;
6504
1da177e4
LT
6505 tg3_disable_ints(tp);
6506 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6507
f47c11ee 6508 tg3_full_unlock(tp);
1da177e4
LT
6509
6510 /* The placement of this call is tied
6511 * to the setup and use of Host TX descriptors.
6512 */
6513 err = tg3_alloc_consistent(tp);
6514 if (err)
6515 return err;
6516
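	/* MSI is only attempted on 5750-and-later chips, skipping the 5750
	 * AX and BX revisions and 5714 devices whose peer pointer refers
	 * back to themselves (i.e. no second port).
	 */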
88b06bc2
MC
6517 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6518 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
d4d2c558
MC
6519 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6520 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6521 (tp->pdev_peer == tp->pdev))) {
fac9b83e
DM
6522 /* All MSI supporting chips should support tagged
6523 * status. Assert that this is the case.
6524 */
6525 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6526 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6527 "Not using MSI.\n", tp->dev->name);
6528 } else if (pci_enable_msi(tp->pdev) == 0) {
88b06bc2
MC
6529 u32 msi_mode;
6530
6531 msi_mode = tr32(MSGINT_MODE);
6532 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6533 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6534 }
6535 }
6536 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6537 err = request_irq(tp->pdev->irq, tg3_msi,
f4d0ee98 6538 SA_SAMPLE_RANDOM, dev->name, dev);
fac9b83e
DM
6539 else {
6540 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6541 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6542 fn = tg3_interrupt_tagged;
6543
6544 err = request_irq(tp->pdev->irq, fn,
f4d0ee98 6545 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
fac9b83e 6546 }
1da177e4
LT
6547
6548 if (err) {
88b06bc2
MC
6549 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6550 pci_disable_msi(tp->pdev);
6551 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6552 }
1da177e4
LT
6553 tg3_free_consistent(tp);
6554 return err;
6555 }
6556
f47c11ee 6557 tg3_full_lock(tp, 0);
1da177e4
LT
6558
6559 err = tg3_init_hw(tp);
6560 if (err) {
944d980e 6561 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
6562 tg3_free_rings(tp);
6563 } else {
fac9b83e
DM
6564 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6565 tp->timer_offset = HZ;
6566 else
6567 tp->timer_offset = HZ / 10;
6568
6569 BUG_ON(tp->timer_offset > HZ);
6570 tp->timer_counter = tp->timer_multiplier =
6571 (HZ / tp->timer_offset);
6572 tp->asf_counter = tp->asf_multiplier =
28fbef78 6573 ((HZ / tp->timer_offset) * 2);
1da177e4
LT
6574
6575 init_timer(&tp->timer);
6576 tp->timer.expires = jiffies + tp->timer_offset;
6577 tp->timer.data = (unsigned long) tp;
6578 tp->timer.function = tg3_timer;
1da177e4
LT
6579 }
6580
f47c11ee 6581 tg3_full_unlock(tp);
1da177e4
LT
6582
6583 if (err) {
88b06bc2
MC
6584 free_irq(tp->pdev->irq, dev);
6585 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6586 pci_disable_msi(tp->pdev);
6587 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6588 }
1da177e4
LT
6589 tg3_free_consistent(tp);
6590 return err;
6591 }
6592
7938109f
MC
6593 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6594 err = tg3_test_msi(tp);
fac9b83e 6595
7938109f 6596 if (err) {
f47c11ee 6597 tg3_full_lock(tp, 0);
7938109f
MC
6598
6599 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6600 pci_disable_msi(tp->pdev);
6601 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6602 }
944d980e 6603 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6604 tg3_free_rings(tp);
6605 tg3_free_consistent(tp);
6606
f47c11ee 6607 tg3_full_unlock(tp);
6608
6609 return err;
6610 }
6611 }
6612
f47c11ee 6613 tg3_full_lock(tp, 0);
1da177e4 6614
6615 add_timer(&tp->timer);
6616 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6617 tg3_enable_ints(tp);
6618
f47c11ee 6619 tg3_full_unlock(tp);
6620
6621 netif_start_queue(dev);
6622
6623 return 0;
6624}
6625
6626#if 0
6627/*static*/ void tg3_dump_state(struct tg3 *tp)
6628{
6629 u32 val32, val32_2, val32_3, val32_4, val32_5;
6630 u16 val16;
6631 int i;
6632
6633 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6634 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6635 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6636 val16, val32);
6637
6638 /* MAC block */
6639 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6640 tr32(MAC_MODE), tr32(MAC_STATUS));
6641 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6642 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6643 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6644 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6645 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6646 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6647
6648 /* Send data initiator control block */
6649 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6650 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6651 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6652 tr32(SNDDATAI_STATSCTRL));
6653
6654 /* Send data completion control block */
6655 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6656
6657 /* Send BD ring selector block */
6658 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6659 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6660
6661 /* Send BD initiator control block */
6662 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6663 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6664
6665 /* Send BD completion control block */
6666 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6667
6668 /* Receive list placement control block */
6669 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6670 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6671 printk(" RCVLPC_STATSCTRL[%08x]\n",
6672 tr32(RCVLPC_STATSCTRL));
6673
6674 /* Receive data and receive BD initiator control block */
6675 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6676 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6677
6678 /* Receive data completion control block */
6679 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6680 tr32(RCVDCC_MODE));
6681
6682 /* Receive BD initiator control block */
6683 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6684 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6685
6686 /* Receive BD completion control block */
6687 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6688 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6689
6690 /* Receive list selector control block */
6691 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6692 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6693
6694 /* Mbuf cluster free block */
6695 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6696 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6697
6698 /* Host coalescing control block */
6699 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6700 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6701 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6702 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6703 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6704 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6705 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6706 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6707 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6708 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6709 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6710 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6711
6712 /* Memory arbiter control block */
6713 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6714 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6715
6716 /* Buffer manager control block */
6717 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6718 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6719 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6720 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6721 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6722 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6723 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6724 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6725
6726 /* Read DMA control block */
6727 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6728 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6729
6730 /* Write DMA control block */
6731 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6732 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6733
6734 /* DMA completion block */
6735 printk("DEBUG: DMAC_MODE[%08x]\n",
6736 tr32(DMAC_MODE));
6737
6738 /* GRC block */
6739 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6740 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6741 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6742 tr32(GRC_LOCAL_CTRL));
6743
6744 /* TG3_BDINFOs */
6745 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6746 tr32(RCVDBDI_JUMBO_BD + 0x0),
6747 tr32(RCVDBDI_JUMBO_BD + 0x4),
6748 tr32(RCVDBDI_JUMBO_BD + 0x8),
6749 tr32(RCVDBDI_JUMBO_BD + 0xc));
6750 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6751 tr32(RCVDBDI_STD_BD + 0x0),
6752 tr32(RCVDBDI_STD_BD + 0x4),
6753 tr32(RCVDBDI_STD_BD + 0x8),
6754 tr32(RCVDBDI_STD_BD + 0xc));
6755 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6756 tr32(RCVDBDI_MINI_BD + 0x0),
6757 tr32(RCVDBDI_MINI_BD + 0x4),
6758 tr32(RCVDBDI_MINI_BD + 0x8),
6759 tr32(RCVDBDI_MINI_BD + 0xc));
6760
6761 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6762 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6763 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6764 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6765 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6766 val32, val32_2, val32_3, val32_4);
6767
6768 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6769 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6770 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6771 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6772 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6773 val32, val32_2, val32_3, val32_4);
6774
6775 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6776 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6777 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6778 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6779 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6780 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6781 val32, val32_2, val32_3, val32_4, val32_5);
6782
6783 /* SW status block */
6784 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6785 tp->hw_status->status,
6786 tp->hw_status->status_tag,
6787 tp->hw_status->rx_jumbo_consumer,
6788 tp->hw_status->rx_consumer,
6789 tp->hw_status->rx_mini_consumer,
6790 tp->hw_status->idx[0].rx_producer,
6791 tp->hw_status->idx[0].tx_consumer);
6792
6793 /* SW statistics block */
6794 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6795 ((u32 *)tp->hw_stats)[0],
6796 ((u32 *)tp->hw_stats)[1],
6797 ((u32 *)tp->hw_stats)[2],
6798 ((u32 *)tp->hw_stats)[3]);
6799
6800 /* Mailboxes */
6801 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
09ee929c
MC
6802 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6803 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6804 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6805 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
1da177e4
LT
6806
6807 /* NIC side send descriptors. */
6808 for (i = 0; i < 6; i++) {
6809 unsigned long txd;
6810
6811 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6812 + (i * sizeof(struct tg3_tx_buffer_desc));
6813 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6814 i,
6815 readl(txd + 0x0), readl(txd + 0x4),
6816 readl(txd + 0x8), readl(txd + 0xc));
6817 }
6818
6819 /* NIC side RX descriptors. */
6820 for (i = 0; i < 6; i++) {
6821 unsigned long rxd;
6822
6823 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6824 + (i * sizeof(struct tg3_rx_buffer_desc));
6825 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6826 i,
6827 readl(rxd + 0x0), readl(rxd + 0x4),
6828 readl(rxd + 0x8), readl(rxd + 0xc));
6829 rxd += (4 * sizeof(u32));
6830 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6831 i,
6832 readl(rxd + 0x0), readl(rxd + 0x4),
6833 readl(rxd + 0x8), readl(rxd + 0xc));
6834 }
6835
6836 for (i = 0; i < 6; i++) {
6837 unsigned long rxd;
6838
6839 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6840 + (i * sizeof(struct tg3_rx_buffer_desc));
6841 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6842 i,
6843 readl(rxd + 0x0), readl(rxd + 0x4),
6844 readl(rxd + 0x8), readl(rxd + 0xc));
6845 rxd += (4 * sizeof(u32));
6846 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6847 i,
6848 readl(rxd + 0x0), readl(rxd + 0x4),
6849 readl(rxd + 0x8), readl(rxd + 0xc));
6850 }
6851}
6852#endif
6853
6854static struct net_device_stats *tg3_get_stats(struct net_device *);
6855static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6856
6857static int tg3_close(struct net_device *dev)
6858{
6859 struct tg3 *tp = netdev_priv(dev);
6860
6861 /* Calling flush_scheduled_work() may deadlock because
6862 * linkwatch_event() may be on the workqueue and it will try to get
6863 * the rtnl_lock which we are holding.
6864 */
6865 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
6866 msleep(1);
6867
6868 netif_stop_queue(dev);
6869
6870 del_timer_sync(&tp->timer);
6871
f47c11ee 6872 tg3_full_lock(tp, 1);
6873#if 0
6874 tg3_dump_state(tp);
6875#endif
6876
6877 tg3_disable_ints(tp);
6878
944d980e 6879 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6880 tg3_free_rings(tp);
6881 tp->tg3_flags &=
6882 ~(TG3_FLAG_INIT_COMPLETE |
6883 TG3_FLAG_GOT_SERDES_FLOWCTL);
1da177e4 6884
f47c11ee 6885 tg3_full_unlock(tp);
1da177e4 6886
6887 free_irq(tp->pdev->irq, dev);
6888 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6889 pci_disable_msi(tp->pdev);
6890 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6891 }
6892
6893 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6894 sizeof(tp->net_stats_prev));
6895 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6896 sizeof(tp->estats_prev));
6897
6898 tg3_free_consistent(tp);
6899
6900 tg3_set_power_state(tp, PCI_D3hot);
6901
6902 netif_carrier_off(tp->dev);
6903
6904 return 0;
6905}
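/*
 * Reviewer's note on the two memcpy() calls in tg3_close() above (not
 * part of the original source): tg3_free_consistent() releases the DMA
 * block backing tp->hw_stats, so the hardware counters accumulated so
 * far would otherwise be lost across an ifconfig down/up cycle.
 * Snapshotting them into tp->net_stats_prev / tp->estats_prev lets
 * tg3_get_stats() and tg3_get_estats() keep reporting cumulative totals,
 * e.g.
 *
 *	stats->rx_bytes = old_stats->rx_bytes +
 *			  get_stat64(&hw_stats->rx_octets);
 *
 * where old_stats is the snapshot taken here.
 */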
6906
6907static inline unsigned long get_stat64(tg3_stat64_t *val)
6908{
6909 unsigned long ret;
6910
6911#if (BITS_PER_LONG == 32)
6912 ret = val->low;
6913#else
6914 ret = ((u64)val->high << 32) | ((u64)val->low);
6915#endif
6916 return ret;
6917}
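/*
 * Illustrative note (reviewer's addition): tg3_stat64_t holds a 64-bit
 * counter as two 32-bit words.  On 64-bit hosts the halves are
 * recombined, e.g. high = 0x00000001, low = 0x00000004 yields
 * 0x100000004.  On 32-bit hosts "unsigned long" cannot hold the full
 * value, so only the low word is returned.
 */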
6918
6919static unsigned long calc_crc_errors(struct tg3 *tp)
6920{
6921 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6922
6923 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6924 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6926 u32 val;
6927
f47c11ee 6928 spin_lock_bh(&tp->lock);
6929 if (!tg3_readphy(tp, 0x1e, &val)) {
6930 tg3_writephy(tp, 0x1e, val | 0x8000);
6931 tg3_readphy(tp, 0x14, &val);
6932 } else
6933 val = 0;
f47c11ee 6934 spin_unlock_bh(&tp->lock);
6935
6936 tp->phy_crc_errors += val;
6937
6938 return tp->phy_crc_errors;
6939 }
6940
6941 return get_stat64(&hw_stats->rx_fcs_errors);
6942}
6943
6944#define ESTAT_ADD(member) \
6945 estats->member = old_estats->member + \
6946 get_stat64(&hw_stats->member)
6947
6948static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6949{
6950 struct tg3_ethtool_stats *estats = &tp->estats;
6951 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6952 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6953
6954 if (!hw_stats)
6955 return old_estats;
6956
6957 ESTAT_ADD(rx_octets);
6958 ESTAT_ADD(rx_fragments);
6959 ESTAT_ADD(rx_ucast_packets);
6960 ESTAT_ADD(rx_mcast_packets);
6961 ESTAT_ADD(rx_bcast_packets);
6962 ESTAT_ADD(rx_fcs_errors);
6963 ESTAT_ADD(rx_align_errors);
6964 ESTAT_ADD(rx_xon_pause_rcvd);
6965 ESTAT_ADD(rx_xoff_pause_rcvd);
6966 ESTAT_ADD(rx_mac_ctrl_rcvd);
6967 ESTAT_ADD(rx_xoff_entered);
6968 ESTAT_ADD(rx_frame_too_long_errors);
6969 ESTAT_ADD(rx_jabbers);
6970 ESTAT_ADD(rx_undersize_packets);
6971 ESTAT_ADD(rx_in_length_errors);
6972 ESTAT_ADD(rx_out_length_errors);
6973 ESTAT_ADD(rx_64_or_less_octet_packets);
6974 ESTAT_ADD(rx_65_to_127_octet_packets);
6975 ESTAT_ADD(rx_128_to_255_octet_packets);
6976 ESTAT_ADD(rx_256_to_511_octet_packets);
6977 ESTAT_ADD(rx_512_to_1023_octet_packets);
6978 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6979 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6980 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6981 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6982 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6983
6984 ESTAT_ADD(tx_octets);
6985 ESTAT_ADD(tx_collisions);
6986 ESTAT_ADD(tx_xon_sent);
6987 ESTAT_ADD(tx_xoff_sent);
6988 ESTAT_ADD(tx_flow_control);
6989 ESTAT_ADD(tx_mac_errors);
6990 ESTAT_ADD(tx_single_collisions);
6991 ESTAT_ADD(tx_mult_collisions);
6992 ESTAT_ADD(tx_deferred);
6993 ESTAT_ADD(tx_excessive_collisions);
6994 ESTAT_ADD(tx_late_collisions);
6995 ESTAT_ADD(tx_collide_2times);
6996 ESTAT_ADD(tx_collide_3times);
6997 ESTAT_ADD(tx_collide_4times);
6998 ESTAT_ADD(tx_collide_5times);
6999 ESTAT_ADD(tx_collide_6times);
7000 ESTAT_ADD(tx_collide_7times);
7001 ESTAT_ADD(tx_collide_8times);
7002 ESTAT_ADD(tx_collide_9times);
7003 ESTAT_ADD(tx_collide_10times);
7004 ESTAT_ADD(tx_collide_11times);
7005 ESTAT_ADD(tx_collide_12times);
7006 ESTAT_ADD(tx_collide_13times);
7007 ESTAT_ADD(tx_collide_14times);
7008 ESTAT_ADD(tx_collide_15times);
7009 ESTAT_ADD(tx_ucast_packets);
7010 ESTAT_ADD(tx_mcast_packets);
7011 ESTAT_ADD(tx_bcast_packets);
7012 ESTAT_ADD(tx_carrier_sense_errors);
7013 ESTAT_ADD(tx_discards);
7014 ESTAT_ADD(tx_errors);
7015
7016 ESTAT_ADD(dma_writeq_full);
7017 ESTAT_ADD(dma_write_prioq_full);
7018 ESTAT_ADD(rxbds_empty);
7019 ESTAT_ADD(rx_discards);
7020 ESTAT_ADD(rx_errors);
7021 ESTAT_ADD(rx_threshold_hit);
7022
7023 ESTAT_ADD(dma_readq_full);
7024 ESTAT_ADD(dma_read_prioq_full);
7025 ESTAT_ADD(tx_comp_queue_full);
7026
7027 ESTAT_ADD(ring_set_send_prod_index);
7028 ESTAT_ADD(ring_status_update);
7029 ESTAT_ADD(nic_irqs);
7030 ESTAT_ADD(nic_avoided_irqs);
7031 ESTAT_ADD(nic_tx_threshold_hit);
7032
7033 return estats;
7034}
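/*
 * Reviewer's note: ESTAT_ADD() folds the live hardware counter on top of
 * the snapshot taken at the last tg3_close().  For one member the macro
 * above expands to, roughly:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * so the totals reported through "ethtool -S" stay cumulative across
 * down/up cycles.
 */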
7035
7036static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7037{
7038 struct tg3 *tp = netdev_priv(dev);
7039 struct net_device_stats *stats = &tp->net_stats;
7040 struct net_device_stats *old_stats = &tp->net_stats_prev;
7041 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7042
7043 if (!hw_stats)
7044 return old_stats;
7045
7046 stats->rx_packets = old_stats->rx_packets +
7047 get_stat64(&hw_stats->rx_ucast_packets) +
7048 get_stat64(&hw_stats->rx_mcast_packets) +
7049 get_stat64(&hw_stats->rx_bcast_packets);
7050
7051 stats->tx_packets = old_stats->tx_packets +
7052 get_stat64(&hw_stats->tx_ucast_packets) +
7053 get_stat64(&hw_stats->tx_mcast_packets) +
7054 get_stat64(&hw_stats->tx_bcast_packets);
7055
7056 stats->rx_bytes = old_stats->rx_bytes +
7057 get_stat64(&hw_stats->rx_octets);
7058 stats->tx_bytes = old_stats->tx_bytes +
7059 get_stat64(&hw_stats->tx_octets);
7060
7061 stats->rx_errors = old_stats->rx_errors +
4f63b877 7062 get_stat64(&hw_stats->rx_errors);
7063 stats->tx_errors = old_stats->tx_errors +
7064 get_stat64(&hw_stats->tx_errors) +
7065 get_stat64(&hw_stats->tx_mac_errors) +
7066 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7067 get_stat64(&hw_stats->tx_discards);
7068
7069 stats->multicast = old_stats->multicast +
7070 get_stat64(&hw_stats->rx_mcast_packets);
7071 stats->collisions = old_stats->collisions +
7072 get_stat64(&hw_stats->tx_collisions);
7073
7074 stats->rx_length_errors = old_stats->rx_length_errors +
7075 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7076 get_stat64(&hw_stats->rx_undersize_packets);
7077
7078 stats->rx_over_errors = old_stats->rx_over_errors +
7079 get_stat64(&hw_stats->rxbds_empty);
7080 stats->rx_frame_errors = old_stats->rx_frame_errors +
7081 get_stat64(&hw_stats->rx_align_errors);
7082 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7083 get_stat64(&hw_stats->tx_discards);
7084 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7085 get_stat64(&hw_stats->tx_carrier_sense_errors);
7086
7087 stats->rx_crc_errors = old_stats->rx_crc_errors +
7088 calc_crc_errors(tp);
7089
7090 stats->rx_missed_errors = old_stats->rx_missed_errors +
7091 get_stat64(&hw_stats->rx_discards);
7092
7093 return stats;
7094}
7095
7096static inline u32 calc_crc(unsigned char *buf, int len)
7097{
7098 u32 reg;
7099 u32 tmp;
7100 int j, k;
7101
7102 reg = 0xffffffff;
7103
7104 for (j = 0; j < len; j++) {
7105 reg ^= buf[j];
7106
7107 for (k = 0; k < 8; k++) {
7108 tmp = reg & 0x01;
7109
7110 reg >>= 1;
7111
7112 if (tmp) {
7113 reg ^= 0xedb88320;
7114 }
7115 }
7116 }
7117
7118 return ~reg;
7119}
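/*
 * Reviewer's note: calc_crc() is a bit-at-a-time CRC-32 over the
 * reflected polynomial 0xEDB88320 (the polynomial used for the Ethernet
 * FCS), seeded with 0xffffffff and returning the inverted remainder.
 * Only 7 bits of the result, recovered as "~crc & 0x7f", are consumed by
 * the multicast hash logic in __tg3_set_rx_mode() below.
 */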
7120
7121static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7122{
7123 /* accept or reject all multicast frames */
7124 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7125 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7126 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7127 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7128}
7129
7130static void __tg3_set_rx_mode(struct net_device *dev)
7131{
7132 struct tg3 *tp = netdev_priv(dev);
7133 u32 rx_mode;
7134
7135 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7136 RX_MODE_KEEP_VLAN_TAG);
7137
7138 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7139 * flag clear.
7140 */
7141#if TG3_VLAN_TAG_USED
7142 if (!tp->vlgrp &&
7143 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7144 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7145#else
 7146 /* By definition, VLAN is always disabled in this
 7147 * case.
 7148 */
7149 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7150 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7151#endif
7152
7153 if (dev->flags & IFF_PROMISC) {
7154 /* Promiscuous mode. */
7155 rx_mode |= RX_MODE_PROMISC;
7156 } else if (dev->flags & IFF_ALLMULTI) {
7157 /* Accept all multicast. */
7158 tg3_set_multi (tp, 1);
7159 } else if (dev->mc_count < 1) {
7160 /* Reject all multicast. */
7161 tg3_set_multi (tp, 0);
7162 } else {
7163 /* Accept one or more multicast(s). */
7164 struct dev_mc_list *mclist;
7165 unsigned int i;
7166 u32 mc_filter[4] = { 0, };
7167 u32 regidx;
7168 u32 bit;
7169 u32 crc;
7170
7171 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7172 i++, mclist = mclist->next) {
7173
7174 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7175 bit = ~crc & 0x7f;
7176 regidx = (bit & 0x60) >> 5;
7177 bit &= 0x1f;
7178 mc_filter[regidx] |= (1 << bit);
7179 }
7180
7181 tw32(MAC_HASH_REG_0, mc_filter[0]);
7182 tw32(MAC_HASH_REG_1, mc_filter[1]);
7183 tw32(MAC_HASH_REG_2, mc_filter[2]);
7184 tw32(MAC_HASH_REG_3, mc_filter[3]);
7185 }
7186
7187 if (rx_mode != tp->rx_mode) {
7188 tp->rx_mode = rx_mode;
7189 tw32_f(MAC_RX_MODE, rx_mode);
7190 udelay(10);
7191 }
7192}
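/*
 * Worked example (reviewer's sketch, not called by the driver; the
 * helper name is purely illustrative): how the loop in
 * __tg3_set_rx_mode() maps one multicast address to a bit in the four
 * MAC_HASH_REG_x registers.  Of the 7-bit hash, bits 6:5 select which
 * 32-bit register and bits 4:0 the position inside it.
 */
static inline void tg3_mc_hash_example(const u8 *addr, u32 *reg, u32 *pos)
{
	u32 crc = calc_crc((unsigned char *) addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;

	*reg = (bit & 0x60) >> 5;	/* 0..3: MAC_HASH_REG_0..3 */
	*pos = bit & 0x1f;		/* bit within that register */
}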
7193
7194static void tg3_set_rx_mode(struct net_device *dev)
7195{
7196 struct tg3 *tp = netdev_priv(dev);
7197
e75f7c90
MC
7198 if (!netif_running(dev))
7199 return;
7200
f47c11ee 7201 tg3_full_lock(tp, 0);
1da177e4 7202 __tg3_set_rx_mode(dev);
f47c11ee 7203 tg3_full_unlock(tp);
1da177e4
LT
7204}
7205
7206#define TG3_REGDUMP_LEN (32 * 1024)
7207
7208static int tg3_get_regs_len(struct net_device *dev)
7209{
7210 return TG3_REGDUMP_LEN;
7211}
7212
7213static void tg3_get_regs(struct net_device *dev,
7214 struct ethtool_regs *regs, void *_p)
7215{
7216 u32 *p = _p;
7217 struct tg3 *tp = netdev_priv(dev);
7218 u8 *orig_p = _p;
7219 int i;
7220
7221 regs->version = 0;
7222
7223 memset(p, 0, TG3_REGDUMP_LEN);
7224
bc1c7567
MC
7225 if (tp->link_config.phy_is_low_power)
7226 return;
7227
f47c11ee 7228 tg3_full_lock(tp, 0);
1da177e4
LT
7229
7230#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7231#define GET_REG32_LOOP(base,len) \
7232do { p = (u32 *)(orig_p + (base)); \
7233 for (i = 0; i < len; i += 4) \
7234 __GET_REG32((base) + i); \
7235} while (0)
7236#define GET_REG32_1(reg) \
7237do { p = (u32 *)(orig_p + (reg)); \
7238 __GET_REG32((reg)); \
7239} while (0)
7240
7241 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7242 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7243 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7244 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7245 GET_REG32_1(SNDDATAC_MODE);
7246 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7247 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7248 GET_REG32_1(SNDBDC_MODE);
7249 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7250 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7251 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7252 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7253 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7254 GET_REG32_1(RCVDCC_MODE);
7255 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7256 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7257 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7258 GET_REG32_1(MBFREE_MODE);
7259 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7260 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7261 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7262 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7263 GET_REG32_LOOP(WDMAC_MODE, 0x08);
091465d7
CE
7264 GET_REG32_1(RX_CPU_MODE);
7265 GET_REG32_1(RX_CPU_STATE);
7266 GET_REG32_1(RX_CPU_PGMCTR);
7267 GET_REG32_1(RX_CPU_HWBKPT);
7268 GET_REG32_1(TX_CPU_MODE);
7269 GET_REG32_1(TX_CPU_STATE);
7270 GET_REG32_1(TX_CPU_PGMCTR);
1da177e4
LT
7271 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7272 GET_REG32_LOOP(FTQ_RESET, 0x120);
7273 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7274 GET_REG32_1(DMAC_MODE);
7275 GET_REG32_LOOP(GRC_MODE, 0x4c);
7276 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7277 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7278
7279#undef __GET_REG32
7280#undef GET_REG32_LOOP
7281#undef GET_REG32_1
7282
f47c11ee 7283 tg3_full_unlock(tp);
1da177e4
LT
7284}
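/*
 * Reviewer's note on the register dump layout above: GET_REG32_LOOP(base,
 * len) repositions the output pointer to (buffer + base) before copying,
 * so each register block lands at its own register offset inside the
 * TG3_REGDUMP_LEN (32 kB) buffer rather than being packed back to back.
 * For example, GET_REG32_LOOP(MAC_MODE, 0x4f0) fills bytes
 * [MAC_MODE, MAC_MODE + 0x4f0) of the dump with tr32() values; regions
 * that are never named stay zero from the initial memset().
 */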
7285
7286static int tg3_get_eeprom_len(struct net_device *dev)
7287{
7288 struct tg3 *tp = netdev_priv(dev);
7289
7290 return tp->nvram_size;
7291}
7292
7293static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7294
7295static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7296{
7297 struct tg3 *tp = netdev_priv(dev);
7298 int ret;
7299 u8 *pd;
7300 u32 i, offset, len, val, b_offset, b_count;
7301
bc1c7567
MC
7302 if (tp->link_config.phy_is_low_power)
7303 return -EAGAIN;
7304
1da177e4
LT
7305 offset = eeprom->offset;
7306 len = eeprom->len;
7307 eeprom->len = 0;
7308
7309 eeprom->magic = TG3_EEPROM_MAGIC;
7310
7311 if (offset & 3) {
7312 /* adjustments to start on required 4 byte boundary */
7313 b_offset = offset & 3;
7314 b_count = 4 - b_offset;
7315 if (b_count > len) {
7316 /* i.e. offset=1 len=2 */
7317 b_count = len;
7318 }
7319 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7320 if (ret)
7321 return ret;
7322 val = cpu_to_le32(val);
7323 memcpy(data, ((char*)&val) + b_offset, b_count);
7324 len -= b_count;
7325 offset += b_count;
7326 eeprom->len += b_count;
7327 }
7328
 7329 /* read bytes up to the last 4-byte boundary */
7330 pd = &data[eeprom->len];
7331 for (i = 0; i < (len - (len & 3)); i += 4) {
7332 ret = tg3_nvram_read(tp, offset + i, &val);
7333 if (ret) {
7334 eeprom->len += i;
7335 return ret;
7336 }
7337 val = cpu_to_le32(val);
7338 memcpy(pd + i, &val, 4);
7339 }
7340 eeprom->len += i;
7341
7342 if (len & 3) {
7343 /* read last bytes not ending on 4 byte boundary */
7344 pd = &data[eeprom->len];
7345 b_count = len & 3;
7346 b_offset = offset + len - b_count;
7347 ret = tg3_nvram_read(tp, b_offset, &val);
7348 if (ret)
7349 return ret;
7350 val = cpu_to_le32(val);
7351 memcpy(pd, ((char*)&val), b_count);
7352 eeprom->len += b_count;
7353 }
7354 return 0;
7355}
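/*
 * Worked example for the alignment handling above (reviewer's note):
 * NVRAM is read as 32-bit words, so a request with offset = 1, len = 9
 * is served in three steps: a head read of word 0 from which bytes 1..3
 * are copied (b_offset = 1, b_count = 3), one aligned word covering
 * bytes 4..7, and a tail read of the word at offset 8 from which the
 * last 2 bytes are taken.  eeprom->len is advanced after each step, so a
 * partial failure still reports how much data is valid.
 */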
7356
7357static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7358
7359static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7360{
7361 struct tg3 *tp = netdev_priv(dev);
7362 int ret;
7363 u32 offset, len, b_offset, odd_len, start, end;
7364 u8 *buf;
7365
7366 if (tp->link_config.phy_is_low_power)
7367 return -EAGAIN;
7368
7369 if (eeprom->magic != TG3_EEPROM_MAGIC)
7370 return -EINVAL;
7371
7372 offset = eeprom->offset;
7373 len = eeprom->len;
7374
7375 if ((b_offset = (offset & 3))) {
7376 /* adjustments to start on required 4 byte boundary */
7377 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7378 if (ret)
7379 return ret;
7380 start = cpu_to_le32(start);
7381 len += b_offset;
7382 offset &= ~3;
1c8594b4
MC
7383 if (len < 4)
7384 len = 4;
1da177e4
LT
7385 }
7386
7387 odd_len = 0;
1c8594b4 7388 if (len & 3) {
1da177e4
LT
7389 /* adjustments to end on required 4 byte boundary */
7390 odd_len = 1;
7391 len = (len + 3) & ~3;
7392 ret = tg3_nvram_read(tp, offset+len-4, &end);
7393 if (ret)
7394 return ret;
7395 end = cpu_to_le32(end);
7396 }
7397
7398 buf = data;
7399 if (b_offset || odd_len) {
7400 buf = kmalloc(len, GFP_KERNEL);
 7401 if (!buf)
7402 return -ENOMEM;
7403 if (b_offset)
7404 memcpy(buf, &start, 4);
7405 if (odd_len)
7406 memcpy(buf+len-4, &end, 4);
7407 memcpy(buf + b_offset, data, eeprom->len);
7408 }
7409
7410 ret = tg3_nvram_write_block(tp, offset, len, buf);
7411
7412 if (buf != data)
7413 kfree(buf);
7414
7415 return ret;
7416}
7417
7418static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7419{
7420 struct tg3 *tp = netdev_priv(dev);
7421
7422 cmd->supported = (SUPPORTED_Autoneg);
7423
7424 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7425 cmd->supported |= (SUPPORTED_1000baseT_Half |
7426 SUPPORTED_1000baseT_Full);
7427
a4e2b347 7428 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
7429 cmd->supported |= (SUPPORTED_100baseT_Half |
7430 SUPPORTED_100baseT_Full |
7431 SUPPORTED_10baseT_Half |
7432 SUPPORTED_10baseT_Full |
7433 SUPPORTED_MII);
7434 else
7435 cmd->supported |= SUPPORTED_FIBRE;
7436
7437 cmd->advertising = tp->link_config.advertising;
7438 if (netif_running(dev)) {
7439 cmd->speed = tp->link_config.active_speed;
7440 cmd->duplex = tp->link_config.active_duplex;
7441 }
7442 cmd->port = 0;
7443 cmd->phy_address = PHY_ADDR;
7444 cmd->transceiver = 0;
7445 cmd->autoneg = tp->link_config.autoneg;
7446 cmd->maxtxpkt = 0;
7447 cmd->maxrxpkt = 0;
7448 return 0;
7449}
7450
7451static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7452{
7453 struct tg3 *tp = netdev_priv(dev);
7454
37ff238d 7455 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
7456 /* These are the only valid advertisement bits allowed. */
7457 if (cmd->autoneg == AUTONEG_ENABLE &&
7458 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7459 ADVERTISED_1000baseT_Full |
7460 ADVERTISED_Autoneg |
7461 ADVERTISED_FIBRE)))
7462 return -EINVAL;
37ff238d
MC
7463 /* Fiber can only do SPEED_1000. */
7464 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7465 (cmd->speed != SPEED_1000))
7466 return -EINVAL;
7467 /* Copper cannot force SPEED_1000. */
7468 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7469 (cmd->speed == SPEED_1000))
7470 return -EINVAL;
7471 else if ((cmd->speed == SPEED_1000) &&
7472 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7473 return -EINVAL;
1da177e4 7474
f47c11ee 7475 tg3_full_lock(tp, 0);
1da177e4
LT
7476
7477 tp->link_config.autoneg = cmd->autoneg;
7478 if (cmd->autoneg == AUTONEG_ENABLE) {
7479 tp->link_config.advertising = cmd->advertising;
7480 tp->link_config.speed = SPEED_INVALID;
7481 tp->link_config.duplex = DUPLEX_INVALID;
7482 } else {
7483 tp->link_config.advertising = 0;
7484 tp->link_config.speed = cmd->speed;
7485 tp->link_config.duplex = cmd->duplex;
7486 }
7487
7488 if (netif_running(dev))
7489 tg3_setup_phy(tp, 1);
7490
f47c11ee 7491 tg3_full_unlock(tp);
1da177e4
LT
7492
7493 return 0;
7494}
7495
7496static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7497{
7498 struct tg3 *tp = netdev_priv(dev);
7499
7500 strcpy(info->driver, DRV_MODULE_NAME);
7501 strcpy(info->version, DRV_MODULE_VERSION);
7502 strcpy(info->bus_info, pci_name(tp->pdev));
7503}
7504
7505static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7506{
7507 struct tg3 *tp = netdev_priv(dev);
7508
7509 wol->supported = WAKE_MAGIC;
7510 wol->wolopts = 0;
7511 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7512 wol->wolopts = WAKE_MAGIC;
7513 memset(&wol->sopass, 0, sizeof(wol->sopass));
7514}
7515
7516static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7517{
7518 struct tg3 *tp = netdev_priv(dev);
7519
7520 if (wol->wolopts & ~WAKE_MAGIC)
7521 return -EINVAL;
7522 if ((wol->wolopts & WAKE_MAGIC) &&
7523 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7524 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7525 return -EINVAL;
7526
f47c11ee 7527 spin_lock_bh(&tp->lock);
1da177e4
LT
7528 if (wol->wolopts & WAKE_MAGIC)
7529 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7530 else
7531 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 7532 spin_unlock_bh(&tp->lock);
1da177e4
LT
7533
7534 return 0;
7535}
7536
7537static u32 tg3_get_msglevel(struct net_device *dev)
7538{
7539 struct tg3 *tp = netdev_priv(dev);
7540 return tp->msg_enable;
7541}
7542
7543static void tg3_set_msglevel(struct net_device *dev, u32 value)
7544{
7545 struct tg3 *tp = netdev_priv(dev);
7546 tp->msg_enable = value;
7547}
7548
7549#if TG3_TSO_SUPPORT != 0
7550static int tg3_set_tso(struct net_device *dev, u32 value)
7551{
7552 struct tg3 *tp = netdev_priv(dev);
7553
7554 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7555 if (value)
7556 return -EINVAL;
7557 return 0;
7558 }
7559 return ethtool_op_set_tso(dev, value);
7560}
7561#endif
7562
7563static int tg3_nway_reset(struct net_device *dev)
7564{
7565 struct tg3 *tp = netdev_priv(dev);
7566 u32 bmcr;
7567 int r;
7568
7569 if (!netif_running(dev))
7570 return -EAGAIN;
7571
c94e3941
MC
7572 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7573 return -EINVAL;
7574
f47c11ee 7575 spin_lock_bh(&tp->lock);
1da177e4
LT
7576 r = -EINVAL;
7577 tg3_readphy(tp, MII_BMCR, &bmcr);
7578 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
c94e3941
MC
7579 ((bmcr & BMCR_ANENABLE) ||
7580 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7581 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7582 BMCR_ANENABLE);
1da177e4
LT
7583 r = 0;
7584 }
f47c11ee 7585 spin_unlock_bh(&tp->lock);
1da177e4
LT
7586
7587 return r;
7588}
7589
7590static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7591{
7592 struct tg3 *tp = netdev_priv(dev);
7593
7594 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7595 ering->rx_mini_max_pending = 0;
4f81c32b
MC
7596 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7597 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7598 else
7599 ering->rx_jumbo_max_pending = 0;
7600
7601 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
7602
7603 ering->rx_pending = tp->rx_pending;
7604 ering->rx_mini_pending = 0;
4f81c32b
MC
7605 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7606 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7607 else
7608 ering->rx_jumbo_pending = 0;
7609
1da177e4
LT
7610 ering->tx_pending = tp->tx_pending;
7611}
7612
7613static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7614{
7615 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7616 int irq_sync = 0;
1da177e4
LT
7617
7618 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7619 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7620 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7621 return -EINVAL;
7622
bbe832c0 7623 if (netif_running(dev)) {
1da177e4 7624 tg3_netif_stop(tp);
bbe832c0
MC
7625 irq_sync = 1;
7626 }
1da177e4 7627
bbe832c0 7628 tg3_full_lock(tp, irq_sync);
1da177e4
LT
7629
7630 tp->rx_pending = ering->rx_pending;
7631
7632 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7633 tp->rx_pending > 63)
7634 tp->rx_pending = 63;
7635 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7636 tp->tx_pending = ering->tx_pending;
7637
7638 if (netif_running(dev)) {
944d980e 7639 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7640 tg3_init_hw(tp);
7641 tg3_netif_start(tp);
7642 }
7643
f47c11ee 7644 tg3_full_unlock(tp);
1da177e4
LT
7645
7646 return 0;
7647}
7648
7649static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7650{
7651 struct tg3 *tp = netdev_priv(dev);
7652
7653 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7654 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7655 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7656}
7657
7658static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7659{
7660 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7661 int irq_sync = 0;
1da177e4 7662
bbe832c0 7663 if (netif_running(dev)) {
1da177e4 7664 tg3_netif_stop(tp);
bbe832c0
MC
7665 irq_sync = 1;
7666 }
1da177e4 7667
bbe832c0 7668 tg3_full_lock(tp, irq_sync);
f47c11ee 7669
1da177e4
LT
7670 if (epause->autoneg)
7671 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7672 else
7673 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7674 if (epause->rx_pause)
7675 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7676 else
7677 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7678 if (epause->tx_pause)
7679 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7680 else
7681 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7682
7683 if (netif_running(dev)) {
944d980e 7684 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7685 tg3_init_hw(tp);
7686 tg3_netif_start(tp);
7687 }
f47c11ee
DM
7688
7689 tg3_full_unlock(tp);
1da177e4
LT
7690
7691 return 0;
7692}
7693
7694static u32 tg3_get_rx_csum(struct net_device *dev)
7695{
7696 struct tg3 *tp = netdev_priv(dev);
7697 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7698}
7699
7700static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7701{
7702 struct tg3 *tp = netdev_priv(dev);
7703
7704 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7705 if (data != 0)
7706 return -EINVAL;
7707 return 0;
7708 }
7709
f47c11ee 7710 spin_lock_bh(&tp->lock);
1da177e4
LT
7711 if (data)
7712 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7713 else
7714 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 7715 spin_unlock_bh(&tp->lock);
1da177e4
LT
7716
7717 return 0;
7718}
7719
7720static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7721{
7722 struct tg3 *tp = netdev_priv(dev);
7723
7724 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7725 if (data != 0)
7726 return -EINVAL;
7727 return 0;
7728 }
7729
7730 if (data)
7731 dev->features |= NETIF_F_IP_CSUM;
7732 else
7733 dev->features &= ~NETIF_F_IP_CSUM;
7734
7735 return 0;
7736}
7737
7738static int tg3_get_stats_count (struct net_device *dev)
7739{
7740 return TG3_NUM_STATS;
7741}
7742
4cafd3f5
MC
7743static int tg3_get_test_count (struct net_device *dev)
7744{
7745 return TG3_NUM_TEST;
7746}
7747
1da177e4
LT
7748static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7749{
7750 switch (stringset) {
7751 case ETH_SS_STATS:
7752 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7753 break;
4cafd3f5
MC
7754 case ETH_SS_TEST:
7755 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7756 break;
1da177e4
LT
7757 default:
7758 WARN_ON(1); /* we need a WARN() */
7759 break;
7760 }
7761}
7762
4009a93d
MC
7763static int tg3_phys_id(struct net_device *dev, u32 data)
7764{
7765 struct tg3 *tp = netdev_priv(dev);
7766 int i;
7767
7768 if (!netif_running(tp->dev))
7769 return -EAGAIN;
7770
7771 if (data == 0)
7772 data = 2;
7773
7774 for (i = 0; i < (data * 2); i++) {
7775 if ((i % 2) == 0)
7776 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7777 LED_CTRL_1000MBPS_ON |
7778 LED_CTRL_100MBPS_ON |
7779 LED_CTRL_10MBPS_ON |
7780 LED_CTRL_TRAFFIC_OVERRIDE |
7781 LED_CTRL_TRAFFIC_BLINK |
7782 LED_CTRL_TRAFFIC_LED);
7783
7784 else
7785 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7786 LED_CTRL_TRAFFIC_OVERRIDE);
7787
7788 if (msleep_interruptible(500))
7789 break;
7790 }
7791 tw32(MAC_LED_CTRL, tp->led_ctrl);
7792 return 0;
7793}
7794
1da177e4
LT
7795static void tg3_get_ethtool_stats (struct net_device *dev,
7796 struct ethtool_stats *estats, u64 *tmp_stats)
7797{
7798 struct tg3 *tp = netdev_priv(dev);
7799 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7800}
7801
566f86ad
MC
7802#define NVRAM_TEST_SIZE 0x100
7803
7804static int tg3_test_nvram(struct tg3 *tp)
7805{
7806 u32 *buf, csum;
7807 int i, j, err = 0;
7808
7809 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7810 if (buf == NULL)
7811 return -ENOMEM;
7812
7813 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7814 u32 val;
7815
7816 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7817 break;
7818 buf[j] = cpu_to_le32(val);
7819 }
7820 if (i < NVRAM_TEST_SIZE)
7821 goto out;
7822
7823 err = -EIO;
7824 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7825 goto out;
7826
7827 /* Bootstrap checksum at offset 0x10 */
7828 csum = calc_crc((unsigned char *) buf, 0x10);
7829 if(csum != cpu_to_le32(buf[0x10/4]))
7830 goto out;
7831
7832 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7833 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7834 if (csum != cpu_to_le32(buf[0xfc/4]))
7835 goto out;
7836
7837 err = 0;
7838
7839out:
7840 kfree(buf);
7841 return err;
7842}
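/*
 * Reviewer's summary of the layout checked above: the first 256 bytes of
 * NVRAM are read into buf[], the magic word is verified at offset 0, the
 * 16-byte bootstrap header (offsets 0x00-0x0f) is covered by the CRC
 * stored at 0x10, and the 0x88-byte manufacturing block starting at 0x74
 * is covered by the CRC stored at 0xfc.  calc_crc() here is the same
 * routine used for the multicast hash filter.
 */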
7843
ca43007a
MC
7844#define TG3_SERDES_TIMEOUT_SEC 2
7845#define TG3_COPPER_TIMEOUT_SEC 6
7846
7847static int tg3_test_link(struct tg3 *tp)
7848{
7849 int i, max;
7850
7851 if (!netif_running(tp->dev))
7852 return -ENODEV;
7853
4c987487 7854 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
7855 max = TG3_SERDES_TIMEOUT_SEC;
7856 else
7857 max = TG3_COPPER_TIMEOUT_SEC;
7858
7859 for (i = 0; i < max; i++) {
7860 if (netif_carrier_ok(tp->dev))
7861 return 0;
7862
7863 if (msleep_interruptible(1000))
7864 break;
7865 }
7866
7867 return -EIO;
7868}
7869
a71116d1 7870/* Only test the commonly used registers */
f71e1309 7871static const int tg3_test_registers(struct tg3 *tp)
a71116d1
MC
7872{
7873 int i, is_5705;
7874 u32 offset, read_mask, write_mask, val, save_val, read_val;
7875 static struct {
7876 u16 offset;
7877 u16 flags;
7878#define TG3_FL_5705 0x1
7879#define TG3_FL_NOT_5705 0x2
7880#define TG3_FL_NOT_5788 0x4
7881 u32 read_mask;
7882 u32 write_mask;
7883 } reg_tbl[] = {
7884 /* MAC Control Registers */
7885 { MAC_MODE, TG3_FL_NOT_5705,
7886 0x00000000, 0x00ef6f8c },
7887 { MAC_MODE, TG3_FL_5705,
7888 0x00000000, 0x01ef6b8c },
7889 { MAC_STATUS, TG3_FL_NOT_5705,
7890 0x03800107, 0x00000000 },
7891 { MAC_STATUS, TG3_FL_5705,
7892 0x03800100, 0x00000000 },
7893 { MAC_ADDR_0_HIGH, 0x0000,
7894 0x00000000, 0x0000ffff },
7895 { MAC_ADDR_0_LOW, 0x0000,
7896 0x00000000, 0xffffffff },
7897 { MAC_RX_MTU_SIZE, 0x0000,
7898 0x00000000, 0x0000ffff },
7899 { MAC_TX_MODE, 0x0000,
7900 0x00000000, 0x00000070 },
7901 { MAC_TX_LENGTHS, 0x0000,
7902 0x00000000, 0x00003fff },
7903 { MAC_RX_MODE, TG3_FL_NOT_5705,
7904 0x00000000, 0x000007fc },
7905 { MAC_RX_MODE, TG3_FL_5705,
7906 0x00000000, 0x000007dc },
7907 { MAC_HASH_REG_0, 0x0000,
7908 0x00000000, 0xffffffff },
7909 { MAC_HASH_REG_1, 0x0000,
7910 0x00000000, 0xffffffff },
7911 { MAC_HASH_REG_2, 0x0000,
7912 0x00000000, 0xffffffff },
7913 { MAC_HASH_REG_3, 0x0000,
7914 0x00000000, 0xffffffff },
7915
7916 /* Receive Data and Receive BD Initiator Control Registers. */
7917 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7918 0x00000000, 0xffffffff },
7919 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7920 0x00000000, 0xffffffff },
7921 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7922 0x00000000, 0x00000003 },
7923 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7924 0x00000000, 0xffffffff },
7925 { RCVDBDI_STD_BD+0, 0x0000,
7926 0x00000000, 0xffffffff },
7927 { RCVDBDI_STD_BD+4, 0x0000,
7928 0x00000000, 0xffffffff },
7929 { RCVDBDI_STD_BD+8, 0x0000,
7930 0x00000000, 0xffff0002 },
7931 { RCVDBDI_STD_BD+0xc, 0x0000,
7932 0x00000000, 0xffffffff },
7933
7934 /* Receive BD Initiator Control Registers. */
7935 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7936 0x00000000, 0xffffffff },
7937 { RCVBDI_STD_THRESH, TG3_FL_5705,
7938 0x00000000, 0x000003ff },
7939 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7940 0x00000000, 0xffffffff },
7941
7942 /* Host Coalescing Control Registers. */
7943 { HOSTCC_MODE, TG3_FL_NOT_5705,
7944 0x00000000, 0x00000004 },
7945 { HOSTCC_MODE, TG3_FL_5705,
7946 0x00000000, 0x000000f6 },
7947 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7948 0x00000000, 0xffffffff },
7949 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7950 0x00000000, 0x000003ff },
7951 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7952 0x00000000, 0xffffffff },
7953 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7954 0x00000000, 0x000003ff },
7955 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7956 0x00000000, 0xffffffff },
7957 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7958 0x00000000, 0x000000ff },
7959 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7960 0x00000000, 0xffffffff },
7961 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7962 0x00000000, 0x000000ff },
7963 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7964 0x00000000, 0xffffffff },
7965 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7966 0x00000000, 0xffffffff },
7967 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7968 0x00000000, 0xffffffff },
7969 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7970 0x00000000, 0x000000ff },
7971 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7972 0x00000000, 0xffffffff },
7973 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7974 0x00000000, 0x000000ff },
7975 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7976 0x00000000, 0xffffffff },
7977 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7978 0x00000000, 0xffffffff },
7979 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7980 0x00000000, 0xffffffff },
7981 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7982 0x00000000, 0xffffffff },
7983 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7984 0x00000000, 0xffffffff },
7985 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7986 0xffffffff, 0x00000000 },
7987 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7988 0xffffffff, 0x00000000 },
7989
7990 /* Buffer Manager Control Registers. */
7991 { BUFMGR_MB_POOL_ADDR, 0x0000,
7992 0x00000000, 0x007fff80 },
7993 { BUFMGR_MB_POOL_SIZE, 0x0000,
7994 0x00000000, 0x007fffff },
7995 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7996 0x00000000, 0x0000003f },
7997 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7998 0x00000000, 0x000001ff },
7999 { BUFMGR_MB_HIGH_WATER, 0x0000,
8000 0x00000000, 0x000001ff },
8001 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8002 0xffffffff, 0x00000000 },
8003 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8004 0xffffffff, 0x00000000 },
8005
8006 /* Mailbox Registers */
8007 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8008 0x00000000, 0x000001ff },
8009 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8010 0x00000000, 0x000001ff },
8011 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8012 0x00000000, 0x000007ff },
8013 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8014 0x00000000, 0x000001ff },
8015
8016 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8017 };
8018
8019 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8020 is_5705 = 1;
8021 else
8022 is_5705 = 0;
8023
8024 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8025 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8026 continue;
8027
8028 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8029 continue;
8030
8031 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8032 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8033 continue;
8034
8035 offset = (u32) reg_tbl[i].offset;
8036 read_mask = reg_tbl[i].read_mask;
8037 write_mask = reg_tbl[i].write_mask;
8038
8039 /* Save the original register content */
8040 save_val = tr32(offset);
8041
8042 /* Determine the read-only value. */
8043 read_val = save_val & read_mask;
8044
8045 /* Write zero to the register, then make sure the read-only bits
8046 * are not changed and the read/write bits are all zeros.
8047 */
8048 tw32(offset, 0);
8049
8050 val = tr32(offset);
8051
8052 /* Test the read-only and read/write bits. */
8053 if (((val & read_mask) != read_val) || (val & write_mask))
8054 goto out;
8055
8056 /* Write ones to all the bits defined by RdMask and WrMask, then
8057 * make sure the read-only bits are not changed and the
8058 * read/write bits are all ones.
8059 */
8060 tw32(offset, read_mask | write_mask);
8061
8062 val = tr32(offset);
8063
8064 /* Test the read-only bits. */
8065 if ((val & read_mask) != read_val)
8066 goto out;
8067
8068 /* Test the read/write bits. */
8069 if ((val & write_mask) != write_mask)
8070 goto out;
8071
8072 tw32(offset, save_val);
8073 }
8074
8075 return 0;
8076
8077out:
8078 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8079 tw32(offset, save_val);
8080 return -EIO;
8081}
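/*
 * Worked example for the table above (reviewer's note): for
 * { MAC_ADDR_0_HIGH, 0x0000, 0x00000000, 0x0000ffff } the read_mask is 0
 * (no read-only bits) and the write_mask is 0x0000ffff.  The test writes
 * 0 and expects the masked value read back to be 0, then writes
 * 0x0000ffff and expects the low 16 bits to read back as all ones; bits
 * outside the two masks are ignored, and the original register value is
 * restored afterwards.
 */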
8082
7942e1db
MC
8083static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8084{
f71e1309 8085 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
8086 int i;
8087 u32 j;
8088
8089 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8090 for (j = 0; j < len; j += 4) {
8091 u32 val;
8092
8093 tg3_write_mem(tp, offset + j, test_pattern[i]);
8094 tg3_read_mem(tp, offset + j, &val);
8095 if (val != test_pattern[i])
8096 return -EIO;
8097 }
8098 }
8099 return 0;
8100}
8101
8102static int tg3_test_memory(struct tg3 *tp)
8103{
8104 static struct mem_entry {
8105 u32 offset;
8106 u32 len;
8107 } mem_tbl_570x[] = {
38690194 8108 { 0x00000000, 0x00b50},
7942e1db
MC
8109 { 0x00002000, 0x1c000},
8110 { 0xffffffff, 0x00000}
8111 }, mem_tbl_5705[] = {
8112 { 0x00000100, 0x0000c},
8113 { 0x00000200, 0x00008},
7942e1db
MC
8114 { 0x00004000, 0x00800},
8115 { 0x00006000, 0x01000},
8116 { 0x00008000, 0x02000},
8117 { 0x00010000, 0x0e000},
8118 { 0xffffffff, 0x00000}
8119 };
8120 struct mem_entry *mem_tbl;
8121 int err = 0;
8122 int i;
8123
8124 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8125 mem_tbl = mem_tbl_5705;
8126 else
8127 mem_tbl = mem_tbl_570x;
8128
8129 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8130 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8131 mem_tbl[i].len)) != 0)
8132 break;
8133 }
8134
8135 return err;
8136}
8137
9f40dead
MC
8138#define TG3_MAC_LOOPBACK 0
8139#define TG3_PHY_LOOPBACK 1
8140
8141static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
c76949a6 8142{
9f40dead 8143 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
c76949a6
MC
8144 u32 desc_idx;
8145 struct sk_buff *skb, *rx_skb;
8146 u8 *tx_data;
8147 dma_addr_t map;
8148 int num_pkts, tx_len, rx_len, i, err;
8149 struct tg3_rx_buffer_desc *desc;
8150
9f40dead 8151 if (loopback_mode == TG3_MAC_LOOPBACK) {
c94e3941
MC
8152 /* HW errata - mac loopback fails in some cases on 5780.
8153 * Normal traffic and PHY loopback are not affected by
8154 * errata.
8155 */
8156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8157 return 0;
8158
9f40dead
MC
8159 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8160 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8161 MAC_MODE_PORT_MODE_GMII;
8162 tw32(MAC_MODE, mac_mode);
8163 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
c94e3941
MC
8164 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8165 BMCR_SPEED1000);
8166 udelay(40);
8167 /* reset to prevent losing 1st rx packet intermittently */
8168 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8169 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8170 udelay(10);
8171 tw32_f(MAC_RX_MODE, tp->rx_mode);
8172 }
9f40dead
MC
8173 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8174 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8175 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8176 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8177 tw32(MAC_MODE, mac_mode);
9f40dead
MC
8178 }
8179 else
8180 return -EINVAL;
c76949a6
MC
8181
8182 err = -EIO;
8183
c76949a6
MC
8184 tx_len = 1514;
8185 skb = dev_alloc_skb(tx_len);
8186 tx_data = skb_put(skb, tx_len);
8187 memcpy(tx_data, tp->dev->dev_addr, 6);
8188 memset(tx_data + 6, 0x0, 8);
8189
8190 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8191
8192 for (i = 14; i < tx_len; i++)
8193 tx_data[i] = (u8) (i & 0xff);
8194
8195 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8196
8197 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8198 HOSTCC_MODE_NOW);
8199
8200 udelay(10);
8201
8202 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8203
c76949a6
MC
8204 num_pkts = 0;
8205
9f40dead 8206 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
c76949a6 8207
9f40dead 8208 tp->tx_prod++;
c76949a6
MC
8209 num_pkts++;
8210
9f40dead
MC
8211 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8212 tp->tx_prod);
09ee929c 8213 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
c76949a6
MC
8214
8215 udelay(10);
8216
8217 for (i = 0; i < 10; i++) {
8218 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8219 HOSTCC_MODE_NOW);
8220
8221 udelay(10);
8222
8223 tx_idx = tp->hw_status->idx[0].tx_consumer;
8224 rx_idx = tp->hw_status->idx[0].rx_producer;
9f40dead 8225 if ((tx_idx == tp->tx_prod) &&
c76949a6
MC
8226 (rx_idx == (rx_start_idx + num_pkts)))
8227 break;
8228 }
8229
8230 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8231 dev_kfree_skb(skb);
8232
9f40dead 8233 if (tx_idx != tp->tx_prod)
c76949a6
MC
8234 goto out;
8235
8236 if (rx_idx != rx_start_idx + num_pkts)
8237 goto out;
8238
8239 desc = &tp->rx_rcb[rx_start_idx];
8240 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8241 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8242 if (opaque_key != RXD_OPAQUE_RING_STD)
8243 goto out;
8244
8245 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8246 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8247 goto out;
8248
8249 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8250 if (rx_len != tx_len)
8251 goto out;
8252
8253 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8254
8255 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8256 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8257
8258 for (i = 14; i < tx_len; i++) {
8259 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8260 goto out;
8261 }
8262 err = 0;
8263
8264 /* tg3_free_rings will unmap and free the rx_skb */
8265out:
8266 return err;
8267}
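/*
 * Reviewer's sketch of the loopback test above: one 1514-byte frame is
 * built with the device's own MAC address as destination and a counting
 * byte pattern as payload, queued on the send ring, and the host
 * coalescing block is kicked so the status block indices update without
 * relying on normal interrupt traffic.  The test then polls up to ten
 * times for the TX consumer index to catch up and for exactly one new RX
 * descriptor, and finally compares the received payload byte-for-byte
 * against the transmitted pattern.
 */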
8268
9f40dead
MC
8269#define TG3_MAC_LOOPBACK_FAILED 1
8270#define TG3_PHY_LOOPBACK_FAILED 2
8271#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8272 TG3_PHY_LOOPBACK_FAILED)
8273
8274static int tg3_test_loopback(struct tg3 *tp)
8275{
8276 int err = 0;
8277
8278 if (!netif_running(tp->dev))
8279 return TG3_LOOPBACK_FAILED;
8280
8281 tg3_reset_hw(tp);
8282
8283 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8284 err |= TG3_MAC_LOOPBACK_FAILED;
8285 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8286 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8287 err |= TG3_PHY_LOOPBACK_FAILED;
8288 }
8289
8290 return err;
8291}
8292
4cafd3f5
MC
8293static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8294 u64 *data)
8295{
566f86ad
MC
8296 struct tg3 *tp = netdev_priv(dev);
8297
bc1c7567
MC
8298 if (tp->link_config.phy_is_low_power)
8299 tg3_set_power_state(tp, PCI_D0);
8300
566f86ad
MC
8301 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8302
8303 if (tg3_test_nvram(tp) != 0) {
8304 etest->flags |= ETH_TEST_FL_FAILED;
8305 data[0] = 1;
8306 }
ca43007a
MC
8307 if (tg3_test_link(tp) != 0) {
8308 etest->flags |= ETH_TEST_FL_FAILED;
8309 data[1] = 1;
8310 }
a71116d1 8311 if (etest->flags & ETH_TEST_FL_OFFLINE) {
ec41c7df 8312 int err, irq_sync = 0;
bbe832c0
MC
8313
8314 if (netif_running(dev)) {
a71116d1 8315 tg3_netif_stop(tp);
bbe832c0
MC
8316 irq_sync = 1;
8317 }
a71116d1 8318
bbe832c0 8319 tg3_full_lock(tp, irq_sync);
a71116d1
MC
8320
8321 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
ec41c7df 8322 err = tg3_nvram_lock(tp);
a71116d1
MC
8323 tg3_halt_cpu(tp, RX_CPU_BASE);
8324 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8325 tg3_halt_cpu(tp, TX_CPU_BASE);
ec41c7df
MC
8326 if (!err)
8327 tg3_nvram_unlock(tp);
a71116d1
MC
8328
8329 if (tg3_test_registers(tp) != 0) {
8330 etest->flags |= ETH_TEST_FL_FAILED;
8331 data[2] = 1;
8332 }
7942e1db
MC
8333 if (tg3_test_memory(tp) != 0) {
8334 etest->flags |= ETH_TEST_FL_FAILED;
8335 data[3] = 1;
8336 }
9f40dead 8337 if ((data[4] = tg3_test_loopback(tp)) != 0)
c76949a6 8338 etest->flags |= ETH_TEST_FL_FAILED;
a71116d1 8339
f47c11ee
DM
8340 tg3_full_unlock(tp);
8341
d4bc3927
MC
8342 if (tg3_test_interrupt(tp) != 0) {
8343 etest->flags |= ETH_TEST_FL_FAILED;
8344 data[5] = 1;
8345 }
f47c11ee
DM
8346
8347 tg3_full_lock(tp, 0);
d4bc3927 8348
a71116d1
MC
8349 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8350 if (netif_running(dev)) {
8351 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8352 tg3_init_hw(tp);
8353 tg3_netif_start(tp);
8354 }
f47c11ee
DM
8355
8356 tg3_full_unlock(tp);
a71116d1 8357 }
bc1c7567
MC
8358 if (tp->link_config.phy_is_low_power)
8359 tg3_set_power_state(tp, PCI_D3hot);
8360
4cafd3f5
MC
8361}
8362
1da177e4
LT
8363static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8364{
8365 struct mii_ioctl_data *data = if_mii(ifr);
8366 struct tg3 *tp = netdev_priv(dev);
8367 int err;
8368
8369 switch(cmd) {
8370 case SIOCGMIIPHY:
8371 data->phy_id = PHY_ADDR;
8372
8373 /* fallthru */
8374 case SIOCGMIIREG: {
8375 u32 mii_regval;
8376
8377 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8378 break; /* We have no PHY */
8379
bc1c7567
MC
8380 if (tp->link_config.phy_is_low_power)
8381 return -EAGAIN;
8382
f47c11ee 8383 spin_lock_bh(&tp->lock);
1da177e4 8384 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 8385 spin_unlock_bh(&tp->lock);
1da177e4
LT
8386
8387 data->val_out = mii_regval;
8388
8389 return err;
8390 }
8391
8392 case SIOCSMIIREG:
8393 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8394 break; /* We have no PHY */
8395
8396 if (!capable(CAP_NET_ADMIN))
8397 return -EPERM;
8398
bc1c7567
MC
8399 if (tp->link_config.phy_is_low_power)
8400 return -EAGAIN;
8401
f47c11ee 8402 spin_lock_bh(&tp->lock);
1da177e4 8403 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 8404 spin_unlock_bh(&tp->lock);
1da177e4
LT
8405
8406 return err;
8407
8408 default:
8409 /* do nothing */
8410 break;
8411 }
8412 return -EOPNOTSUPP;
8413}
8414
8415#if TG3_VLAN_TAG_USED
8416static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8417{
8418 struct tg3 *tp = netdev_priv(dev);
8419
f47c11ee 8420 tg3_full_lock(tp, 0);
1da177e4
LT
8421
8422 tp->vlgrp = grp;
8423
8424 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8425 __tg3_set_rx_mode(dev);
8426
f47c11ee 8427 tg3_full_unlock(tp);
1da177e4
LT
8428}
8429
8430static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8431{
8432 struct tg3 *tp = netdev_priv(dev);
8433
f47c11ee 8434 tg3_full_lock(tp, 0);
1da177e4
LT
8435 if (tp->vlgrp)
8436 tp->vlgrp->vlan_devices[vid] = NULL;
f47c11ee 8437 tg3_full_unlock(tp);
1da177e4
LT
8438}
8439#endif
8440
15f9850d
DM
8441static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8442{
8443 struct tg3 *tp = netdev_priv(dev);
8444
8445 memcpy(ec, &tp->coal, sizeof(*ec));
8446 return 0;
8447}
8448
d244c892
MC
8449static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8450{
8451 struct tg3 *tp = netdev_priv(dev);
8452 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8453 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8454
8455 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8456 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8457 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8458 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8459 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8460 }
8461
8462 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8463 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8464 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8465 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8466 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8467 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8468 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8469 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8470 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8471 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8472 return -EINVAL;
8473
8474 /* No rx interrupts will be generated if both are zero */
8475 if ((ec->rx_coalesce_usecs == 0) &&
8476 (ec->rx_max_coalesced_frames == 0))
8477 return -EINVAL;
8478
8479 /* No tx interrupts will be generated if both are zero */
8480 if ((ec->tx_coalesce_usecs == 0) &&
8481 (ec->tx_max_coalesced_frames == 0))
8482 return -EINVAL;
8483
8484 /* Only copy relevant parameters, ignore all others. */
8485 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8486 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8487 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8488 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8489 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8490 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8491 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8492 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8493 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8494
8495 if (netif_running(dev)) {
8496 tg3_full_lock(tp, 0);
8497 __tg3_set_coalesce(tp, &tp->coal);
8498 tg3_full_unlock(tp);
8499 }
8500 return 0;
8501}
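/* For illustration only: this handler is reached from user space through
 * the ETHTOOL_SCOALESCE ioctl, e.g. "ethtool -C eth0 rx-usecs 20
 * rx-frames 5" ("eth0" is just a placeholder interface name); the limit
 * checks above reject values outside the hardware ranges.
 */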
8502
1da177e4
LT
8503static struct ethtool_ops tg3_ethtool_ops = {
8504 .get_settings = tg3_get_settings,
8505 .set_settings = tg3_set_settings,
8506 .get_drvinfo = tg3_get_drvinfo,
8507 .get_regs_len = tg3_get_regs_len,
8508 .get_regs = tg3_get_regs,
8509 .get_wol = tg3_get_wol,
8510 .set_wol = tg3_set_wol,
8511 .get_msglevel = tg3_get_msglevel,
8512 .set_msglevel = tg3_set_msglevel,
8513 .nway_reset = tg3_nway_reset,
8514 .get_link = ethtool_op_get_link,
8515 .get_eeprom_len = tg3_get_eeprom_len,
8516 .get_eeprom = tg3_get_eeprom,
8517 .set_eeprom = tg3_set_eeprom,
8518 .get_ringparam = tg3_get_ringparam,
8519 .set_ringparam = tg3_set_ringparam,
8520 .get_pauseparam = tg3_get_pauseparam,
8521 .set_pauseparam = tg3_set_pauseparam,
8522 .get_rx_csum = tg3_get_rx_csum,
8523 .set_rx_csum = tg3_set_rx_csum,
8524 .get_tx_csum = ethtool_op_get_tx_csum,
8525 .set_tx_csum = tg3_set_tx_csum,
8526 .get_sg = ethtool_op_get_sg,
8527 .set_sg = ethtool_op_set_sg,
8528#if TG3_TSO_SUPPORT != 0
8529 .get_tso = ethtool_op_get_tso,
8530 .set_tso = tg3_set_tso,
8531#endif
4cafd3f5
MC
8532 .self_test_count = tg3_get_test_count,
8533 .self_test = tg3_self_test,
1da177e4 8534 .get_strings = tg3_get_strings,
4009a93d 8535 .phys_id = tg3_phys_id,
1da177e4
LT
8536 .get_stats_count = tg3_get_stats_count,
8537 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 8538 .get_coalesce = tg3_get_coalesce,
d244c892 8539 .set_coalesce = tg3_set_coalesce,
2ff43697 8540 .get_perm_addr = ethtool_op_get_perm_addr,
1da177e4
LT
8541};
8542
8543static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8544{
8545 u32 cursize, val;
8546
8547 tp->nvram_size = EEPROM_CHIP_SIZE;
8548
8549 if (tg3_nvram_read(tp, 0, &val) != 0)
8550 return;
8551
8552 if (swab32(val) != TG3_EEPROM_MAGIC)
8553 return;
8554
8555 /*
8556 * Size the chip by reading offsets at increasing powers of two.
8557 * When we encounter our validation signature, we know the addressing
8558 * has wrapped around, and thus have our chip size.
8559 */
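	/* Illustrative example: probing 0x800, 0x1000, 0x2000, ... until
	 * swab32(val) matches TG3_EEPROM_MAGIC again means the addressing
	 * has wrapped; if that first happens at 0x4000, the part is 16 KB.
	 */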
8560 cursize = 0x800;
8561
8562 while (cursize < tp->nvram_size) {
8563 if (tg3_nvram_read(tp, cursize, &val) != 0)
8564 return;
8565
8566 if (swab32(val) == TG3_EEPROM_MAGIC)
8567 break;
8568
8569 cursize <<= 1;
8570 }
8571
8572 tp->nvram_size = cursize;
8573}
8574
8575static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8576{
8577 u32 val;
8578
8579 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8580 if (val != 0) {
8581 tp->nvram_size = (val >> 16) * 1024;
8582 return;
8583 }
8584 }
8585 tp->nvram_size = 0x20000;
8586}
8587
8588static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8589{
8590 u32 nvcfg1;
8591
8592 nvcfg1 = tr32(NVRAM_CFG1);
8593 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8594 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8595 }
8596 else {
8597 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8598 tw32(NVRAM_CFG1, nvcfg1);
8599 }
8600
4c987487 8601 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
a4e2b347 8602 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
8603 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8604 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8605 tp->nvram_jedecnum = JEDEC_ATMEL;
8606 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8607 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8608 break;
8609 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8610 tp->nvram_jedecnum = JEDEC_ATMEL;
8611 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8612 break;
8613 case FLASH_VENDOR_ATMEL_EEPROM:
8614 tp->nvram_jedecnum = JEDEC_ATMEL;
8615 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8616 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8617 break;
8618 case FLASH_VENDOR_ST:
8619 tp->nvram_jedecnum = JEDEC_ST;
8620 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8621 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8622 break;
8623 case FLASH_VENDOR_SAIFUN:
8624 tp->nvram_jedecnum = JEDEC_SAIFUN;
8625 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8626 break;
8627 case FLASH_VENDOR_SST_SMALL:
8628 case FLASH_VENDOR_SST_LARGE:
8629 tp->nvram_jedecnum = JEDEC_SST;
8630 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8631 break;
8632 }
8633 }
8634 else {
8635 tp->nvram_jedecnum = JEDEC_ATMEL;
8636 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8637 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8638 }
8639}
8640
361b4ac2
MC
8641static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8642{
8643 u32 nvcfg1;
8644
8645 nvcfg1 = tr32(NVRAM_CFG1);
8646
e6af301b
MC
8647 /* NVRAM protection for TPM */
8648 if (nvcfg1 & (1 << 27))
8649 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8650
361b4ac2
MC
8651 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8652 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8653 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8654 tp->nvram_jedecnum = JEDEC_ATMEL;
8655 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8656 break;
8657 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8658 tp->nvram_jedecnum = JEDEC_ATMEL;
8659 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8660 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8661 break;
8662 case FLASH_5752VENDOR_ST_M45PE10:
8663 case FLASH_5752VENDOR_ST_M45PE20:
8664 case FLASH_5752VENDOR_ST_M45PE40:
8665 tp->nvram_jedecnum = JEDEC_ST;
8666 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8667 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8668 break;
8669 }
8670
8671 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8672 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8673 case FLASH_5752PAGE_SIZE_256:
8674 tp->nvram_pagesize = 256;
8675 break;
8676 case FLASH_5752PAGE_SIZE_512:
8677 tp->nvram_pagesize = 512;
8678 break;
8679 case FLASH_5752PAGE_SIZE_1K:
8680 tp->nvram_pagesize = 1024;
8681 break;
8682 case FLASH_5752PAGE_SIZE_2K:
8683 tp->nvram_pagesize = 2048;
8684 break;
8685 case FLASH_5752PAGE_SIZE_4K:
8686 tp->nvram_pagesize = 4096;
8687 break;
8688 case FLASH_5752PAGE_SIZE_264:
8689 tp->nvram_pagesize = 264;
8690 break;
8691 }
8692 }
8693 else {
8694 /* For eeprom, set pagesize to maximum eeprom size */
8695 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8696
8697 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8698 tw32(NVRAM_CFG1, nvcfg1);
8699 }
8700}
8701
1da177e4
LT
8702/* Chips other than 5700/5701 use the NVRAM for fetching info. */
8703static void __devinit tg3_nvram_init(struct tg3 *tp)
8704{
8705 int j;
8706
8707 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8708 return;
8709
8710 tw32_f(GRC_EEPROM_ADDR,
8711 (EEPROM_ADDR_FSM_RESET |
8712 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8713 EEPROM_ADDR_CLKPERD_SHIFT)));
8714
8715 /* XXX schedule_timeout() ... */
8716 for (j = 0; j < 100; j++)
8717 udelay(10);
8718
8719 /* Enable seeprom accesses. */
8720 tw32_f(GRC_LOCAL_CTRL,
8721 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8722 udelay(100);
8723
8724 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8725 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8726 tp->tg3_flags |= TG3_FLAG_NVRAM;
8727
ec41c7df
MC
8728 if (tg3_nvram_lock(tp)) {
8729 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
8730 "tg3_nvram_init failed.\n", tp->dev->name);
8731 return;
8732 }
e6af301b 8733 tg3_enable_nvram_access(tp);
1da177e4 8734
361b4ac2
MC
8735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8736 tg3_get_5752_nvram_info(tp);
8737 else
8738 tg3_get_nvram_info(tp);
8739
1da177e4
LT
8740 tg3_get_nvram_size(tp);
8741
e6af301b 8742 tg3_disable_nvram_access(tp);
381291b7 8743 tg3_nvram_unlock(tp);
1da177e4
LT
8744
8745 } else {
8746 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8747
8748 tg3_get_eeprom_size(tp);
8749 }
8750}
8751
8752static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8753 u32 offset, u32 *val)
8754{
8755 u32 tmp;
8756 int i;
8757
8758 if (offset > EEPROM_ADDR_ADDR_MASK ||
8759 (offset % 4) != 0)
8760 return -EINVAL;
8761
8762 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8763 EEPROM_ADDR_DEVID_MASK |
8764 EEPROM_ADDR_READ);
8765 tw32(GRC_EEPROM_ADDR,
8766 tmp |
8767 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8768 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8769 EEPROM_ADDR_ADDR_MASK) |
8770 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8771
8772 for (i = 0; i < 10000; i++) {
8773 tmp = tr32(GRC_EEPROM_ADDR);
8774
8775 if (tmp & EEPROM_ADDR_COMPLETE)
8776 break;
8777 udelay(100);
8778 }
8779 if (!(tmp & EEPROM_ADDR_COMPLETE))
8780 return -EBUSY;
8781
8782 *val = tr32(GRC_EEPROM_DATA);
8783 return 0;
8784}
8785
8786#define NVRAM_CMD_TIMEOUT 10000
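/* Worst-case polling budget in tg3_nvram_exec_cmd() below:
 * 10000 iterations x udelay(10) is roughly 100 ms before -EBUSY.
 */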
8787
8788static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8789{
8790 int i;
8791
8792 tw32(NVRAM_CMD, nvram_cmd);
8793 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8794 udelay(10);
8795 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8796 udelay(10);
8797 break;
8798 }
8799 }
8800 if (i == NVRAM_CMD_TIMEOUT) {
8801 return -EBUSY;
8802 }
8803 return 0;
8804}
8805
8806static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8807{
8808 int ret;
8809
8810 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8811 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8812 return -EINVAL;
8813 }
8814
8815 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8816 return tg3_nvram_read_using_eeprom(tp, offset, val);
8817
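	/* Buffered Atmel AT45DB parts are page-addressed rather than
	 * linearly addressed, so the linear offset is converted below.
	 * Illustrative arithmetic, assuming the usual 264-byte page and a
	 * 9-bit in-page field: offset 1000 -> page 3, byte 208, giving a
	 * device address of (3 << 9) + 208 = 1744.
	 */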
8818 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8819 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8820 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8821
8822 offset = ((offset / tp->nvram_pagesize) <<
8823 ATMEL_AT45DB0X1B_PAGE_POS) +
8824 (offset % tp->nvram_pagesize);
8825 }
8826
8827 if (offset > NVRAM_ADDR_MSK)
8828 return -EINVAL;
8829
ec41c7df
MC
8830 ret = tg3_nvram_lock(tp);
8831 if (ret)
8832 return ret;
1da177e4 8833
e6af301b 8834 tg3_enable_nvram_access(tp);
1da177e4
LT
8835
8836 tw32(NVRAM_ADDR, offset);
8837 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8838 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8839
8840 if (ret == 0)
8841 *val = swab32(tr32(NVRAM_RDDATA));
8842
e6af301b 8843 tg3_disable_nvram_access(tp);
1da177e4 8844
381291b7
MC
8845 tg3_nvram_unlock(tp);
8846
1da177e4
LT
8847 return ret;
8848}
8849
8850static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8851 u32 offset, u32 len, u8 *buf)
8852{
8853 int i, j, rc = 0;
8854 u32 val;
8855
8856 for (i = 0; i < len; i += 4) {
8857 u32 addr, data;
8858
8859 addr = offset + i;
8860
8861 memcpy(&data, buf + i, 4);
8862
8863 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8864
8865 val = tr32(GRC_EEPROM_ADDR);
8866 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8867
8868 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8869 EEPROM_ADDR_READ);
8870 tw32(GRC_EEPROM_ADDR, val |
8871 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8872 (addr & EEPROM_ADDR_ADDR_MASK) |
8873 EEPROM_ADDR_START |
8874 EEPROM_ADDR_WRITE);
8875
8876 for (j = 0; j < 10000; j++) {
8877 val = tr32(GRC_EEPROM_ADDR);
8878
8879 if (val & EEPROM_ADDR_COMPLETE)
8880 break;
8881 udelay(100);
8882 }
8883 if (!(val & EEPROM_ADDR_COMPLETE)) {
8884 rc = -EBUSY;
8885 break;
8886 }
8887 }
8888
8889 return rc;
8890}
8891
8892/* offset and length are dword aligned */
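/* Summary of the sequence below: for each flash page touched, read the
 * whole page into a bounce buffer, merge the caller's data, issue a
 * write-enable, erase the page, issue another write-enable, then program
 * the page back one word at a time with FIRST/LAST markers; a final
 * write-disable command ends the operation.
 */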
8893static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8894 u8 *buf)
8895{
8896 int ret = 0;
8897 u32 pagesize = tp->nvram_pagesize;
8898 u32 pagemask = pagesize - 1;
8899 u32 nvram_cmd;
8900 u8 *tmp;
8901
8902 tmp = kmalloc(pagesize, GFP_KERNEL);
8903 if (tmp == NULL)
8904 return -ENOMEM;
8905
8906 while (len) {
8907 int j;
e6af301b 8908 u32 phy_addr, page_off, size;
1da177e4
LT
8909
8910 phy_addr = offset & ~pagemask;
8911
8912 for (j = 0; j < pagesize; j += 4) {
8913 if ((ret = tg3_nvram_read(tp, phy_addr + j,
8914 (u32 *) (tmp + j))))
8915 break;
8916 }
8917 if (ret)
8918 break;
8919
8920 page_off = offset & pagemask;
8921 size = pagesize;
8922 if (len < size)
8923 size = len;
8924
8925 len -= size;
8926
8927 memcpy(tmp + page_off, buf, size);
8928
8929 offset = offset + (pagesize - page_off);
8930
e6af301b 8931 tg3_enable_nvram_access(tp);
1da177e4
LT
8932
8933 /*
8934 * Before we can erase the flash page, we need
8935 * to issue a special "write enable" command.
8936 */
8937 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8938
8939 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8940 break;
8941
8942 /* Erase the target page */
8943 tw32(NVRAM_ADDR, phy_addr);
8944
8945 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8946 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8947
8948 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8949 break;
8950
8951 /* Issue another write enable to start the write. */
8952 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8953
8954 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8955 break;
8956
8957 for (j = 0; j < pagesize; j += 4) {
8958 u32 data;
8959
8960 data = *((u32 *) (tmp + j));
8961 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8962
8963 tw32(NVRAM_ADDR, phy_addr + j);
8964
8965 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8966 NVRAM_CMD_WR;
8967
8968 if (j == 0)
8969 nvram_cmd |= NVRAM_CMD_FIRST;
8970 else if (j == (pagesize - 4))
8971 nvram_cmd |= NVRAM_CMD_LAST;
8972
8973 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8974 break;
8975 }
8976 if (ret)
8977 break;
8978 }
8979
8980 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8981 tg3_nvram_exec_cmd(tp, nvram_cmd);
8982
8983 kfree(tmp);
8984
8985 return ret;
8986}
8987
8988/* offset and length are dword aligned */
8989static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8990 u8 *buf)
8991{
8992 int i, ret = 0;
8993
8994 for (i = 0; i < len; i += 4, offset += 4) {
8995 u32 data, page_off, phy_addr, nvram_cmd;
8996
8997 memcpy(&data, buf + i, 4);
8998 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8999
9000 page_off = offset % tp->nvram_pagesize;
9001
9002 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9003 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
9004
9005 phy_addr = ((offset / tp->nvram_pagesize) <<
9006 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
9007 }
9008 else {
9009 phy_addr = offset;
9010 }
9011
9012 tw32(NVRAM_ADDR, phy_addr);
9013
9014 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9015
9016 if ((page_off == 0) || (i == 0))
9017 nvram_cmd |= NVRAM_CMD_FIRST;
9018 else if (page_off == (tp->nvram_pagesize - 4))
9019 nvram_cmd |= NVRAM_CMD_LAST;
9020
9021 if (i == (len - 4))
9022 nvram_cmd |= NVRAM_CMD_LAST;
9023
4c987487
MC
9024 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9025 (tp->nvram_jedecnum == JEDEC_ST) &&
9026 (nvram_cmd & NVRAM_CMD_FIRST)) {
1da177e4
LT
9027
9028 if ((ret = tg3_nvram_exec_cmd(tp,
9029 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9030 NVRAM_CMD_DONE)))
9031
9032 break;
9033 }
9034 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9035 /* We always do complete word writes to eeprom. */
9036 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9037 }
9038
9039 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9040 break;
9041 }
9042 return ret;
9043}
9044
9045/* offset and length are dword aligned */
9046static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9047{
9048 int ret;
9049
9050 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9051 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9052 return -EINVAL;
9053 }
9054
9055 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
9056 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9057 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
9058 udelay(40);
9059 }
9060
9061 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9062 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9063 }
9064 else {
9065 u32 grc_mode;
9066
ec41c7df
MC
9067 ret = tg3_nvram_lock(tp);
9068 if (ret)
9069 return ret;
1da177e4 9070
e6af301b
MC
9071 tg3_enable_nvram_access(tp);
9072 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9073 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 9074 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
9075
9076 grc_mode = tr32(GRC_MODE);
9077 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9078
9079 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9080 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9081
9082 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9083 buf);
9084 }
9085 else {
9086 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9087 buf);
9088 }
9089
9090 grc_mode = tr32(GRC_MODE);
9091 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9092
e6af301b 9093 tg3_disable_nvram_access(tp);
1da177e4
LT
9094 tg3_nvram_unlock(tp);
9095 }
9096
9097 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 9098 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
9099 udelay(40);
9100 }
9101
9102 return ret;
9103}
9104
9105struct subsys_tbl_ent {
9106 u16 subsys_vendor, subsys_devid;
9107 u32 phy_id;
9108};
9109
9110static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9111 /* Broadcom boards. */
9112 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9113 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9114 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9115 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9116 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9117 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9118 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9119 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9120 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9121 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9122 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9123
9124 /* 3com boards. */
9125 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9126 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9127 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9128 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9129 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9130
9131 /* DELL boards. */
9132 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9133 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9134 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9135 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9136
9137 /* Compaq boards. */
9138 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9139 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9140 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9141 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9142 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9143
9144 /* IBM boards. */
9145 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9146};
9147
9148static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9149{
9150 int i;
9151
9152 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9153 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9154 tp->pdev->subsystem_vendor) &&
9155 (subsys_id_to_phy_id[i].subsys_devid ==
9156 tp->pdev->subsystem_device))
9157 return &subsys_id_to_phy_id[i];
9158 }
9159 return NULL;
9160}
9161
7d0c41ef
MC
9162/* Since this function may be called in D3-hot power state during
9163 * tg3_init_one(), only config cycles are allowed.
9164 */
9165static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 9166{
1da177e4 9167 u32 val;
7d0c41ef
MC
9168
9169 /* Make sure register accesses (indirect or otherwise)
9170 * will function correctly.
9171 */
9172 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9173 tp->misc_host_ctrl);
1da177e4
LT
9174
9175 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
9176 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9177
72b845e0
DM
9178 /* Do not even try poking around in here on Sun parts. */
9179 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9180 return;
9181
1da177e4
LT
9182 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9183 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9184 u32 nic_cfg, led_cfg;
7d0c41ef
MC
9185 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9186 int eeprom_phy_serdes = 0;
1da177e4
LT
9187
9188 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9189 tp->nic_sram_data_cfg = nic_cfg;
9190
9191 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9192 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9193 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9194 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9195 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9196 (ver > 0) && (ver < 0x100))
9197 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9198
1da177e4
LT
9199 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9200 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9201 eeprom_phy_serdes = 1;
9202
9203 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9204 if (nic_phy_id != 0) {
9205 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9206 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9207
9208 eeprom_phy_id = (id1 >> 16) << 10;
9209 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9210 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9211 } else
9212 eeprom_phy_id = 0;
9213
7d0c41ef 9214 tp->phy_id = eeprom_phy_id;
747e8f8b 9215 if (eeprom_phy_serdes) {
a4e2b347 9216 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
747e8f8b
MC
9217 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9218 else
9219 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9220 }
7d0c41ef 9221
cbf46853 9222 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9223 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9224 SHASTA_EXT_LED_MODE_MASK);
cbf46853 9225 else
1da177e4
LT
9226 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9227
9228 switch (led_cfg) {
9229 default:
9230 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9231 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9232 break;
9233
9234 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9235 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9236 break;
9237
9238 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9239 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
9240
9241 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9242 * read on some older 5700/5701 bootcode.
9243 */
9244 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9245 ASIC_REV_5700 ||
9246 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9247 ASIC_REV_5701)
9248 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9249
1da177e4
LT
9250 break;
9251
9252 case SHASTA_EXT_LED_SHARED:
9253 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9254 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9255 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9256 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9257 LED_CTRL_MODE_PHY_2);
9258 break;
9259
9260 case SHASTA_EXT_LED_MAC:
9261 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9262 break;
9263
9264 case SHASTA_EXT_LED_COMBO:
9265 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9266 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9267 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9268 LED_CTRL_MODE_PHY_2);
9269 break;
9270
9271 };
9272
9273 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9274 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9275 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9276 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9277
9278 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9279 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9280 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9281 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9282
9283 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9284 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 9285 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9286 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9287 }
9288 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9289 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9290
9291 if (cfg2 & (1 << 17))
9292 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9293
9294 /* serdes signal pre-emphasis in register 0x590 set by */
9295 /* bootcode if bit 18 is set */
9296 if (cfg2 & (1 << 18))
9297 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9298 }
7d0c41ef
MC
9299}
9300
9301static int __devinit tg3_phy_probe(struct tg3 *tp)
9302{
9303 u32 hw_phy_id_1, hw_phy_id_2;
9304 u32 hw_phy_id, hw_phy_id_masked;
9305 int err;
1da177e4
LT
9306
9307 /* Reading the PHY ID register can conflict with ASF
9308 * firmware access to the PHY hardware.
9309 */
9310 err = 0;
9311 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9312 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9313 } else {
9314 /* Now read the physical PHY_ID from the chip and verify
9315 * that it is sane. If it doesn't look good, we fall back
9316 * to either the hard-coded table-based PHY_ID or, failing
9317 * that, the value found in the eeprom area.
9318 */
9319 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9320 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9321
9322 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9323 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9324 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9325
9326 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9327 }
9328
9329 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9330 tp->phy_id = hw_phy_id;
9331 if (hw_phy_id_masked == PHY_ID_BCM8002)
9332 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
da6b2d01
MC
9333 else
9334 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
1da177e4 9335 } else {
7d0c41ef
MC
9336 if (tp->phy_id != PHY_ID_INVALID) {
9337 /* Do nothing, phy ID already set up in
9338 * tg3_get_eeprom_hw_cfg().
9339 */
1da177e4
LT
9340 } else {
9341 struct subsys_tbl_ent *p;
9342
9343 /* No eeprom signature? Try the hardcoded
9344 * subsys device table.
9345 */
9346 p = lookup_by_subsys(tp);
9347 if (!p)
9348 return -ENODEV;
9349
9350 tp->phy_id = p->phy_id;
9351 if (!tp->phy_id ||
9352 tp->phy_id == PHY_ID_BCM8002)
9353 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9354 }
9355 }
9356
747e8f8b 9357 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
1da177e4
LT
9358 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9359 u32 bmsr, adv_reg, tg3_ctrl;
9360
9361 tg3_readphy(tp, MII_BMSR, &bmsr);
9362 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9363 (bmsr & BMSR_LSTATUS))
9364 goto skip_phy_reset;
9365
9366 err = tg3_phy_reset(tp);
9367 if (err)
9368 return err;
9369
9370 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9371 ADVERTISE_100HALF | ADVERTISE_100FULL |
9372 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9373 tg3_ctrl = 0;
9374 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9375 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9376 MII_TG3_CTRL_ADV_1000_FULL);
9377 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9378 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9379 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9380 MII_TG3_CTRL_ENABLE_AS_MASTER);
9381 }
9382
9383 if (!tg3_copper_is_advertising_all(tp)) {
9384 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9385
9386 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9387 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9388
9389 tg3_writephy(tp, MII_BMCR,
9390 BMCR_ANENABLE | BMCR_ANRESTART);
9391 }
9392 tg3_phy_set_wirespeed(tp);
9393
9394 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9395 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9396 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9397 }
9398
9399skip_phy_reset:
9400 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9401 err = tg3_init_5401phy_dsp(tp);
9402 if (err)
9403 return err;
9404 }
9405
9406 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9407 err = tg3_init_5401phy_dsp(tp);
9408 }
9409
747e8f8b 9410 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1da177e4
LT
9411 tp->link_config.advertising =
9412 (ADVERTISED_1000baseT_Half |
9413 ADVERTISED_1000baseT_Full |
9414 ADVERTISED_Autoneg |
9415 ADVERTISED_FIBRE);
9416 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9417 tp->link_config.advertising &=
9418 ~(ADVERTISED_1000baseT_Half |
9419 ADVERTISED_1000baseT_Full);
9420
9421 return err;
9422}
9423
9424static void __devinit tg3_read_partno(struct tg3 *tp)
9425{
9426 unsigned char vpd_data[256];
9427 int i;
9428
9429 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9430 /* Sun decided not to put the necessary bits in the
9431 * NVRAM of their onboard tg3 parts :(
9432 */
9433 strcpy(tp->board_part_number, "Sun 570X");
9434 return;
9435 }
9436
9437 for (i = 0; i < 256; i += 4) {
9438 u32 tmp;
9439
9440 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9441 goto out_not_found;
9442
9443 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9444 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9445 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9446 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9447 }
9448
9449 /* Now parse and find the part number. */
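	/* The tag values below follow the PCI VPD resource format: 0x82 is
	 * believed to be the Identifier String tag, 0x91 the read/write
	 * (VPD-W) block, and 0x90 the read-only (VPD-R) block that carries
	 * the 'PN' (part number) keyword.
	 */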
9450 for (i = 0; i < 256; ) {
9451 unsigned char val = vpd_data[i];
9452 int block_end;
9453
9454 if (val == 0x82 || val == 0x91) {
9455 i = (i + 3 +
9456 (vpd_data[i + 1] +
9457 (vpd_data[i + 2] << 8)));
9458 continue;
9459 }
9460
9461 if (val != 0x90)
9462 goto out_not_found;
9463
9464 block_end = (i + 3 +
9465 (vpd_data[i + 1] +
9466 (vpd_data[i + 2] << 8)));
9467 i += 3;
9468 while (i < block_end) {
9469 if (vpd_data[i + 0] == 'P' &&
9470 vpd_data[i + 1] == 'N') {
9471 int partno_len = vpd_data[i + 2];
9472
9473 if (partno_len > 24)
9474 goto out_not_found;
9475
9476 memcpy(tp->board_part_number,
9477 &vpd_data[i + 3],
9478 partno_len);
9479
9480 /* Success. */
9481 return;
9482 }
9483 }
9484
9485 /* Part number not found. */
9486 goto out_not_found;
9487 }
9488
9489out_not_found:
9490 strcpy(tp->board_part_number, "none");
9491}
9492
9493#ifdef CONFIG_SPARC64
9494static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9495{
9496 struct pci_dev *pdev = tp->pdev;
9497 struct pcidev_cookie *pcp = pdev->sysdata;
9498
9499 if (pcp != NULL) {
9500 int node = pcp->prom_node;
9501 u32 venid;
9502 int err;
9503
9504 err = prom_getproperty(node, "subsystem-vendor-id",
9505 (char *) &venid, sizeof(venid));
9506 if (err == 0 || err == -1)
9507 return 0;
9508 if (venid == PCI_VENDOR_ID_SUN)
9509 return 1;
051d3cbd
DM
9510
9511 /* TG3 chips onboard the SunBlade-2500 don't have the
9512 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9513 * are distinguishable from non-Sun variants by being
9514 * named "network" by the firmware. Non-Sun cards will
9515 * show up as being named "ethernet".
9516 */
9517 if (!strcmp(pcp->prom_name, "network"))
9518 return 1;
1da177e4
LT
9519 }
9520 return 0;
9521}
9522#endif
9523
9524static int __devinit tg3_get_invariants(struct tg3 *tp)
9525{
9526 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
9527 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9528 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
399de50b
MC
9529 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9530 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
9531 { },
9532 };
9533 u32 misc_ctrl_reg;
9534 u32 cacheline_sz_reg;
9535 u32 pci_state_reg, grc_misc_cfg;
9536 u32 val;
9537 u16 pci_cmd;
9538 int err;
9539
9540#ifdef CONFIG_SPARC64
9541 if (tg3_is_sun_570X(tp))
9542 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9543#endif
9544
1da177e4
LT
9545 /* Force memory write invalidate off. If we leave it on,
9546 * then on 5700_BX chips we have to enable a workaround.
9547 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9548 * to match the cacheline size. The Broadcom driver has this
9549 * workaround but turns MWI off all the time, so it never uses
9550 * it. This seems to suggest that the workaround is insufficient.
9551 */
9552 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9553 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9554 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9555
9556 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9557 * has the register indirect write enable bit set before
9558 * we try to access any of the MMIO registers. It is also
9559 * critical that the PCI-X hw workaround situation is decided
9560 * before that as well.
9561 */
9562 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9563 &misc_ctrl_reg);
9564
9565 tp->pci_chip_rev_id = (misc_ctrl_reg >>
9566 MISC_HOST_CTRL_CHIPREV_SHIFT);
9567
ff645bec
MC
9568 /* Wrong chip ID in 5752 A0. This code can be removed later
9569 * as A0 is not in production.
9570 */
9571 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9572 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9573
6892914f
MC
9574 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9575 * we need to disable memory and use config. cycles
9576 * only to access all registers. The 5702/03 chips
9577 * can mistakenly decode the special cycles from the
9578 * ICH chipsets as memory write cycles, causing corruption
9579 * of register and memory space. Only certain ICH bridges
9580 * will drive special cycles with non-zero data during the
9581 * address phase which can fall within the 5703's address
9582 * range. This is not an ICH bug as the PCI spec allows
9583 * non-zero address during special cycles. However, only
9584 * these ICH bridges are known to drive non-zero addresses
9585 * during special cycles.
9586 *
9587 * Since special cycles do not cross PCI bridges, we only
9588 * enable this workaround if the 5703 is on the secondary
9589 * bus of these ICH bridges.
9590 */
9591 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9592 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9593 static struct tg3_dev_id {
9594 u32 vendor;
9595 u32 device;
9596 u32 rev;
9597 } ich_chipsets[] = {
9598 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9599 PCI_ANY_ID },
9600 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9601 PCI_ANY_ID },
9602 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9603 0xa },
9604 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9605 PCI_ANY_ID },
9606 { },
9607 };
9608 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9609 struct pci_dev *bridge = NULL;
9610
9611 while (pci_id->vendor != 0) {
9612 bridge = pci_get_device(pci_id->vendor, pci_id->device,
9613 bridge);
9614 if (!bridge) {
9615 pci_id++;
9616 continue;
9617 }
9618 if (pci_id->rev != PCI_ANY_ID) {
9619 u8 rev;
9620
9621 pci_read_config_byte(bridge, PCI_REVISION_ID,
9622 &rev);
9623 if (rev > pci_id->rev)
9624 continue;
9625 }
9626 if (bridge->subordinate &&
9627 (bridge->subordinate->number ==
9628 tp->pdev->bus->number)) {
9629
9630 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9631 pci_dev_put(bridge);
9632 break;
9633 }
9634 }
9635 }
9636
4a29cc2e
MC
9637 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9638 * DMA addresses > 40-bit. This bridge may have additional
9639 * 57xx devices behind it in some 4-port NIC designs for example.
9640 * Any tg3 device found behind the bridge will also need the 40-bit
9641 * DMA workaround.
9642 */
a4e2b347
MC
9643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9644 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9645 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 9646 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 9647 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 9648 }
4a29cc2e
MC
9649 else {
9650 struct pci_dev *bridge = NULL;
9651
9652 do {
9653 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
9654 PCI_DEVICE_ID_SERVERWORKS_EPB,
9655 bridge);
9656 if (bridge && bridge->subordinate &&
9657 (bridge->subordinate->number <=
9658 tp->pdev->bus->number) &&
9659 (bridge->subordinate->subordinate >=
9660 tp->pdev->bus->number)) {
9661 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9662 pci_dev_put(bridge);
9663 break;
9664 }
9665 } while (bridge);
9666 }
4cf78e4f 9667
1da177e4
LT
9668 /* Initialize misc host control in PCI block. */
9669 tp->misc_host_ctrl |= (misc_ctrl_reg &
9670 MISC_HOST_CTRL_CHIPREV);
9671 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9672 tp->misc_host_ctrl);
9673
9674 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9675 &cacheline_sz_reg);
9676
9677 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
9678 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
9679 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
9680 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
9681
6708e5cc 9682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 9683 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
a4e2b347 9684 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
9685 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9686
1b440c56
JL
9687 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9688 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9689 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9690
bb7064dc 9691 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9692 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9693
0f893dc6
MC
9694 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9695 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9696 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9697 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9698
1da177e4
LT
9699 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9700 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9701
399de50b
MC
9702 /* If we have an AMD 762 or VIA K8T800 chipset, write
9703 * reordering to the mailbox registers done by the host
9704 * controller can cause major troubles. We read back from
9705 * every mailbox register write to force the writes to be
9706 * posted to the chip in order.
9707 */
9708 if (pci_dev_present(write_reorder_chipsets) &&
9709 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9710 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9711
1da177e4
LT
9712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9713 tp->pci_lat_timer < 64) {
9714 tp->pci_lat_timer = 64;
9715
9716 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
9717 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
9718 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
9719 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
9720
9721 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9722 cacheline_sz_reg);
9723 }
9724
9725 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9726 &pci_state_reg);
9727
9728 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9729 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9730
9731 /* If this is a 5700 BX chipset, and we are in PCI-X
9732 * mode, enable register write workaround.
9733 *
9734 * The workaround is to use indirect register accesses
9735 * for all chip writes not to mailbox registers.
9736 */
9737 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9738 u32 pm_reg;
9739 u16 pci_cmd;
9740
9741 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9742
9743 * The chip can have its power management PCI config
9744 * space registers clobbered due to this bug.
9745 * So explicitly force the chip into D0 here.
9746 */
9747 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9748 &pm_reg);
9749 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9750 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9751 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9752 pm_reg);
9753
9754 /* Also, force SERR#/PERR# in PCI command. */
9755 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9756 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9757 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9758 }
9759 }
9760
087fe256
MC
9761 /* 5700 BX chips need to have their TX producer index mailboxes
9762 * written twice to workaround a bug.
9763 */
9764 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9765 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9766
1da177e4
LT
9767 /* Back to back register writes can cause problems on this chip,
9768 * the workaround is to read back all reg writes except those to
9769 * mailbox regs. See tg3_write_indirect_reg32().
9770 *
9771 * PCI Express 5750_A0 rev chips need this workaround too.
9772 */
9773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9774 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9775 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9776 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9777
9778 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9779 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9780 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9781 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9782
9783 /* Chip-specific fixup from Broadcom driver */
9784 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9785 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9786 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9787 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9788 }
9789
1ee582d8 9790 /* Default fast path register access methods */
20094930 9791 tp->read32 = tg3_read32;
1ee582d8 9792 tp->write32 = tg3_write32;
09ee929c 9793 tp->read32_mbox = tg3_read32;
20094930 9794 tp->write32_mbox = tg3_write32;
1ee582d8
MC
9795 tp->write32_tx_mbox = tg3_write32;
9796 tp->write32_rx_mbox = tg3_write32;
9797
9798 /* Various workaround register access methods */
9799 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9800 tp->write32 = tg3_write_indirect_reg32;
9801 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9802 tp->write32 = tg3_write_flush_reg32;
9803
9804 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9805 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9806 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9807 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9808 tp->write32_rx_mbox = tg3_write_flush_reg32;
9809 }
20094930 9810
6892914f
MC
9811 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9812 tp->read32 = tg3_read_indirect_reg32;
9813 tp->write32 = tg3_write_indirect_reg32;
9814 tp->read32_mbox = tg3_read_indirect_mbox;
9815 tp->write32_mbox = tg3_write_indirect_mbox;
9816 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9817 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9818
9819 iounmap(tp->regs);
22abe310 9820 tp->regs = NULL;
6892914f
MC
9821
9822 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9823 pci_cmd &= ~PCI_COMMAND_MEMORY;
9824 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9825 }
9826
7d0c41ef
MC
9827 /* Get eeprom hw config before calling tg3_set_power_state().
9828 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9829 * determined before calling tg3_set_power_state() so that
9830 * we know whether or not to switch out of Vaux power.
9831 * When the flag is set, it means that GPIO1 is used for eeprom
9832 * write protect and also implies that it is a LOM where GPIOs
9833 * are not used to switch power.
9834 */
9835 tg3_get_eeprom_hw_cfg(tp);
9836
314fba34
MC
9837 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9838 * GPIO1 driven high will bring 5700's external PHY out of reset.
9839 * It is also used as eeprom write protect on LOMs.
9840 */
9841 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9842 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9843 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9844 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9845 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
9846 /* Unused GPIO3 must be driven as output on 5752 because there
9847 * are no pull-up resistors on unused GPIO pins.
9848 */
9849 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9850 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 9851
1da177e4 9852 /* Force the chip into D0. */
bc1c7567 9853 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
9854 if (err) {
9855 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9856 pci_name(tp->pdev));
9857 return err;
9858 }
9859
9860 /* 5700 B0 chips do not support checksumming correctly due
9861 * to hardware bugs.
9862 */
9863 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9864 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9865
9866 /* Pseudo-header checksum is done by hardware logic and not
9867 * the offload processors, so make the chip do the pseudo-
9868 * header checksums on receive. For transmit it is more
9869 * convenient to do the pseudo-header checksum in software
9870 * as Linux does that on transmit for us in all cases.
9871 */
9872 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9873 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9874
9875 /* Derive initial jumbo mode from MTU assigned in
9876 * ether_setup() via the alloc_etherdev() call
9877 */
0f893dc6 9878 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 9879 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 9880 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
9881
9882 /* Determine WakeOnLan speed to use. */
9883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9884 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9885 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9886 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9887 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9888 } else {
9889 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9890 }
9891
9892 /* A few boards don't want Ethernet@WireSpeed phy feature */
9893 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9894 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9895 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b
MC
9896 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9897 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
9898 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9899
9900 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9901 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9902 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9903 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9904 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9905
bb7064dc 9906 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
1da177e4
LT
9907 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9908
1da177e4 9909 tp->coalesce_mode = 0;
1da177e4
LT
9910 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9911 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9912 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9913
9914 /* Initialize MAC MI mode, polling disabled. */
9915 tw32_f(MAC_MI_MODE, tp->mi_mode);
9916 udelay(80);
9917
9918 /* Initialize data/descriptor byte/word swapping. */
9919 val = tr32(GRC_MODE);
9920 val &= GRC_MODE_HOST_STACKUP;
9921 tw32(GRC_MODE, val | tp->grc_mode);
9922
9923 tg3_switch_clocks(tp);
9924
9925 /* Clear this out for sanity. */
9926 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9927
9928 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9929 &pci_state_reg);
9930 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9931 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9932 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9933
9934 if (chiprevid == CHIPREV_ID_5701_A0 ||
9935 chiprevid == CHIPREV_ID_5701_B0 ||
9936 chiprevid == CHIPREV_ID_5701_B2 ||
9937 chiprevid == CHIPREV_ID_5701_B5) {
9938 void __iomem *sram_base;
9939
9940 /* Write some dummy words into the SRAM status block
9941 * area, see if it reads back correctly. If the return
9942 * value is bad, force enable the PCIX workaround.
9943 */
9944 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9945
9946 writel(0x00000000, sram_base);
9947 writel(0x00000000, sram_base + 4);
9948 writel(0xffffffff, sram_base + 4);
9949 if (readl(sram_base) != 0x00000000)
9950 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9951 }
9952 }
9953
9954 udelay(50);
9955 tg3_nvram_init(tp);
9956
9957 grc_misc_cfg = tr32(GRC_MISC_CFG);
9958 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9959
9960 /* Broadcom's driver says that CIOBE multisplit has a bug */
9961#if 0
9962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9963 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9964 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9965 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9966 }
9967#endif
9968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9969 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9970 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9971 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9972
fac9b83e
DM
9973 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9974 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9975 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9976 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9977 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9978 HOSTCC_MODE_CLRTICK_TXBD);
9979
9980 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9981 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9982 tp->misc_host_ctrl);
9983 }
9984
1da177e4
LT
9985 /* these are limited to 10/100 only */
9986 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9987 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9988 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9989 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9990 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9991 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9992 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9993 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9994 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9995 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9996 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9997
9998 err = tg3_phy_probe(tp);
9999 if (err) {
10000 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10001 pci_name(tp->pdev), err);
10002 /* ... but do not return immediately ... */
10003 }
10004
10005 tg3_read_partno(tp);
10006
10007 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10008 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10009 } else {
10010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10011 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10012 else
10013 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10014 }
10015
10016 /* 5700 {AX,BX} chips have a broken status block link
10017 * change bit implementation, so we must use the
10018 * status register in those cases.
10019 */
10020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10021 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10022 else
10023 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10024
10025 /* The led_ctrl is set during tg3_phy_probe; here we might
10026 * have to force the link status polling mechanism based
10027 * upon subsystem IDs.
10028 */
10029 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10030 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10031 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10032 TG3_FLAG_USE_LINKCHG_REG);
10033 }
10034
10035 /* For all SERDES we poll the MAC status register. */
10036 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10037 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10038 else
10039 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10040
1da177e4
LT
10041 /* It seems all chips can get confused if TX buffers
10042 * straddle the 4GB address boundary in some cases.
10043 */
10044 tp->dev->hard_start_xmit = tg3_start_xmit;
10045
10046 tp->rx_offset = 2;
10047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10048 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10049 tp->rx_offset = 0;
10050
10051 /* By default, disable wake-on-lan. User can change this
10052 * using ETHTOOL_SWOL.
10053 */
10054 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10055
10056 return err;
10057}
10058
10059#ifdef CONFIG_SPARC64
10060static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10061{
10062 struct net_device *dev = tp->dev;
10063 struct pci_dev *pdev = tp->pdev;
10064 struct pcidev_cookie *pcp = pdev->sysdata;
10065
10066 if (pcp != NULL) {
10067 int node = pcp->prom_node;
10068
10069 if (prom_getproplen(node, "local-mac-address") == 6) {
10070 prom_getproperty(node, "local-mac-address",
10071 dev->dev_addr, 6);
2ff43697 10072 memcpy(dev->perm_addr, dev->dev_addr, 6);
1da177e4
LT
10073 return 0;
10074 }
10075 }
10076 return -ENODEV;
10077}
10078
10079static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10080{
10081 struct net_device *dev = tp->dev;
10082
10083 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 10084 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
10085 return 0;
10086}
10087#endif
10088
10089static int __devinit tg3_get_device_address(struct tg3 *tp)
10090{
10091 struct net_device *dev = tp->dev;
10092 u32 hi, lo, mac_offset;
10093
10094#ifdef CONFIG_SPARC64
10095 if (!tg3_get_macaddr_sparc(tp))
10096 return 0;
10097#endif
10098
10099 mac_offset = 0x7c;
4cf78e4f
MC
10100 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10101 !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
a4e2b347 10102 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
10103 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10104 mac_offset = 0xcc;
10105 if (tg3_nvram_lock(tp))
10106 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10107 else
10108 tg3_nvram_unlock(tp);
10109 }
10110
10111 /* First try to get it from MAC address mailbox. */
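	/* 0x484b appears to be the ASCII pair "HK", a signature the boot
	 * firmware leaves in the mailbox when a valid address is present.
	 */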
10112 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10113 if ((hi >> 16) == 0x484b) {
10114 dev->dev_addr[0] = (hi >> 8) & 0xff;
10115 dev->dev_addr[1] = (hi >> 0) & 0xff;
10116
10117 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10118 dev->dev_addr[2] = (lo >> 24) & 0xff;
10119 dev->dev_addr[3] = (lo >> 16) & 0xff;
10120 dev->dev_addr[4] = (lo >> 8) & 0xff;
10121 dev->dev_addr[5] = (lo >> 0) & 0xff;
10122 }
10123 /* Next, try NVRAM. */
10124 else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10125 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10126 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10127 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10128 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10129 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10130 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10131 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10132 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10133 }
10134 /* Finally just fetch it out of the MAC control regs. */
10135 else {
10136 hi = tr32(MAC_ADDR_0_HIGH);
10137 lo = tr32(MAC_ADDR_0_LOW);
10138
10139 dev->dev_addr[5] = lo & 0xff;
10140 dev->dev_addr[4] = (lo >> 8) & 0xff;
10141 dev->dev_addr[3] = (lo >> 16) & 0xff;
10142 dev->dev_addr[2] = (lo >> 24) & 0xff;
10143 dev->dev_addr[1] = hi & 0xff;
10144 dev->dev_addr[0] = (hi >> 8) & 0xff;
10145 }
10146
10147 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10148#ifdef CONFIG_SPARC64
10149 if (!tg3_get_default_macaddr_sparc(tp))
10150 return 0;
10151#endif
10152 return -EINVAL;
10153 }
2ff43697 10154 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
10155 return 0;
10156}
10157
59e6b434
DM
10158#define BOUNDARY_SINGLE_CACHELINE 1
10159#define BOUNDARY_MULTI_CACHELINE 2
10160
10161static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10162{
10163 int cacheline_size;
10164 u8 byte;
10165 int goal;
10166
10167 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10168 if (byte == 0)
10169 cacheline_size = 1024;
10170 else
10171 cacheline_size = (int) byte * 4;
10172
10173 /* On 5703 and later chips, the boundary bits have no
10174 * effect.
10175 */
10176 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10177 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10178 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10179 goto out;
10180
10181#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10182 goal = BOUNDARY_MULTI_CACHELINE;
10183#else
10184#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10185 goal = BOUNDARY_SINGLE_CACHELINE;
10186#else
10187 goal = 0;
10188#endif
10189#endif
10190
10191 if (!goal)
10192 goto out;
10193
10194 /* PCI controllers on most RISC systems tend to disconnect
10195 * when a device tries to burst across a cache-line boundary.
10196 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10197 *
10198 * Unfortunately, for PCI-E there are only limited
10199 * write-side controls for this, and thus for reads
10200 * we will still get the disconnects. We'll also waste
10201 * these PCI cycles for both read and write for chips
10202 * other than 5700 and 5701 which do not implement the
10203 * boundary bits.
10204 */
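	/* Illustrative example: on conventional PCI with a 64-byte cache
	 * line and a BOUNDARY_SINGLE_CACHELINE goal, the final switch below
	 * selects DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64.
	 */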
10205 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10206 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10207 switch (cacheline_size) {
10208 case 16:
10209 case 32:
10210 case 64:
10211 case 128:
10212 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10213 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10214 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10215 } else {
10216 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10217 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10218 }
10219 break;
10220
10221 case 256:
10222 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10223 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10224 break;
10225
10226 default:
10227 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10228 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10229 break;
10230 };
10231 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10232 switch (cacheline_size) {
10233 case 16:
10234 case 32:
10235 case 64:
10236 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10237 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10238 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10239 break;
10240 }
10241 /* fallthrough */
10242 case 128:
10243 default:
10244 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10245 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10246 break;
10247 };
10248 } else {
10249 switch (cacheline_size) {
10250 case 16:
10251 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10252 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10253 DMA_RWCTRL_WRITE_BNDRY_16);
10254 break;
10255 }
10256 /* fallthrough */
10257 case 32:
10258 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10259 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10260 DMA_RWCTRL_WRITE_BNDRY_32);
10261 break;
10262 }
10263 /* fallthrough */
10264 case 64:
10265 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10266 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10267 DMA_RWCTRL_WRITE_BNDRY_64);
10268 break;
10269 }
10270 /* fallthrough */
10271 case 128:
10272 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10273 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10274 DMA_RWCTRL_WRITE_BNDRY_128);
10275 break;
10276 }
10277 /* fallthrough */
10278 case 256:
10279 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10280 DMA_RWCTRL_WRITE_BNDRY_256);
10281 break;
10282 case 512:
10283 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10284 DMA_RWCTRL_WRITE_BNDRY_512);
10285 break;
10286 case 1024:
10287 default:
10288 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10289 DMA_RWCTRL_WRITE_BNDRY_1024);
10290 break;
10291 };
10292 }
10293
10294out:
10295 return val;
10296}
10297
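/* DMA one test buffer between host memory and NIC SRAM (buffer offset
 * 0x2100) using a single internal buffer descriptor, then poll the
 * corresponding completion FIFO to see whether the transfer finished.
 * to_device selects the read-DMA (host to NIC) or write-DMA (NIC to host)
 * engine.
 */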
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 systems were seeing test failures for 5701 cards running
	 * at 33MHz the *second* time the tg3 driver was loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 * ...the DMA engine is connected to the GRC block and a DMA
	 * reset may affect the GRC block in some unpredictable way...
	 * The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

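	/* Copy the descriptor into NIC SRAM one 32-bit word at a time through
	 * the indirect memory window (TG3PCI_MEM_WIN_BASE_ADDR/DATA in PCI
	 * config space), then reset the window base.
	 */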
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

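	/* Poll the completion FIFO for up to ~4 ms (40 polls, 100 us apart);
	 * the low 16 bits of the FIFO entry match the descriptor's SRAM
	 * address once the DMA has finished.
	 */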
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

#define TEST_BUFFER_SIZE	0x2000
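/* 0x2000 bytes = 8 KiB scratch buffer for the DMA loopback test below. */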
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

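	/* Fill the buffer with a counting pattern, DMA it to the chip and
	 * back, and compare.  On a mismatch, fall back to a 16-byte write
	 * boundary and retry; only if it still fails with the 16-byte
	 * boundary is the chip declared broken.
	 */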
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	netif_carrier_off(tp->dev);
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}

static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & PHY_ID_MASK) {
	case PHY_ID_BCM5400:	return "5400";
	case PHY_ID_BCM5401:	return "5401";
	case PHY_ID_BCM5411:	return "5411";
	case PHY_ID_BCM5701:	return "5701";
	case PHY_ID_BCM5703:	return "5703";
	case PHY_ID_BCM5704:	return "5704";
	case PHY_ID_BCM5705:	return "5705";
	case PHY_ID_BCM5750:	return "5750";
	case PHY_ID_BCM5752:	return "5752";
	case PHY_ID_BCM5714:	return "5714";
	case PHY_ID_BCM5780:	return "5780";
	case PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	};
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

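	/* devfn & ~7 selects function 0 of this slot; the second port of a
	 * dual-port device shows up as another PCI function in the same
	 * slot, so scan all eight functions looking for it.
	 */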
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}

static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
		   (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		dev->features |= NETIF_F_TSO;

#endif

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly.  The DMA self test will enable WDMAC, and we would
	 * otherwise see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	       (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		flush_scheduled_work();
		unregister_netdev(dev);
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
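	/* If entering the low-power state failed, reinitialize the hardware
	 * and restart the interface so it remains usable.
	 */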
	if (err) {
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		tg3_init_hw(tp);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}

static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_init_hw(tp);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_module_init(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);