]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Add support for 5714S and 5715S
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
14c85021 27#include <linux/in.h>
1da177e4
LT
28#include <linux/init.h>
29#include <linux/ioport.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/if_vlan.h>
37#include <linux/ip.h>
38#include <linux/tcp.h>
39#include <linux/workqueue.h>
61487480 40#include <linux/prefetch.h>
f9a5f7d3 41#include <linux/dma-mapping.h>
1da177e4
LT
42
43#include <net/checksum.h>
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
50#ifdef CONFIG_SPARC64
51#include <asm/idprom.h>
52#include <asm/oplib.h>
53#include <asm/pbm.h>
54#endif
55
56#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57#define TG3_VLAN_TAG_USED 1
58#else
59#define TG3_VLAN_TAG_USED 0
60#endif
61
62#ifdef NETIF_F_TSO
63#define TG3_TSO_SUPPORT 1
64#else
65#define TG3_TSO_SUPPORT 0
66#endif
67
68#include "tg3.h"
69
70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": "
6e2be3ea
DM
72#define DRV_MODULE_VERSION "3.49"
73#define DRV_MODULE_RELDATE "Feb 2, 2006"
1da177e4
LT
74
75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0
77#define TG3_DEF_TX_MODE 0
78#define TG3_DEF_MSG_ENABLE \
79 (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | \
83 NETIF_MSG_IFDOWN | \
84 NETIF_MSG_IFUP | \
85 NETIF_MSG_RX_ERR | \
86 NETIF_MSG_TX_ERR)
87
88/* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
90 */
91#define TG3_TX_TIMEOUT (5 * HZ)
92
93/* hardware minimum and maximum for a single frame's data payload */
94#define TG3_MIN_MTU 60
95#define TG3_MAX_MTU(tp) \
0f893dc6 96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
97
98/* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
101 */
102#define TG3_RX_RING_SIZE 512
103#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JUMBO_RING_SIZE 256
105#define TG3_DEF_RX_JUMBO_RING_PENDING 100
106
107/* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
112 */
113#define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115
116#define TG3_TX_RING_SIZE 512
117#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118
119#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RING_SIZE)
121#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE)
1da177e4 127#define TX_BUFFS_AVAIL(TP) \
51b91468
MC
128 ((TP)->tx_pending - \
129 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
1da177e4
LT
130#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134
135/* minimum number of free TX descriptors required to wake up TX process */
136#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137
138/* number of ETHTOOL_GSTATS u64's */
139#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
4cafd3f5
MC
141#define TG3_NUM_TEST 6
142
1da177e4
LT
143static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148MODULE_LICENSE("GPL");
149MODULE_VERSION(DRV_MODULE_VERSION);
150
151static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152module_param(tg3_debug, int, 0);
153MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
/* PCI IDs this driver binds to: all Broadcom Tigon3 variants plus
 * SysKonnect, Altima, and Apple boards built on the same silicon.
 * Every entry matches on vendor/device only (subsystem wildcarded).
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }	/* terminator */
};
254
255MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
256
/* Names reported to ethtool -S, one per u64 slot of
 * struct tg3_ethtool_stats; order must match that struct exactly
 * (TG3_NUM_STATS is derived from its size).
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
337
4cafd3f5
MC
/* Names for the TG3_NUM_TEST self-test results reported to
 * ethtool; order must match the order the tests are run in.
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
348
b401e9e2
MC
/* Plain MMIO register write; posted, with no read-back flush. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
353
/* Plain MMIO register read. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}
358
1da177e4
LT
/* Register write through the PCI config-space indirect window:
 * program the target offset into TG3PCI_REG_BASE_ADDR, then write
 * the value via TG3PCI_REG_DATA.  indirect_lock serializes the
 * two-step config accesses against other users of the window.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
368
/* MMIO register write followed by a read-back of the same register
 * to flush the posted write to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
374
/* Register read through the PCI config-space indirect window;
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
386
/* Mailbox write via PCI config space for chips that cannot use
 * direct MMIO mailbox writes.  Two mailboxes have dedicated
 * config-space aliases and are written directly; all others go
 * through the indirect register window at off + 0x5600 (presumably
 * the mailbox-to-register-window offset -- confirm against the
 * register map).  Writing 1 to the interrupt mailbox in this mode
 * additionally requires clearing the interrupt via GRC local ctrl.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
416
/* Mailbox read via the PCI config-space indirect window;
 * counterpart of tg3_write_indirect_mbox() (same 0x5600 offset).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
428
b401e9e2
MC
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	/* PCIX target hwbug / ICH workaround chips use a non-posted
	 * write method (tp->write32 is the indirect path), so no
	 * flush read is needed; otherwise write, delay, then read
	 * back to flush the posted write.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
453
09ee929c
MC
/* Mailbox write with flush: read the mailbox back afterwards unless
 * the chip needs the write-reorder or ICH workaround, in which case
 * the read-back is skipped.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
461
/* TX mailbox write.  Chips with the TXD mailbox hardware bug need
 * the value written twice; chips with mailbox write reordering need
 * a read-back to flush the write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
471
20094930 472#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 473#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
474#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
475#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 476#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
477
478#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
479#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
480#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 481#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
482
/* Write a word of NIC on-board SRAM through the config-space memory
 * window (base address + data registers), under indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
495
28fbef78
MC
/* Write NIC SRAM, using the direct MMIO window when the chip does
 * not require the indirect-register workaround; otherwise fall back
 * to the config-space path in tg3_write_mem().
 */
static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
{
	/* If no workaround is needed, write to mem space directly */
	if (tp->write32 != tg3_write_indirect_reg32)
		tw32(NIC_SRAM_WIN_BASE + off, val);
	else
		tg3_write_mem(tp, off, val);
}
504
1da177e4
LT
/* Read a word of NIC on-board SRAM through the config-space memory
 * window; counterpart of tg3_write_mem().
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
517
/* Disable chip interrupts: mask the PCI interrupt in the misc host
 * control register and write 1 to the interrupt mailbox (flushed).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
524
/* Force an interrupt via GRC local ctrl if the status block has a
 * pending update and the chip is not using tagged status mode.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
531
/* Re-enable chip interrupts: clear irq_sync (with a barrier so the
 * store is visible before the unmask), unmask the PCI interrupt,
 * write last_tag to the interrupt mailbox, and force an interrupt
 * if status work is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
543
04237ddd
MC
/* Return non-zero if the status block indicates pending work:
 * a link-change event (when link changes are interrupt driven)
 * or RX/TX ring indices that differ from our cached positions.
 */
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
563
/* tg3_restart_ints
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Unflushed mailbox write; mmiowb() orders it against any
	 * subsequent MMIO from other CPUs.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
584
/* Quiesce the network interface: refresh trans_start so the stack
 * does not declare a TX timeout while we are stopped, then disable
 * polling (NAPI) and TX.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
591
/* Resume the network interface: wake the TX queue, re-enable
 * polling, mark the status block updated so tg3_cond_int() will
 * raise an interrupt if needed, and re-enable interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
603
/* Switch the core clock source, preserving only the CLKRUN-related
 * bits of the current clock control value.  5780-class chips are
 * skipped entirely.  Each write uses tw32_wait_f() with a 40 usec
 * settle delay, and 5705+ vs. older chips take different ALTCLK
 * sequences.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
634
635#define PHY_BUSY_LOOPS 5000
636
/* Read a PHY register over MDIO via the MAC_MI_COM interface.
 * Auto-polling is temporarily disabled around the transaction and
 * restored afterwards.  Polls up to PHY_BUSY_LOOPS for the BUSY bit
 * to clear, then re-reads the register for the data.
 *
 * Returns 0 and stores the 16-bit value in *val on success,
 * -EBUSY on timeout (in which case *val is left at 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
685
/* Write a PHY register over MDIO via MAC_MI_COM; mirror of
 * tg3_readphy().  Auto-polling is disabled around the transaction.
 *
 * Returns 0 on success, -EBUSY if the BUSY bit never clears.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
730
/* Enable the PHY's ethernet@wirespeed feature (bits 15 and 4 of the
 * AUX_CTRL shadow selected by 0x7007) unless the chip flags say the
 * feature must stay off.  Best effort: failures are ignored.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
743
744static int tg3_bmcr_reset(struct tg3 *tp)
745{
746 u32 phy_control;
747 int limit, err;
748
749 /* OK, reset it, and poll the BMCR_RESET bit until it
750 * clears or we time out.
751 */
752 phy_control = BMCR_RESET;
753 err = tg3_writephy(tp, MII_BMCR, phy_control);
754 if (err != 0)
755 return -EBUSY;
756
757 limit = 5000;
758 while (limit--) {
759 err = tg3_readphy(tp, MII_BMCR, &phy_control);
760 if (err != 0)
761 return -EBUSY;
762
763 if ((phy_control & BMCR_RESET) == 0) {
764 udelay(40);
765 break;
766 }
767 udelay(10);
768 }
769 if (limit <= 0)
770 return -EBUSY;
771
772 return 0;
773}
774
775static int tg3_wait_macro_done(struct tg3 *tp)
776{
777 int limit = 100;
778
779 while (limit--) {
780 u32 tmp32;
781
782 if (!tg3_readphy(tp, 0x16, &tmp32)) {
783 if ((tmp32 & 0x1000) == 0)
784 break;
785 }
786 }
787 if (limit <= 0)
788 return -EBUSY;
789
790 return 0;
791}
792
/* Write a fixed test pattern into each of the PHY's four DSP
 * channels, trigger the macro operations via register 0x16, then
 * read the pattern back and compare.  On any mismatch the DSP is
 * poked with a recovery sequence (0x000b / 0x4001 / 0x4005) and
 * -EBUSY is returned.  *resetp is set to 1 when the caller should
 * re-reset the PHY before retrying.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's DSP address block and arm it. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to read-back mode for the same channel. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Pattern words come back as low/high pairs. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
858
/* Clear the test pattern in all four PHY DSP channels by writing
 * zeros and triggering the macro; returns -EBUSY if the macro does
 * not complete for any channel.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
878
/* Extended PHY reset workaround for 5703/5704/5705-class chips:
 * repeatedly reset the PHY, force 1000/full master mode, and verify
 * the DSP with a test pattern (up to 10 attempts), then clear the
 * pattern and restore the original register state.
 *
 * NOTE(review): if every tg3_readphy() in the retry loop fails,
 * reg32 and phy9_orig are used uninitialized in the restore path
 * below -- latent bug, left as-is to preserve behavior.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the original 1000BASE-T control value. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter/interrupt by clearing the bits set above. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
954
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Reads BMSR twice (the first read returns latched status), then
 * either runs the 5703/4/5 workaround reset or a plain BMCR reset,
 * applies per-chip PHY bug workarounds via DSP writes, and finally
 * configures jumbo-frame and wirespeed settings.
 * Returns 0 on success or a negative errno from the reset path.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* Double read: BMSR latches link-down events. */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* DSP fixups for known PHY silicon bugs, keyed off chip flags. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1032
/* Configure the GRC local control GPIOs that gate auxiliary (Vaux)
 * power.  On dual-port chips (5704/5714) the two ports share the aux
 * power rails, so the peer device's WOL/ASF state is consulted before
 * touching them.  The exact sequence of tw32_wait_f() writes below is
 * a hardware bring-up order; do not reorder.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* LOMs with write-protected EEPROM are left alone. */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* A dual-port chip must always have a registered peer. */
		if (!dev_peer)
			BUG();
		tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Aux power must stay on for WOL/ASF on either port. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port already configured aux power,
			 * do not redo it here.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			/* Raise GPIO0 only after the other GPIOs settle. */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs aux power: pulse GPIO1 to drop it. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1126
/* Forward declarations needed by tg3_set_power_state() below. */
static int tg3_setup_phy(struct tg3 *, int);

/* Reset "kind" values passed to tg3_write_sig_post_reset() so the
 * on-chip firmware knows why the driver reset the device.
 */
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);
1137
/* Move the device to PCI power state @state (0 = D0 ... 3 = D3).
 *
 * For state 0 this simply programs PM_CTRL and returns.  For the low
 * power states it additionally: forces the link down to 10/half (for
 * WOL), signals the firmware mailbox, configures the MAC for
 * wake-on-LAN, gates the core clocks, powers down the PHY when
 * neither WOL nor ASF needs it, and finally writes the PM register.
 *
 * Returns 0 on success or -EINVAL for an unknown @state.
 */
static int tg3_set_power_state(struct tg3 *tp, int state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Writing 1 clears the PME status bit. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case 0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case 1:
		power_control |= 1;
		break;

	case 2:
		power_control |= 2;
		break;

	case 3:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper PHYs are dropped to 10/half for low-power WOL. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	/* Without ASF, wait (up to 200 ms) for the boot firmware to
	 * signal completion in its status mailbox before proceeding.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			/* 0x5a is a vendor magic AUX_CTRL value; meaning
			 * undocumented here.
			 */
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver running so wake packets are seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate core clocks; which bits apply depends on the chip family. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write: the clock bits must be applied in
		 * this order with a settle wait between them.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		/* Turn off the PHY */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
			/* 5700 cannot use BMCR power-down. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
				tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Hold the NVRAM lock across the CPU halt; only
			 * unlock if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
1349
1350static void tg3_link_report(struct tg3 *tp)
1351{
1352 if (!netif_carrier_ok(tp->dev)) {
1353 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1354 } else {
1355 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1356 tp->dev->name,
1357 (tp->link_config.active_speed == SPEED_1000 ?
1358 1000 :
1359 (tp->link_config.active_speed == SPEED_100 ?
1360 100 : 10)),
1361 (tp->link_config.active_duplex == DUPLEX_FULL ?
1362 "full" : "half"));
1363
1364 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1365 "%s for RX.\n",
1366 tp->dev->name,
1367 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1368 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1369 }
1370}
1371
/* Resolve TX/RX pause (flow control) settings from the local and
 * remote autoneg advertisements and program MAC_RX_MODE/MAC_TX_MODE
 * accordingly.  @local_adv/@remote_adv are MII advertisement register
 * values (converted from 1000BaseX form first when on MII serdes).
 * The registers are only rewritten when the resolved mode changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Standard pause resolution from the CAP/ASYM bit
		 * combinations of both link partners.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			    (remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Pause autoneg disabled: keep the forced settings. */
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1443
1444static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1445{
1446 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1447 case MII_TG3_AUX_STAT_10HALF:
1448 *speed = SPEED_10;
1449 *duplex = DUPLEX_HALF;
1450 break;
1451
1452 case MII_TG3_AUX_STAT_10FULL:
1453 *speed = SPEED_10;
1454 *duplex = DUPLEX_FULL;
1455 break;
1456
1457 case MII_TG3_AUX_STAT_100HALF:
1458 *speed = SPEED_100;
1459 *duplex = DUPLEX_HALF;
1460 break;
1461
1462 case MII_TG3_AUX_STAT_100FULL:
1463 *speed = SPEED_100;
1464 *duplex = DUPLEX_FULL;
1465 break;
1466
1467 case MII_TG3_AUX_STAT_1000HALF:
1468 *speed = SPEED_1000;
1469 *duplex = DUPLEX_HALF;
1470 break;
1471
1472 case MII_TG3_AUX_STAT_1000FULL:
1473 *speed = SPEED_1000;
1474 *duplex = DUPLEX_FULL;
1475 break;
1476
1477 default:
1478 *speed = SPEED_INVALID;
1479 *duplex = DUPLEX_INVALID;
1480 break;
1481 };
1482}
1483
/* Program the copper PHY advertisement registers and (re)start link
 * negotiation according to tp->link_config.  Three cases:
 *  - low-power mode: advertise only 10 (and 100 with WOL_SPEED_100MB);
 *  - no speed requested: advertise everything the chip supports;
 *  - forced speed/duplex: advertise just that mode.
 * With autoneg disabled and a valid speed, BMCR is written directly
 * (after bouncing through loopback to force link down); otherwise
 * autoneg is enabled and restarted.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode. Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific mode requested: advertise all modes. */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 must negotiate as master (errata). */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and wait (up to 15 ms)
			 * for link to drop before forcing the new mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1622
1623static int tg3_init_5401phy_dsp(struct tg3 *tp)
1624{
1625 int err;
1626
1627 /* Turn off tap power management. */
1628 /* Set Extended packet length bit */
1629 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1630
1631 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1632 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1633
1634 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1635 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1636
1637 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1638 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1639
1640 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1641 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1642
1643 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1644 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1645
1646 udelay(40);
1647
1648 return err;
1649}
1650
1651static int tg3_copper_is_advertising_all(struct tg3 *tp)
1652{
1653 u32 adv_reg, all_mask;
1654
1655 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1656 return 0;
1657
1658 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1659 ADVERTISE_100HALF | ADVERTISE_100FULL);
1660 if ((adv_reg & all_mask) != all_mask)
1661 return 0;
1662 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1663 u32 tg3_ctrl;
1664
1665 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1666 return 0;
1667
1668 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1669 MII_TG3_CTRL_ADV_1000_FULL);
1670 if ((tg3_ctrl & all_mask) != all_mask)
1671 return 0;
1672 }
1673 return 1;
1674}
1675
/* Bring up (or re-evaluate) the link on a copper PHY.
 *
 * Clears pending MAC/PHY events, optionally resets the PHY (forced by
 * @force_reset or by errata conditions), applies per-PHY workarounds,
 * reads the negotiated speed/duplex, resolves flow control, programs
 * MAC_MODE to match, and updates the netdev carrier state.  Always
 * returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Acknowledge any stale MAC status change events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* Double read: BMSR link status is latched-low. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Poll up to ~10 ms for link after DSP init. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a full reset plus
			 * another DSP init to recover link.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			/* Bit 10 meaning undocumented here; setting it
			 * and skipping straight to relink is the
			 * capacitive-coupling workaround.
			 */
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 double-reads) for link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode it. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane BMCR value (not 0 and not the
		 * all-ones-ish 0x7fff transient).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link only counts if the PHY is
			 * actually in the requested speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the negotiated link. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X/high-speed PCI: notify firmware
	 * after a link-state settle (vendor-specified sequence).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
1954
/* State tracked by the software fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine).  Field and constant names follow the
 * IEEE 802.3 clause 37 terminology (MR_* management register flags,
 * ability/ack/idle match detection, tx/rx config words).
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* below */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;		/* MR_* control/status bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Timestamps in state-machine ticks (incremented per call). */
	unsigned long link_time, cur_time;

	/* Last received config word and how many times it repeated. */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* transmitted/received config words */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};

/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must persist before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
2018
/* One tick of the software 1000BASE-X autonegotiation state machine
 * for fiber links (driven repeatedly by fiber_autoneg()).
 *
 * Samples the received configuration word from the MAC, maintains the
 * ability/ack/idle match tracking in @ap, and advances ap->state.
 *
 * Returns ANEG_OK to keep ticking, ANEG_TIMER_ENAB when a settle timer
 * has been (re)armed, ANEG_DONE on completion, or ANEG_FAILED on error.
 */
2019static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2020 struct tg3_fiber_aneginfo *ap)
2021{
2022 unsigned long delta;
2023 u32 rx_cfg_reg;
2024 int ret;
2025
2026 if (ap->state == ANEG_STATE_UNKNOWN) {
2027 ap->rxconfig = 0;
2028 ap->link_time = 0;
2029 ap->cur_time = 0;
2030 ap->ability_match_cfg = 0;
2031 ap->ability_match_count = 0;
2032 ap->ability_match = 0;
2033 ap->idle_match = 0;
2034 ap->ack_match = 0;
2035 }
2036 ap->cur_time++;
2037
 /* Sample the incoming config word; an ability match requires the
  * same non-idle word to be seen on more than one consecutive tick.
  */
2038 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2039 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2040
2041 if (rx_cfg_reg != ap->ability_match_cfg) {
2042 ap->ability_match_cfg = rx_cfg_reg;
2043 ap->ability_match = 0;
2044 ap->ability_match_count = 0;
2045 } else {
2046 if (++ap->ability_match_count > 1) {
2047 ap->ability_match = 1;
2048 ap->ability_match_cfg = rx_cfg_reg;
2049 }
2050 }
2051 if (rx_cfg_reg & ANEG_CFG_ACK)
2052 ap->ack_match = 1;
2053 else
2054 ap->ack_match = 0;
2055
2056 ap->idle_match = 0;
2057 } else {
2058 ap->idle_match = 1;
2059 ap->ability_match_cfg = 0;
2060 ap->ability_match_count = 0;
2061 ap->ability_match = 0;
2062 ap->ack_match = 0;
2063
2064 rx_cfg_reg = 0;
2065 }
2066
2067 ap->rxconfig = rx_cfg_reg;
2068 ret = ANEG_OK;
2069
2070 switch(ap->state) {
2071 case ANEG_STATE_UNKNOWN:
2072 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2073 ap->state = ANEG_STATE_AN_ENABLE;
2074
2075 /* fallthru */
2076 case ANEG_STATE_AN_ENABLE:
2077 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2078 if (ap->flags & MR_AN_ENABLE) {
2079 ap->link_time = 0;
2080 ap->cur_time = 0;
2081 ap->ability_match_cfg = 0;
2082 ap->ability_match_count = 0;
2083 ap->ability_match = 0;
2084 ap->idle_match = 0;
2085 ap->ack_match = 0;
2086
2087 ap->state = ANEG_STATE_RESTART_INIT;
2088 } else {
2089 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2090 }
2091 break;
2092
2093 case ANEG_STATE_RESTART_INIT:
2094 ap->link_time = ap->cur_time;
2095 ap->flags &= ~(MR_NP_LOADED);
2096 ap->txconfig = 0;
2097 tw32(MAC_TX_AUTO_NEG, 0);
2098 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2099 tw32_f(MAC_MODE, tp->mac_mode);
2100 udelay(40);
2101
2102 ret = ANEG_TIMER_ENAB;
2103 ap->state = ANEG_STATE_RESTART;
2104
2105 /* fallthru */
2106 case ANEG_STATE_RESTART:
2107 delta = ap->cur_time - ap->link_time;
2108 if (delta > ANEG_STATE_SETTLE_TIME) {
2109 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2110 } else {
2111 ret = ANEG_TIMER_ENAB;
2112 }
2113 break;
2114
2115 case ANEG_STATE_DISABLE_LINK_OK:
2116 ret = ANEG_DONE;
2117 break;
2118
2119 case ANEG_STATE_ABILITY_DETECT_INIT:
 /* Advertise full duplex + symmetric pause in our config word. */
2120 ap->flags &= ~(MR_TOGGLE_TX);
2121 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2122 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2123 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2124 tw32_f(MAC_MODE, tp->mac_mode);
2125 udelay(40);
2126
2127 ap->state = ANEG_STATE_ABILITY_DETECT;
2128 break;
2129
2130 case ANEG_STATE_ABILITY_DETECT:
2131 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2132 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2133 }
2134 break;
2135
2136 case ANEG_STATE_ACK_DETECT_INIT:
2137 ap->txconfig |= ANEG_CFG_ACK;
2138 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2139 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2140 tw32_f(MAC_MODE, tp->mac_mode);
2141 udelay(40);
2142
2143 ap->state = ANEG_STATE_ACK_DETECT;
2144
2145 /* fallthru */
2146 case ANEG_STATE_ACK_DETECT:
2147 if (ap->ack_match != 0) {
2148 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2149 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2150 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2151 } else {
2152 ap->state = ANEG_STATE_AN_ENABLE;
2153 }
2154 } else if (ap->ability_match != 0 &&
2155 ap->rxconfig == 0) {
2156 ap->state = ANEG_STATE_AN_ENABLE;
2157 }
2158 break;
2159
2160 case ANEG_STATE_COMPLETE_ACK_INIT:
2161 if (ap->rxconfig & ANEG_CFG_INVAL) {
2162 ret = ANEG_FAILED;
2163 break;
2164 }
 /* Decode the link partner's advertised abilities into MR_* flags. */
2165 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2166 MR_LP_ADV_HALF_DUPLEX |
2167 MR_LP_ADV_SYM_PAUSE |
2168 MR_LP_ADV_ASYM_PAUSE |
2169 MR_LP_ADV_REMOTE_FAULT1 |
2170 MR_LP_ADV_REMOTE_FAULT2 |
2171 MR_LP_ADV_NEXT_PAGE |
2172 MR_TOGGLE_RX |
2173 MR_NP_RX);
2174 if (ap->rxconfig & ANEG_CFG_FD)
2175 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2176 if (ap->rxconfig & ANEG_CFG_HD)
2177 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2178 if (ap->rxconfig & ANEG_CFG_PS1)
2179 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2180 if (ap->rxconfig & ANEG_CFG_PS2)
2181 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2182 if (ap->rxconfig & ANEG_CFG_RF1)
2183 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2184 if (ap->rxconfig & ANEG_CFG_RF2)
2185 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2186 if (ap->rxconfig & ANEG_CFG_NP)
2187 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2188
2189 ap->link_time = ap->cur_time;
2190
2191 ap->flags ^= (MR_TOGGLE_TX);
 /* 0x0008 appears to be the toggle bit of the received config
  * word (no ANEG_CFG_* name exists for it) -- TODO confirm.
  */
2192 if (ap->rxconfig & 0x0008)
2193 ap->flags |= MR_TOGGLE_RX;
2194 if (ap->rxconfig & ANEG_CFG_NP)
2195 ap->flags |= MR_NP_RX;
2196 ap->flags |= MR_PAGE_RX;
2197
2198 ap->state = ANEG_STATE_COMPLETE_ACK;
2199 ret = ANEG_TIMER_ENAB;
2200 break;
2201
2202 case ANEG_STATE_COMPLETE_ACK:
2203 if (ap->ability_match != 0 &&
2204 ap->rxconfig == 0) {
2205 ap->state = ANEG_STATE_AN_ENABLE;
2206 break;
2207 }
2208 delta = ap->cur_time - ap->link_time;
2209 if (delta > ANEG_STATE_SETTLE_TIME) {
2210 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2211 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2212 } else {
2213 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2214 !(ap->flags & MR_NP_RX)) {
2215 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2216 } else {
 /* Next-page exchange is not implemented (see
  * the NEXT_PAGE_WAIT states below), so fail.
  */
2217 ret = ANEG_FAILED;
2218 }
2219 }
2220 }
2221 break;
2222
2223 case ANEG_STATE_IDLE_DETECT_INIT:
2224 ap->link_time = ap->cur_time;
2225 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2226 tw32_f(MAC_MODE, tp->mac_mode);
2227 udelay(40);
2228
2229 ap->state = ANEG_STATE_IDLE_DETECT;
2230 ret = ANEG_TIMER_ENAB;
2231 break;
2232
2233 case ANEG_STATE_IDLE_DETECT:
2234 if (ap->ability_match != 0 &&
2235 ap->rxconfig == 0) {
2236 ap->state = ANEG_STATE_AN_ENABLE;
2237 break;
2238 }
2239 delta = ap->cur_time - ap->link_time;
2240 if (delta > ANEG_STATE_SETTLE_TIME) {
2241 /* XXX another gem from the Broadcom driver :( */
2242 ap->state = ANEG_STATE_LINK_OK;
2243 }
2244 break;
2245
2246 case ANEG_STATE_LINK_OK:
2247 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2248 ret = ANEG_DONE;
2249 break;
2250
2251 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2252 /* ??? unimplemented */
2253 break;
2254
2255 case ANEG_STATE_NEXT_PAGE_WAIT:
2256 /* ??? unimplemented */
2257 break;
2258
2259 default:
2260 ret = ANEG_FAILED;
2261 break;
2262 };
2263
2264 return ret;
2265}
2266
/* Drive the software fiber autonegotiation state machine to completion.
 *
 * Puts the MAC into GMII port mode sending config words, then ticks
 * tg3_fiber_aneg_smachine() once per microsecond for up to ~195 ms,
 * and finally stops sending config words.  The resulting MR_* flag
 * word is returned through @flags.
 *
 * Returns 1 when the state machine finished (ANEG_DONE) with at least
 * one of MR_AN_COMPLETE / MR_LINK_OK / MR_LP_ADV_FULL_DUPLEX set,
 * 0 otherwise.
 */
2267static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2268{
2269 int res = 0;
2270 struct tg3_fiber_aneginfo aninfo;
2271 int status = ANEG_FAILED;
2272 unsigned int tick;
2273 u32 tmp;
2274
2275 tw32_f(MAC_TX_AUTO_NEG, 0);
2276
2277 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2278 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2279 udelay(40);
2280
2281 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2282 udelay(40);
2283
2284 memset(&aninfo, 0, sizeof(aninfo));
2285 aninfo.flags |= MR_AN_ENABLE;
2286 aninfo.state = ANEG_STATE_UNKNOWN;
2287 aninfo.cur_time = 0;
2288 tick = 0;
2289 while (++tick < 195000) {
2290 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2291 if (status == ANEG_DONE || status == ANEG_FAILED)
2292 break;
2293
2294 udelay(1);
2295 }
2296
2297 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2298 tw32_f(MAC_MODE, tp->mac_mode);
2299 udelay(40);
2300
2301 *flags = aninfo.flags;
2302
2303 if (status == ANEG_DONE &&
2304 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2305 MR_LP_ADV_FULL_DUPLEX)))
2306 res = 1;
2307
2308 return res;
2309}
2310
/* Initialize/reset the BCM8002 SerDes PHY through a fixed sequence of
 * vendor-specific MII register writes (register numbers and values are
 * from Broadcom; their individual meanings beyond the comments below
 * are not documented here).
 *
 * Skipped entirely when initialization has already completed and PCS
 * sync has been lost, i.e. the reset only happens on first init or
 * while a link is present.
 */
2311static void tg3_init_bcm8002(struct tg3 *tp)
2312{
2313 u32 mac_status = tr32(MAC_STATUS);
2314 int i;
2315
2316 /* Reset when initting first time or we have a link. */
2317 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2318 !(mac_status & MAC_STATUS_PCS_SYNCED))
2319 return;
2320
2321 /* Set PLL lock range. */
2322 tg3_writephy(tp, 0x16, 0x8007);
2323
2324 /* SW reset */
2325 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2326
2327 /* Wait for reset to complete. */
2328 /* XXX schedule_timeout() ... */
2329 for (i = 0; i < 500; i++)
2330 udelay(10);
2331
2332 /* Config mode; select PMA/Ch 1 regs. */
2333 tg3_writephy(tp, 0x10, 0x8411);
2334
2335 /* Enable auto-lock and comdet, select txclk for tx. */
2336 tg3_writephy(tp, 0x11, 0x0a10);
2337
2338 tg3_writephy(tp, 0x18, 0x00a0);
2339 tg3_writephy(tp, 0x16, 0x41ff);
2340
2341 /* Assert and deassert POR. */
2342 tg3_writephy(tp, 0x13, 0x0400);
2343 udelay(40);
2344 tg3_writephy(tp, 0x13, 0x0000);
2345
2346 tg3_writephy(tp, 0x11, 0x0a50);
2347 udelay(40);
2348 tg3_writephy(tp, 0x11, 0x0a10);
2349
2350 /* Wait for signal to stabilize */
2351 /* XXX schedule_timeout() ... */
2352 for (i = 0; i < 15000; i++)
2353 udelay(10);
2354
2355 /* Deselect the channel register so we can read the PHYID
2356 * later.
2357 */
2358 tg3_writephy(tp, 0x10, 0x8011);
2359}
2360
/* Establish fiber link using the chip's SG_DIG hardware autonegotiation
 * block.
 *
 * When autoneg is disabled in link_config, hardware autoneg is turned
 * off and link is declared up if PCS sync is present.  Otherwise the
 * SG_DIG control register is programmed to advertise pause abilities
 * and the result is polled for ~200 ms; on autoneg failure, parallel
 * detection (PCS sync without received config words) is tried.
 *
 * On non-5704-A0/A1 chips a serdes-config workaround value is written
 * around autoneg transitions, with the value depending on which dual-MAC
 * port this is.
 *
 * Returns 1 if link came up, 0 otherwise.
 */
2361static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2362{
2363 u32 sg_dig_ctrl, sg_dig_status;
2364 u32 serdes_cfg, expected_sg_dig_ctrl;
2365 int workaround, port_a;
2366 int current_link_up;
2367
2368 serdes_cfg = 0;
2369 expected_sg_dig_ctrl = 0;
2370 workaround = 0;
2371 port_a = 1;
2372 current_link_up = 0;
2373
2374 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2375 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2376 workaround = 1;
2377 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2378 port_a = 0;
2379
2380 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2381 /* preserve bits 20-23 for voltage regulator */
2382 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2383 }
2384
2385 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2386
2387 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
 /* Bit 31 of SG_DIG_CTRL appears to be the autoneg enable bit;
  * if set, disable hardware autoneg -- TODO confirm bit meaning.
  */
2388 if (sg_dig_ctrl & (1 << 31)) {
2389 if (workaround) {
2390 u32 val = serdes_cfg;
2391
2392 if (port_a)
2393 val |= 0xc010000;
2394 else
2395 val |= 0x4010000;
2396 tw32_f(MAC_SERDES_CFG, val);
2397 }
2398 tw32_f(SG_DIG_CTRL, 0x01388400);
2399 }
2400 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2401 tg3_setup_flow_control(tp, 0, 0);
2402 current_link_up = 1;
2403 }
2404 goto out;
2405 }
2406
2407 /* Want auto-negotiation. */
2408 expected_sg_dig_ctrl = 0x81388400;
2409
2410 /* Pause capability */
2411 expected_sg_dig_ctrl |= (1 << 11);
2412
2413 /* Asymettric pause */
2414 expected_sg_dig_ctrl |= (1 << 12);
2415
2416 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2417 if (workaround)
2418 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
 /* Bit 30 presumably restarts autoneg; pulse it then write the
  * final control value -- TODO confirm bit meaning.
  */
2419 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2420 udelay(5);
2421 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2422
2423 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2424 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2425 MAC_STATUS_SIGNAL_DET)) {
2426 int i;
2427
2428 /* Giver time to negotiate (~200ms) */
2429 for (i = 0; i < 40000; i++) {
2430 sg_dig_status = tr32(SG_DIG_STATUS);
2431 if (sg_dig_status & (0x3))
2432 break;
2433 udelay(5);
2434 }
2435 mac_status = tr32(MAC_STATUS);
2436
2437 if ((sg_dig_status & (1 << 1)) &&
2438 (mac_status & MAC_STATUS_PCS_SYNCED)) {
 /* Autoneg completed: pick up the partner's pause bits. */
2439 u32 local_adv, remote_adv;
2440
2441 local_adv = ADVERTISE_PAUSE_CAP;
2442 remote_adv = 0;
2443 if (sg_dig_status & (1 << 19))
2444 remote_adv |= LPA_PAUSE_CAP;
2445 if (sg_dig_status & (1 << 20))
2446 remote_adv |= LPA_PAUSE_ASYM;
2447
2448 tg3_setup_flow_control(tp, local_adv, remote_adv);
2449 current_link_up = 1;
2450 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2451 } else if (!(sg_dig_status & (1 << 1))) {
2452 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2453 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2454 else {
2455 if (workaround) {
2456 u32 val = serdes_cfg;
2457
2458 if (port_a)
2459 val |= 0xc010000;
2460 else
2461 val |= 0x4010000;
2462
2463 tw32_f(MAC_SERDES_CFG, val);
2464 }
2465
2466 tw32_f(SG_DIG_CTRL, 0x01388400);
2467 udelay(40);
2468
2469 /* Link parallel detection - link is up */
2470 /* only if we have PCS_SYNC and not */
2471 /* receiving config code words */
2472 mac_status = tr32(MAC_STATUS);
2473 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2474 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2475 tg3_setup_flow_control(tp, 0, 0);
2476 current_link_up = 1;
2477 }
2478 }
2479 }
2480 }
2481
2482out:
2483 return current_link_up;
2484}
2485
/* Bring up a fiber link on chips without the SG_DIG hardware autoneg
 * block.
 *
 * Requires PCS sync; with autoneg enabled it runs the software state
 * machine via fiber_autoneg() and configures flow control from the
 * partner's advertised pause bits, falling back to parallel detection
 * (PCS sync without received config words).  With autoneg disabled it
 * simply forces a 1000-full link.
 *
 * Returns 1 if link is up, 0 otherwise.
 */
2486static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2487{
2488 int current_link_up = 0;
2489
2490 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2491 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2492 goto out;
2493 }
2494
2495 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2496 u32 flags;
2497 int i;
2498
2499 if (fiber_autoneg(tp, &flags)) {
2500 u32 local_adv, remote_adv;
2501
2502 local_adv = ADVERTISE_PAUSE_CAP;
2503 remote_adv = 0;
2504 if (flags & MR_LP_ADV_SYM_PAUSE)
2505 remote_adv |= LPA_PAUSE_CAP;
2506 if (flags & MR_LP_ADV_ASYM_PAUSE)
2507 remote_adv |= LPA_PAUSE_ASYM;
2508
2509 tg3_setup_flow_control(tp, local_adv, remote_adv);
2510
2511 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2512 current_link_up = 1;
2513 }
 /* Ack sync/config-change events until they stay clear
  * (bounded at 30 iterations).
  */
2514 for (i = 0; i < 30; i++) {
2515 udelay(20);
2516 tw32_f(MAC_STATUS,
2517 (MAC_STATUS_SYNC_CHANGED |
2518 MAC_STATUS_CFG_CHANGED));
2519 udelay(40);
2520 if ((tr32(MAC_STATUS) &
2521 (MAC_STATUS_SYNC_CHANGED |
2522 MAC_STATUS_CFG_CHANGED)) == 0)
2523 break;
2524 }
2525
2526 mac_status = tr32(MAC_STATUS);
2527 if (current_link_up == 0 &&
2528 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2529 !(mac_status & MAC_STATUS_RCVD_CFG))
2530 current_link_up = 1;
2531 } else {
2532 /* Forcing 1000FD link up. */
2533 current_link_up = 1;
2534 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2535
2536 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2537 udelay(40);
2538 }
2539
2540out:
2541 return current_link_up;
2542}
2543
/* Top-level link setup for TBI/fiber devices.
 *
 * Fast-paths out when the link is already stable (carrier on, init
 * complete, PCS synced, no pending config).  Otherwise it programs the
 * MAC for TBI mode, optionally initializes a BCM8002 PHY, runs either
 * the hardware (SG_DIG) or by-hand autoneg path, updates carrier state
 * and LEDs, and reports link changes.
 *
 * @force_reset is unused on this path.  Always returns 0.
 */
2544static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2545{
2546 u32 orig_pause_cfg;
2547 u16 orig_active_speed;
2548 u8 orig_active_duplex;
2549 u32 mac_status;
2550 int current_link_up;
2551 int i;
2552
 /* Snapshot pause/speed/duplex so we can report only real changes. */
2553 orig_pause_cfg =
2554 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2555 TG3_FLAG_TX_PAUSE));
2556 orig_active_speed = tp->link_config.active_speed;
2557 orig_active_duplex = tp->link_config.active_duplex;
2558
2559 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2560 netif_carrier_ok(tp->dev) &&
2561 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2562 mac_status = tr32(MAC_STATUS);
2563 mac_status &= (MAC_STATUS_PCS_SYNCED |
2564 MAC_STATUS_SIGNAL_DET |
2565 MAC_STATUS_CFG_CHANGED |
2566 MAC_STATUS_RCVD_CFG);
2567 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2568 MAC_STATUS_SIGNAL_DET)) {
2569 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2570 MAC_STATUS_CFG_CHANGED));
2571 return 0;
2572 }
2573 }
2574
2575 tw32_f(MAC_TX_AUTO_NEG, 0);
2576
2577 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2578 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2579 tw32_f(MAC_MODE, tp->mac_mode);
2580 udelay(40);
2581
2582 if (tp->phy_id == PHY_ID_BCM8002)
2583 tg3_init_bcm8002(tp);
2584
2585 /* Enable link change event even when serdes polling. */
2586 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2587 udelay(40);
2588
2589 current_link_up = 0;
2590 mac_status = tr32(MAC_STATUS);
2591
2592 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2593 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2594 else
2595 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2596
2597 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2598 tw32_f(MAC_MODE, tp->mac_mode);
2599 udelay(40);
2600
 /* Clear the stale link-change bit in the shared status block. */
2601 tp->hw_status->status =
2602 (SD_STATUS_UPDATED |
2603 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2604
2605 for (i = 0; i < 100; i++) {
2606 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2607 MAC_STATUS_CFG_CHANGED));
2608 udelay(5);
2609 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2610 MAC_STATUS_CFG_CHANGED)) == 0)
2611 break;
2612 }
2613
2614 mac_status = tr32(MAC_STATUS);
2615 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2616 current_link_up = 0;
2617 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 /* Pulse SEND_CONFIGS to try to provoke the partner. */
2618 tw32_f(MAC_MODE, (tp->mac_mode |
2619 MAC_MODE_SEND_CONFIGS));
2620 udelay(1);
2621 tw32_f(MAC_MODE, tp->mac_mode);
2622 }
2623 }
2624
2625 if (current_link_up == 1) {
2626 tp->link_config.active_speed = SPEED_1000;
2627 tp->link_config.active_duplex = DUPLEX_FULL;
2628 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2629 LED_CTRL_LNKLED_OVERRIDE |
2630 LED_CTRL_1000MBPS_ON));
2631 } else {
2632 tp->link_config.active_speed = SPEED_INVALID;
2633 tp->link_config.active_duplex = DUPLEX_INVALID;
2634 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2635 LED_CTRL_LNKLED_OVERRIDE |
2636 LED_CTRL_TRAFFIC_OVERRIDE));
2637 }
2638
2639 if (current_link_up != netif_carrier_ok(tp->dev)) {
2640 if (current_link_up)
2641 netif_carrier_on(tp->dev);
2642 else
2643 netif_carrier_off(tp->dev);
2644 tg3_link_report(tp);
2645 } else {
2646 u32 now_pause_cfg =
2647 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2648 TG3_FLAG_TX_PAUSE);
2649 if (orig_pause_cfg != now_pause_cfg ||
2650 orig_active_speed != tp->link_config.active_speed ||
2651 orig_active_duplex != tp->link_config.active_duplex)
2652 tg3_link_report(tp);
2653 }
2654
2655 return 0;
2656}
2657
747e8f8b
MC
/* Link setup for serdes devices whose PHY is driven through standard
 * MII registers (the 5714/5715 serdes variants take the ASIC_REV_5714
 * branches below).
 *
 * With autoneg enabled it programs the 1000BASE-X advertisement
 * (always including symmetric pause) and restarts autoneg if anything
 * changed; with autoneg disabled it forces the configured duplex,
 * dropping the link first if carrier was up.  Finally it derives
 * speed/duplex/flow-control from BMSR/BMCR/LPA and updates carrier
 * state.
 *
 * Returns the OR of all tg3_readphy() error results (0 on success).
 */
2658static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2659{
2660 int current_link_up, err = 0;
2661 u32 bmsr, bmcr;
2662 u16 current_speed;
2663 u8 current_duplex;
2664
2665 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2666 tw32_f(MAC_MODE, tp->mac_mode);
2667 udelay(40);
2668
2669 tw32(MAC_EVENT, 0);
2670
2671 tw32_f(MAC_STATUS,
2672 (MAC_STATUS_SYNC_CHANGED |
2673 MAC_STATUS_CFG_CHANGED |
2674 MAC_STATUS_MI_COMPLETION |
2675 MAC_STATUS_LNKSTATE_CHANGED));
2676 udelay(40);
2677
2678 if (force_reset)
2679 tg3_phy_reset(tp);
2680
2681 current_link_up = 0;
2682 current_speed = SPEED_INVALID;
2683 current_duplex = DUPLEX_INVALID;
2684
 /* BMSR latches link-down; read twice to get the current state
  * (standard MII idiom).
  */
2685 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2686 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
 /* On 5714-class chips the MAC's TX status is the authoritative
  * link indication; override the BMSR link bit with it.
  */
2687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2688 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2689 bmsr |= BMSR_LSTATUS;
2690 else
2691 bmsr &= ~BMSR_LSTATUS;
2692 }
747e8f8b
MC
2693
2694 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2695
2696 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2697 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2698 /* do nothing, just check for link up at the end */
2699 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2700 u32 adv, new_adv;
2701
2702 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2703 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2704 ADVERTISE_1000XPAUSE |
2705 ADVERTISE_1000XPSE_ASYM |
2706 ADVERTISE_SLCT);
2707
2708 /* Always advertise symmetric PAUSE just like copper */
2709 new_adv |= ADVERTISE_1000XPAUSE;
2710
2711 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2712 new_adv |= ADVERTISE_1000XHALF;
2713 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2714 new_adv |= ADVERTISE_1000XFULL;
2715
2716 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2717 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2718 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2719 tg3_writephy(tp, MII_BMCR, bmcr);
2720
2721 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2722 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2723 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2724
 /* Autoneg restarted; link will be evaluated on the
  * next poll rather than now.
  */
2725 return err;
2726 }
2727 } else {
2728 u32 new_bmcr;
2729
2730 bmcr &= ~BMCR_SPEED1000;
2731 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2732
2733 if (tp->link_config.duplex == DUPLEX_FULL)
2734 new_bmcr |= BMCR_FULLDPLX;
2735
2736 if (new_bmcr != bmcr) {
2737 /* BMCR_SPEED1000 is a reserved bit that needs
2738 * to be set on write.
2739 */
2740 new_bmcr |= BMCR_SPEED1000;
2741
2742 /* Force a linkdown */
2743 if (netif_carrier_ok(tp->dev)) {
2744 u32 adv;
2745
2746 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2747 adv &= ~(ADVERTISE_1000XFULL |
2748 ADVERTISE_1000XHALF |
2749 ADVERTISE_SLCT);
2750 tg3_writephy(tp, MII_ADVERTISE, adv);
2751 tg3_writephy(tp, MII_BMCR, bmcr |
2752 BMCR_ANRESTART |
2753 BMCR_ANENABLE);
2754 udelay(10);
2755 netif_carrier_off(tp->dev);
2756 }
2757 tg3_writephy(tp, MII_BMCR, new_bmcr);
2758 bmcr = new_bmcr;
2759 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2760 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2761 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2762 ASIC_REV_5714) {
2763 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2764 bmsr |= BMSR_LSTATUS;
2765 else
2766 bmsr &= ~BMSR_LSTATUS;
2767 }
747e8f8b
MC
2768 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2769 }
2770 }
2771
2772 if (bmsr & BMSR_LSTATUS) {
2773 current_speed = SPEED_1000;
2774 current_link_up = 1;
2775 if (bmcr & BMCR_FULLDPLX)
2776 current_duplex = DUPLEX_FULL;
2777 else
2778 current_duplex = DUPLEX_HALF;
2779
2780 if (bmcr & BMCR_ANENABLE) {
2781 u32 local_adv, remote_adv, common;
2782
2783 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2784 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2785 common = local_adv & remote_adv;
2786 if (common & (ADVERTISE_1000XHALF |
2787 ADVERTISE_1000XFULL)) {
2788 if (common & ADVERTISE_1000XFULL)
2789 current_duplex = DUPLEX_FULL;
2790 else
2791 current_duplex = DUPLEX_HALF;
2792
2793 tg3_setup_flow_control(tp, local_adv,
2794 remote_adv);
2795 }
2796 else
2797 current_link_up = 0;
2798 }
2799 }
2800
 /* NOTE(review): the HALF_DUPLEX MAC bit is derived from the OLD
  * tp->link_config.active_duplex here; current_duplex is only
  * copied into it below -- confirm this ordering is intentional.
  */
2801 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2802 if (tp->link_config.active_duplex == DUPLEX_HALF)
2803 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2804
2805 tw32_f(MAC_MODE, tp->mac_mode);
2806 udelay(40);
2807
2808 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2809
2810 tp->link_config.active_speed = current_speed;
2811 tp->link_config.active_duplex = current_duplex;
2812
2813 if (current_link_up != netif_carrier_ok(tp->dev)) {
2814 if (current_link_up)
2815 netif_carrier_on(tp->dev);
2816 else {
2817 netif_carrier_off(tp->dev);
2818 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2819 }
2820 tg3_link_report(tp);
2821 }
2822 return err;
2823}
2824
/* Periodic parallel-detection poll for MII-serdes links.
 *
 * If the link is down with autoneg enabled and we see signal detect
 * but no incoming config code words, the partner is not negotiating:
 * force 1000-full and mark PARALLEL_DETECT.  Conversely, if a
 * parallel-detected link later starts receiving config words, re-enable
 * autoneg.  The first call after a PHY (re)init is skipped to give
 * autoneg time to complete.
 */
2825static void tg3_serdes_parallel_detect(struct tg3 *tp)
2826{
2827 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2828 /* Give autoneg time to complete. */
2829 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2830 return;
2831 }
2832 if (!netif_carrier_ok(tp->dev) &&
2833 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2834 u32 bmcr;
2835
2836 tg3_readphy(tp, MII_BMCR, &bmcr);
2837 if (bmcr & BMCR_ANENABLE) {
2838 u32 phy1, phy2;
2839
2840 /* Select shadow register 0x1f */
2841 tg3_writephy(tp, 0x1c, 0x7c00);
2842 tg3_readphy(tp, 0x1c, &phy1);
2843
2844 /* Select expansion interrupt status register */
2845 tg3_writephy(tp, 0x17, 0x0f01);
 /* Double read: first read returns the latched value --
  * TODO confirm against PHY documentation.
  */
2846 tg3_readphy(tp, 0x15, &phy2);
2847 tg3_readphy(tp, 0x15, &phy2);
2848
2849 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2850 /* We have signal detect and not receiving
2851 * config code words, link is up by parallel
2852 * detection.
2853 */
2854
2855 bmcr &= ~BMCR_ANENABLE;
2856 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2857 tg3_writephy(tp, MII_BMCR, bmcr);
2858 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2859 }
2860 }
2861 }
2862 else if (netif_carrier_ok(tp->dev) &&
2863 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2864 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2865 u32 phy2;
2866
2867 /* Select expansion interrupt status register */
2868 tg3_writephy(tp, 0x17, 0x0f01);
2869 tg3_readphy(tp, 0x15, &phy2);
2870 if (phy2 & 0x20) {
2871 u32 bmcr;
2872
2873 /* Config code words received, turn on autoneg. */
2874 tg3_readphy(tp, MII_BMCR, &bmcr);
2875 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2876
2877 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2878
2879 }
2880 }
2881}
2882
1da177e4
LT
/* Dispatch link setup to the fiber, MII-serdes, or copper PHY routine,
 * then program the MAC inter-packet gap for the negotiated mode and
 * (on pre-5705 chips) gate statistics coalescing on carrier state.
 *
 * Returns the error result of the chosen setup routine.
 */
2883static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2884{
2885 int err;
2886
2887 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2888 err = tg3_setup_fiber_phy(tp, force_reset);
747e8f8b
MC
2889 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2890 err = tg3_setup_fiber_mii_phy(tp, force_reset);
1da177e4
LT
2891 } else {
2892 err = tg3_setup_copper_phy(tp, force_reset);
2893 }
2894
 /* 1000/half needs a longer slot time (0xff vs 32). */
2895 if (tp->link_config.active_speed == SPEED_1000 &&
2896 tp->link_config.active_duplex == DUPLEX_HALF)
2897 tw32(MAC_TX_LENGTHS,
2898 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2899 (6 << TX_LENGTHS_IPG_SHIFT) |
2900 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2901 else
2902 tw32(MAC_TX_LENGTHS,
2903 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2904 (6 << TX_LENGTHS_IPG_SHIFT) |
2905 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2906
2907 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2908 if (netif_carrier_ok(tp->dev)) {
2909 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 2910 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
2911 } else {
2912 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2913 }
2914 }
2915
2916 return err;
2917}
2918
2919/* Tigon3 never reports partial packet sends. So we do not
2920 * need special logic to handle SKBs that have not had all
2921 * of their frags sent yet, like SunGEM does.
2922 */
/* Reclaim completed TX descriptors from tp->tx_cons up to the hardware
 * consumer index: unmap the head and each fragment, free the skb, and
 * wake the queue (under tx_lock) if it was stopped and enough ring
 * entries are now available.
 */
2923static void tg3_tx(struct tg3 *tp)
2924{
2925 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2926 u32 sw_idx = tp->tx_cons;
2927
2928 while (sw_idx != hw_idx) {
2929 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2930 struct sk_buff *skb = ri->skb;
2931 int i;
2932
 /* Only the first descriptor of a packet carries the skb. */
2933 if (unlikely(skb == NULL))
2934 BUG();
2935
2936 pci_unmap_single(tp->pdev,
2937 pci_unmap_addr(ri, mapping),
2938 skb_headlen(skb),
2939 PCI_DMA_TODEVICE);
2940
2941 ri->skb = NULL;
2942
2943 sw_idx = NEXT_TX(sw_idx);
2944
2945 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2946 if (unlikely(sw_idx == hw_idx))
2947 BUG();
2948
2949 ri = &tp->tx_buffers[sw_idx];
2950 if (unlikely(ri->skb != NULL))
2951 BUG();
2952
2953 pci_unmap_page(tp->pdev,
2954 pci_unmap_addr(ri, mapping),
2955 skb_shinfo(skb)->frags[i].size,
2956 PCI_DMA_TODEVICE);
2957
2958 sw_idx = NEXT_TX(sw_idx);
2959 }
2960
f47c11ee 2961 dev_kfree_skb(skb);
1da177e4
LT
2962 }
2963
2964 tp->tx_cons = sw_idx;
2965
 /* Re-check queue state under tx_lock to avoid racing the xmit
  * path when waking the queue.
  */
51b91468
MC
2966 if (unlikely(netif_queue_stopped(tp->dev))) {
2967 spin_lock(&tp->tx_lock);
2968 if (netif_queue_stopped(tp->dev) &&
2969 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2970 netif_wake_queue(tp->dev);
2971 spin_unlock(&tp->tx_lock);
2972 }
1da177e4
LT
2973}
2974
2975/* Returns size of skb allocated or < 0 on error.
2976 *
2977 * We only need to fill in the address because the other members
2978 * of the RX descriptor are invariant, see tg3_init_rings.
2979 *
2980 * Note the purposeful assymetry of cpu vs. chip accesses. For
2981 * posting buffers we only dirty the first cache line of the RX
2982 * descriptor (containing the address). Whereas for the RX status
2983 * buffers the cpu only reads the last cacheline of the RX descriptor
2984 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2985 */
/* @opaque_key selects the std or jumbo ring; @dest_idx_unmasked is
 * reduced modulo the ring size; when @src_idx >= 0, that slot's skb
 * pointer is cleared after the new buffer is committed.
 * On failure nothing is modified (callers rely on this, see comment
 * in the body).
 */
2986static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2987 int src_idx, u32 dest_idx_unmasked)
2988{
2989 struct tg3_rx_buffer_desc *desc;
2990 struct ring_info *map, *src_map;
2991 struct sk_buff *skb;
2992 dma_addr_t mapping;
2993 int skb_size, dest_idx;
2994
2995 src_map = NULL;
2996 switch (opaque_key) {
2997 case RXD_OPAQUE_RING_STD:
2998 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2999 desc = &tp->rx_std[dest_idx];
3000 map = &tp->rx_std_buffers[dest_idx];
3001 if (src_idx >= 0)
3002 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 3003 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
3004 break;
3005
3006 case RXD_OPAQUE_RING_JUMBO:
3007 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3008 desc = &tp->rx_jumbo[dest_idx];
3009 map = &tp->rx_jumbo_buffers[dest_idx];
3010 if (src_idx >= 0)
3011 src_map = &tp->rx_jumbo_buffers[src_idx];
3012 skb_size = RX_JUMBO_PKT_BUF_SZ;
3013 break;
3014
3015 default:
3016 return -EINVAL;
3017 };
3018
3019 /* Do not overwrite any of the map or rp information
3020 * until we are sure we can commit to a new buffer.
3021 *
3022 * Callers depend upon this behavior and assume that
3023 * we leave everything unchanged if we fail.
3024 */
3025 skb = dev_alloc_skb(skb_size);
3026 if (skb == NULL)
3027 return -ENOMEM;
3028
3029 skb->dev = tp->dev;
3030 skb_reserve(skb, tp->rx_offset);
3031
3032 mapping = pci_map_single(tp->pdev, skb->data,
3033 skb_size - tp->rx_offset,
3034 PCI_DMA_FROMDEVICE);
3035
3036 map->skb = skb;
3037 pci_unmap_addr_set(map, mapping, mapping);
3038
3039 if (src_map != NULL)
3040 src_map->skb = NULL;
3041
3042 desc->addr_hi = ((u64)mapping >> 32);
3043 desc->addr_lo = ((u64)mapping & 0xffffffff);
3044
3045 return skb_size;
3046}
3047
3048/* We only need to move over in the address because the other
3049 * members of the RX descriptor are invariant. See notes above
3050 * tg3_alloc_rx_skb for full details.
3051 */
3052static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3053 int src_idx, u32 dest_idx_unmasked)
3054{
3055 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3056 struct ring_info *src_map, *dest_map;
3057 int dest_idx;
3058
3059 switch (opaque_key) {
3060 case RXD_OPAQUE_RING_STD:
3061 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3062 dest_desc = &tp->rx_std[dest_idx];
3063 dest_map = &tp->rx_std_buffers[dest_idx];
3064 src_desc = &tp->rx_std[src_idx];
3065 src_map = &tp->rx_std_buffers[src_idx];
3066 break;
3067
3068 case RXD_OPAQUE_RING_JUMBO:
3069 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3070 dest_desc = &tp->rx_jumbo[dest_idx];
3071 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3072 src_desc = &tp->rx_jumbo[src_idx];
3073 src_map = &tp->rx_jumbo_buffers[src_idx];
3074 break;
3075
3076 default:
3077 return;
3078 };
3079
3080 dest_map->skb = src_map->skb;
3081 pci_unmap_addr_set(dest_map, mapping,
3082 pci_unmap_addr(src_map, mapping));
3083 dest_desc->addr_hi = src_desc->addr_hi;
3084 dest_desc->addr_lo = src_desc->addr_lo;
3085
3086 src_map->skb = NULL;
3087}
3088
3089#if TG3_VLAN_TAG_USED
/* Hand a received frame with hardware-extracted VLAN tag @vlan_tag to
 * the VLAN acceleration receive path; returns its result.
 */
3090static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3091{
3092 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3093}
3094#endif
3095
3096/* The RX ring scheme is composed of multiple rings which post fresh
3097 * buffers to the chip, and one special ring the chip uses to report
3098 * status back to the host.
3099 *
3100 * The special ring reports the status of received packets to the
3101 * host. The chip does not write into the original descriptor the
3102 * RX buffer was obtained from. The chip simply takes the original
3103 * descriptor as provided by the host, updates the status and length
3104 * field, then writes this into the next status ring entry.
3105 *
3106 * Each ring the host uses to post buffers to the chip is described
3107 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3108 * it is first placed into the on-chip ram. When the packet's length
3109 * is known, it walks down the TG3_BDINFO entries to select the ring.
3110 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3111 * which is within the range of the new packet's length is chosen.
3112 *
3113 * The "separate ring for rx status" scheme may sound queer, but it makes
3114 * sense from a cache coherency perspective. If only the host writes
3115 * to the buffer post rings, and only the chip writes to the rx status
3116 * rings, then cache lines never move beyond shared-modified state.
3117 * If both the host and chip were to write into the same ring, cache line
3118 * eviction could occur since both entities want it in an exclusive state.
3119 */
/* Process up to @budget packets from the RX status ring: large frames
 * keep their ring buffer (a fresh one is posted), small ones are copied
 * and the original buffer recycled.  ACKs the status ring and refills
 * whichever producer rings were touched.  Returns the number of packets
 * delivered to the stack.
 */
3120static int tg3_rx(struct tg3 *tp, int budget)
3121{
3122 u32 work_mask;
483ba50b
MC
3123 u32 sw_idx = tp->rx_rcb_ptr;
3124 u16 hw_idx;
1da177e4
LT
3125 int received;
3126
3127 hw_idx = tp->hw_status->idx[0].rx_producer;
3128 /*
3129 * We need to order the read of hw_idx and the read of
3130 * the opaque cookie.
3131 */
3132 rmb();
1da177e4
LT
3133 work_mask = 0;
3134 received = 0;
3135 while (sw_idx != hw_idx && budget > 0) {
3136 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3137 unsigned int len;
3138 struct sk_buff *skb;
3139 dma_addr_t dma_addr;
3140 u32 opaque_key, desc_idx, *post_ptr;
3141
 /* The opaque cookie tells us which posting ring (std/jumbo)
  * and which slot the buffer came from.
  */
3142 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3143 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3144 if (opaque_key == RXD_OPAQUE_RING_STD) {
3145 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3146 mapping);
3147 skb = tp->rx_std_buffers[desc_idx].skb;
3148 post_ptr = &tp->rx_std_ptr;
3149 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3150 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3151 mapping);
3152 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3153 post_ptr = &tp->rx_jumbo_ptr;
3154 }
3155 else {
3156 goto next_pkt_nopost;
3157 }
3158
3159 work_mask |= opaque_key;
3160
3161 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3162 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3163 drop_it:
3164 tg3_recycle_rx(tp, opaque_key,
3165 desc_idx, *post_ptr);
3166 drop_it_no_recycle:
3167 /* Other statistics kept track of by card. */
3168 tp->net_stats.rx_dropped++;
3169 goto next_pkt;
3170 }
3171
3172 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3173
3174 if (len > RX_COPY_THRESHOLD
3175 && tp->rx_offset == 2
3176 /* rx_offset != 2 iff this is a 5701 card running
3177 * in PCI-X mode [see tg3_get_invariants()] */
3178 ) {
3179 int skb_size;
3180
 /* Post a fresh buffer into this slot; only then is it
  * safe to unmap and hand the old one up the stack.
  */
3181 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3182 desc_idx, *post_ptr);
3183 if (skb_size < 0)
3184 goto drop_it;
3185
3186 pci_unmap_single(tp->pdev, dma_addr,
3187 skb_size - tp->rx_offset,
3188 PCI_DMA_FROMDEVICE);
3189
3190 skb_put(skb, len);
3191 } else {
3192 struct sk_buff *copy_skb;
3193
3194 tg3_recycle_rx(tp, opaque_key,
3195 desc_idx, *post_ptr);
3196
3197 copy_skb = dev_alloc_skb(len + 2);
3198 if (copy_skb == NULL)
3199 goto drop_it_no_recycle;
3200
3201 copy_skb->dev = tp->dev;
 /* Reserve 2 bytes so the IP header lands aligned. */
3202 skb_reserve(copy_skb, 2);
3203 skb_put(copy_skb, len);
3204 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3205 memcpy(copy_skb->data, skb->data, len);
3206 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3207
3208 /* We'll reuse the original ring buffer. */
3209 skb = copy_skb;
3210 }
3211
3212 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3213 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3214 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3215 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3216 skb->ip_summed = CHECKSUM_UNNECESSARY;
3217 else
3218 skb->ip_summed = CHECKSUM_NONE;
3219
3220 skb->protocol = eth_type_trans(skb, tp->dev);
3221#if TG3_VLAN_TAG_USED
3222 if (tp->vlgrp != NULL &&
3223 desc->type_flags & RXD_FLAG_VLAN) {
3224 tg3_vlan_rx(tp, skb,
3225 desc->err_vlan & RXD_VLAN_MASK);
3226 } else
3227#endif
3228 netif_receive_skb(skb);
3229
3230 tp->dev->last_rx = jiffies;
3231 received++;
3232 budget--;
3233
3234next_pkt:
3235 (*post_ptr)++;
3236next_pkt_nopost:
483ba50b
MC
3237 sw_idx++;
3238 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
52f6d697
MC
3239
3240 /* Refresh hw_idx to see if there is new work */
3241 if (sw_idx == hw_idx) {
3242 hw_idx = tp->hw_status->idx[0].rx_producer;
3243 rmb();
3244 }
1da177e4
LT
3245 }
3246
3247 /* ACK the status ring. */
483ba50b
MC
3248 tp->rx_rcb_ptr = sw_idx;
3249 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
1da177e4
LT
3250
3251 /* Refill RX ring(s). */
3252 if (work_mask & RXD_OPAQUE_RING_STD) {
3253 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3254 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3255 sw_idx);
3256 }
3257 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3258 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3259 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3260 sw_idx);
3261 }
 /* Ensure the mailbox writes are posted before returning. */
3262 mmiowb();
3263
3264 return received;
3265}
3266
/* NAPI poll callback.  Handles, in order: PHY/link-change events, TX
 * completions, and bounded RX work, then decides whether to stay on
 * the poll list.  Returns 0 when all work is done, 1 otherwise.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit before acting on it so a
			 * new event arriving meanwhile is not lost.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		/* Order the tag read against the has-work re-check below. */
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3323
f47c11ee
DM
/* Block further NAPI scheduling from the ISRs and wait for any handler
 * already running to finish.  Must not be called while already synced.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish the flag before waiting on in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3333
3334static inline int tg3_irq_sync(struct tg3 *tp)
3335{
3336 return tp->irq_sync;
3337}
3338
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	if (irq_sync)
		tg3_irq_quiesce(tp);
	/* Lock ordering: tp->lock first, then tp->tx_lock. */
	spin_lock_bh(&tp->lock);
	spin_lock(&tp->tx_lock);
}
3351
/* Release the locks taken by tg3_full_lock(), in reverse order. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock(&tp->tx_lock);
	spin_unlock_bh(&tp->lock);
}
3357
88b06bc2
MC
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll handler touches first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3382
1da177e4
LT
/* INTx ISR (non-tagged status).  The line may be shared, so first
 * determine whether the interrupt is ours before scheduling NAPI.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				       0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3425
/* INTx ISR for chips using tagged status blocks.  A new status tag
 * (rather than SD_STATUS_UPDATED) indicates pending work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3467
7938109f
MC
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt only if the status block was updated or
	 * INTA# is asserted; otherwise it belongs to a sharing device.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/* Ack and mask further chip interrupts. */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3484
1da177e4 3485static int tg3_init_hw(struct tg3 *);
944d980e 3486static int tg3_halt(struct tg3 *, int, int);
1da177e4
LT
3487
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the INTx handler directly (no real irq). */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3496
/* Workqueue handler that fully halts and reinitializes the chip,
 * e.g. after a TX timeout.  Runs in process context.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	/* The device was brought down while this work was queued. */
	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Stop NAPI/queues outside the locks; this can sleep. */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_init_hw(tp);

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3532
3533static void tg3_tx_timeout(struct net_device *dev)
3534{
3535 struct tg3 *tp = netdev_priv(dev);
3536
3537 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3538 dev->name);
3539
3540 schedule_work(&tp->reset_task);
3541}
3542
c58ec932
MC
3543/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3544static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3545{
3546 u32 base = (u32) mapping & 0xffffffff;
3547
3548 return ((base > 0xffffdcc0) &&
3549 (base + len + 8 < base));
3550}
3551
72f2afb8
MC
3552/* Test for DMA addresses > 40-bit */
3553static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3554 int len)
3555{
3556#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3557 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3558 return (((u64) mapping + len) > DMA_40BIT_MASK);
3559 return 0;
3560#else
3561 return 0;
3562#endif
3563}
3564
1da177e4
LT
3565static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3566
72f2afb8
MC
/* Workaround 4GB and 40-bit hardware DMA bugs.  Linearizes the skb
 * into a fresh copy, re-queues it as a single descriptor, and unmaps
 * the descriptors of the offending original.  Returns 0 on success,
 * -1 if the packet had to be dropped.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 held the linear head; the rest held frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* The replacement skb takes over the first slot. */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3624
3625static void tg3_set_txd(struct tg3 *tp, int entry,
3626 dma_addr_t mapping, int len, u32 flags,
3627 u32 mss_and_is_end)
3628{
3629 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3630 int is_end = (mss_and_is_end & 0x1);
3631 u32 mss = (mss_and_is_end >> 1);
3632 u32 vlan_tag = 0;
3633
3634 if (is_end)
3635 flags |= TXD_FLAG_END;
3636 if (flags & TXD_FLAG_VLAN) {
3637 vlan_tag = flags >> 16;
3638 flags &= 0xffff;
3639 }
3640 vlan_tag |= (mss << TXD_MSS_SHIFT);
3641
3642 txd->addr_hi = ((u64) mapping >> 32);
3643 txd->addr_lo = ((u64) mapping & 0xffffffff);
3644 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3645 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3646}
3647
1da177e4
LT
/* hard_start_xmit handler.  Maps the skb head and fragments into the
 * TX ring, applies checksum/TSO/VLAN descriptor flags, works around
 * the 4GB and 40-bit DMA hardware bugs, then rings the TX producer
 * mailbox.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* No BH disabling for tx_lock here. We are running in BH disabled
	 * context and TX reclaim runs via tp->poll inside of a software
	 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either. Rejoice!
	 */
	if (!spin_trylock(&tp->tx_lock))
		return NETDEV_TX_LOCKED;

	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		spin_unlock(&tp->tx_lock);
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->tso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* The chip recomputes the IP checksum per TSO segment. */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			/* Pseudo-header checksum only; hw fills in the rest. */
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(dev);
		/* Re-check after stopping: TX reclaim may have freed room. */
		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}

out_unlock:
	/* Flush posted mailbox writes before dropping the lock. */
	mmiowb();
	spin_unlock(&tp->tx_lock);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3825
3826static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3827 int new_mtu)
3828{
3829 dev->mtu = new_mtu;
3830
ef7f5ec0 3831 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 3832 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
3833 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3834 ethtool_op_set_tso(dev, 0);
3835 }
3836 else
3837 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3838 } else {
a4e2b347 3839 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 3840 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 3841 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 3842 }
1da177e4
LT
3843}
3844
/* net_device change_mtu hook.  Validates the requested MTU and, if
 * the interface is up, performs a full chip halt/reinit so the new
 * buffer sizes take effect.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	/* Quiesce the IRQ handler too (irq_sync=1) before the reset. */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_init_hw(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
3876
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: slot i holds the skb head mapping; the following
	 * nr_frags slots hold the fragment mappings for the same skb.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
3948
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static void tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class parts have no jumbo ring; they use larger
	 * standard-ring buffers when the MTU exceeds ETH_DATA_LEN.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
				     -1, i) < 0)
			break;
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0)
				break;
		}
	}
}
4018
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_std_buffers also backs rx_jumbo_buffers and tx_buffers;
	 * see tg3_alloc_consistent() for the single-allocation layout.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
				    tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4058
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One allocation backs all three software ring-info arrays,
	 * laid out as [std rx | jumbo rx | tx].
	 */
	tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	memset(tp->rx_std_buffers, 0,
	       (sizeof(struct ring_info) *
		(TG3_RX_RING_SIZE +
		 TG3_RX_JUMBO_RING_SIZE)) +
	       (sizeof(struct tx_ring_info) *
		TG3_TX_RING_SIZE));

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	/* Partial failure: release everything acquired so far. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4127
4128#define MAX_WAIT_CNT 1000
4129
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.  Returns 0 on success, -ENODEV on
 * timeout (unless 'silent' suppresses the error).
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		};
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the block to go idle. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		printk(KERN_ERR PFX "tg3_stop_block timed out, "
		       "ofs=%lx enable_bit=%x\n",
		       ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
4175
/* tp->lock is held.  Disables interrupts and shuts down every DMA
 * and MAC block in dependency order, then clears the shared status
 * and statistics blocks.  OR-accumulates per-block error codes.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks first... */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* ...then the send side. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Wait for the MAC transmitter itself to idle. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset all the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4238
/* tp->lock is held.  Acquire the NVRAM hardware arbitration; nesting
 * is tracked in nvram_lock_cnt so only the outermost caller touches
 * the arbiter.  Returns -ENODEV if the grant never arrives.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* No grant; withdraw the request. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
4261
4262/* tp->lock is held. */
4263static void tg3_nvram_unlock(struct tg3 *tp)
4264{
ec41c7df
MC
4265 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4266 if (tp->nvram_lock_cnt > 0)
4267 tp->nvram_lock_cnt--;
4268 if (tp->nvram_lock_cnt == 0)
4269 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4270 }
1da177e4
LT
4271}
4272
e6af301b
MC
4273/* tp->lock is held. */
4274static void tg3_enable_nvram_access(struct tg3 *tp)
4275{
4276 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4277 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4278 u32 nvaccess = tr32(NVRAM_ACCESS);
4279
4280 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4281 }
4282}
4283
4284/* tp->lock is held. */
4285static void tg3_disable_nvram_access(struct tg3 *tp)
4286{
4287 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4288 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4289 u32 nvaccess = tr32(NVRAM_ACCESS);
4290
4291 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4292 }
4293}
4294
1da177e4
LT
4295/* tp->lock is held. */
4296static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4297{
4298 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4299 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4300 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4301
4302 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4303 switch (kind) {
4304 case RESET_KIND_INIT:
4305 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4306 DRV_STATE_START);
4307 break;
4308
4309 case RESET_KIND_SHUTDOWN:
4310 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4311 DRV_STATE_UNLOAD);
4312 break;
4313
4314 case RESET_KIND_SUSPEND:
4315 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4316 DRV_STATE_SUSPEND);
4317 break;
4318
4319 default:
4320 break;
4321 };
4322 }
4323}
4324
4325/* tp->lock is held. */
4326static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4327{
4328 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4329 switch (kind) {
4330 case RESET_KIND_INIT:
4331 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4332 DRV_STATE_START_DONE);
4333 break;
4334
4335 case RESET_KIND_SHUTDOWN:
4336 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4337 DRV_STATE_UNLOAD_DONE);
4338 break;
4339
4340 default:
4341 break;
4342 };
4343 }
4344}
4345
4346/* tp->lock is held. */
4347static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4348{
4349 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4350 switch (kind) {
4351 case RESET_KIND_INIT:
4352 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4353 DRV_STATE_START);
4354 break;
4355
4356 case RESET_KIND_SHUTDOWN:
4357 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4358 DRV_STATE_UNLOAD);
4359 break;
4360
4361 case RESET_KIND_SUSPEND:
4362 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4363 DRV_STATE_SUSPEND);
4364 break;
4365
4366 default:
4367 break;
4368 };
4369 }
4370}
4371
4372static void tg3_stop_fw(struct tg3 *);
4373
4374/* tp->lock is held. */
4375static int tg3_chip_reset(struct tg3 *tp)
4376{
4377 u32 val;
1ee582d8 4378 void (*write_op)(struct tg3 *, u32, u32);
1da177e4
LT
4379 int i;
4380
ec41c7df 4381 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
1da177e4 4382 tg3_nvram_lock(tp);
ec41c7df
MC
4383 /* No matching tg3_nvram_unlock() after this because
4384 * chip reset below will undo the nvram lock.
4385 */
4386 tp->nvram_lock_cnt = 0;
4387 }
1da177e4
LT
4388
4389 /*
4390 * We must avoid the readl() that normally takes place.
4391 * It locks machines, causes machine checks, and other
4392 * fun things. So, temporarily disable the 5701
4393 * hardware workaround, while we do the reset.
4394 */
1ee582d8
MC
4395 write_op = tp->write32;
4396 if (write_op == tg3_write_flush_reg32)
4397 tp->write32 = tg3_write32;
1da177e4
LT
4398
4399 /* do the reset */
4400 val = GRC_MISC_CFG_CORECLK_RESET;
4401
4402 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4403 if (tr32(0x7e2c) == 0x60) {
4404 tw32(0x7e2c, 0x20);
4405 }
4406 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4407 tw32(GRC_MISC_CFG, (1 << 29));
4408 val |= (1 << 29);
4409 }
4410 }
4411
4412 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4413 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4414 tw32(GRC_MISC_CFG, val);
4415
1ee582d8
MC
4416 /* restore 5701 hardware bug workaround write method */
4417 tp->write32 = write_op;
1da177e4
LT
4418
4419 /* Unfortunately, we have to delay before the PCI read back.
4420 * Some 575X chips even will not respond to a PCI cfg access
4421 * when the reset command is given to the chip.
4422 *
4423 * How do these hardware designers expect things to work
4424 * properly if the PCI write is posted for a long period
4425 * of time? It is always necessary to have some method by
4426 * which a register read back can occur to push the write
4427 * out which does the reset.
4428 *
4429 * For most tg3 variants the trick below was working.
4430 * Ho hum...
4431 */
4432 udelay(120);
4433
4434 /* Flush PCI posted writes. The normal MMIO registers
4435 * are inaccessible at this time so this is the only
4436 * way to make this reliably (actually, this is no longer
4437 * the case, see above). I tried to use indirect
4438 * register read/write but this upset some 5701 variants.
4439 */
4440 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4441
4442 udelay(120);
4443
4444 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4445 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4446 int i;
4447 u32 cfg_val;
4448
4449 /* Wait for link training to complete. */
4450 for (i = 0; i < 5000; i++)
4451 udelay(100);
4452
4453 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4454 pci_write_config_dword(tp->pdev, 0xc4,
4455 cfg_val | (1 << 15));
4456 }
4457 /* Set PCIE max payload size and clear error status. */
4458 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4459 }
4460
4461 /* Re-enable indirect register accesses. */
4462 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4463 tp->misc_host_ctrl);
4464
4465 /* Set MAX PCI retry to zero. */
4466 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4467 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4468 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4469 val |= PCISTATE_RETRY_SAME_DMA;
4470 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4471
4472 pci_restore_state(tp->pdev);
4473
4474 /* Make sure PCI-X relaxed ordering bit is clear. */
4475 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4476 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4477 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4478
a4e2b347 4479 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4cf78e4f
MC
4480 u32 val;
4481
4482 /* Chip reset on 5780 will reset MSI enable bit,
4483 * so need to restore it.
4484 */
4485 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4486 u16 ctrl;
4487
4488 pci_read_config_word(tp->pdev,
4489 tp->msi_cap + PCI_MSI_FLAGS,
4490 &ctrl);
4491 pci_write_config_word(tp->pdev,
4492 tp->msi_cap + PCI_MSI_FLAGS,
4493 ctrl | PCI_MSI_FLAGS_ENABLE);
4494 val = tr32(MSGINT_MODE);
4495 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4496 }
4497
4498 val = tr32(MEMARB_MODE);
4499 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4500
4501 } else
4502 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
1da177e4
LT
4503
4504 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4505 tg3_stop_fw(tp);
4506 tw32(0x5000, 0x400);
4507 }
4508
4509 tw32(GRC_MODE, tp->grc_mode);
4510
4511 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4512 u32 val = tr32(0xc4);
4513
4514 tw32(0xc4, val | (1 << 15));
4515 }
4516
4517 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4518 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4519 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4520 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4521 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4522 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4523 }
4524
4525 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4526 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4527 tw32_f(MAC_MODE, tp->mac_mode);
747e8f8b
MC
4528 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4529 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4530 tw32_f(MAC_MODE, tp->mac_mode);
1da177e4
LT
4531 } else
4532 tw32_f(MAC_MODE, 0);
4533 udelay(40);
4534
4535 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4536 /* Wait for firmware initialization to complete. */
4537 for (i = 0; i < 100000; i++) {
4538 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4539 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4540 break;
4541 udelay(10);
4542 }
4543 if (i >= 100000) {
4544 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4545 "firmware will not restart magic=%08x\n",
4546 tp->dev->name, val);
4547 return -ENODEV;
4548 }
4549 }
4550
4551 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4552 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4553 u32 val = tr32(0x7c00);
4554
4555 tw32(0x7c00, val | (1 << 25));
4556 }
4557
4558 /* Reprobe ASF enable state. */
4559 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4560 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4561 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4562 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4563 u32 nic_cfg;
4564
4565 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4566 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4567 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 4568 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
4569 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4570 }
4571 }
4572
4573 return 0;
4574}
4575
4576/* tp->lock is held. */
4577static void tg3_stop_fw(struct tg3 *tp)
4578{
4579 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4580 u32 val;
4581 int i;
4582
4583 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4584 val = tr32(GRC_RX_CPU_EVENT);
4585 val |= (1 << 14);
4586 tw32(GRC_RX_CPU_EVENT, val);
4587
4588 /* Wait for RX cpu to ACK the event. */
4589 for (i = 0; i < 100; i++) {
4590 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4591 break;
4592 udelay(1);
4593 }
4594 }
4595}
4596
4597/* tp->lock is held. */
944d980e 4598static int tg3_halt(struct tg3 *tp, int kind, int silent)
1da177e4
LT
4599{
4600 int err;
4601
4602 tg3_stop_fw(tp);
4603
944d980e 4604 tg3_write_sig_pre_reset(tp, kind);
1da177e4 4605
b3b7d6be 4606 tg3_abort_hw(tp, silent);
1da177e4
LT
4607 err = tg3_chip_reset(tp);
4608
944d980e
MC
4609 tg3_write_sig_legacy(tp, kind);
4610 tg3_write_sig_post_reset(tp, kind);
1da177e4
LT
4611
4612 if (err)
4613 return err;
4614
4615 return 0;
4616}
4617
/* Memory layout of the built-in 5701 A0 workaround firmware image.
 * NOTE(review): "RELASE" in TG3_FW_RELASE_MINOR is a historical
 * misspelling; the name is kept as-is in case it is referenced
 * elsewhere in the tree.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
4632
4633static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4634 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4635 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4636 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4637 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4638 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4639 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4640 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4641 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4642 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4643 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4644 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4645 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4646 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4647 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4648 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4649 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4650 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4651 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4652 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4653 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4654 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4655 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4656 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4657 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4658 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4659 0, 0, 0, 0, 0, 0,
4660 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4661 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4662 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4663 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4664 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4665 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4666 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4667 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4668 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4669 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4670 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4671 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4672 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4673 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4674 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4675 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4676 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4677 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4678 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4679 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4680 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4681 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4682 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4683 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4684 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4685 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4686 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4687 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4688 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4689 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4690 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4691 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4692 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4693 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4694 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4695 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4696 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4697 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4698 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4699 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4700 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4701 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4702 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4703 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4704 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4705 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4706 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4707 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4708 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4709 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4710 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4711 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4712 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4713 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4714 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4715 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4716 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4717 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4718 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4719 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4720 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4721 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4722 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4723 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4724 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4725};
4726
4727static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4728 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4729 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4730 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4731 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4732 0x00000000
4733};
4734
#if 0 /* All zeros, don't eat up space with it. */
/* .data segment of the 5701 A0 firmware.  Elided because it is all
 * zeros: tg3_load_firmware_cpu() zero-fills any segment whose payload
 * pointer is NULL, so fw_info.data_data is simply left NULL.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
4741
/* On-chip scratch memory windows used to hold downloaded CPU firmware. */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
4746
4747/* tp->lock is held. */
4748static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4749{
4750 int i;
4751
4752 if (offset == TX_CPU_BASE &&
4753 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4754 BUG();
4755
4756 if (offset == RX_CPU_BASE) {
4757 for (i = 0; i < 10000; i++) {
4758 tw32(offset + CPU_STATE, 0xffffffff);
4759 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4760 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4761 break;
4762 }
4763
4764 tw32(offset + CPU_STATE, 0xffffffff);
4765 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4766 udelay(10);
4767 } else {
4768 for (i = 0; i < 10000; i++) {
4769 tw32(offset + CPU_STATE, 0xffffffff);
4770 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4771 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4772 break;
4773 }
4774 }
4775
4776 if (i >= 10000) {
4777 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4778 "and %s CPU\n",
4779 tp->dev->name,
4780 (offset == RX_CPU_BASE ? "RX" : "TX"));
4781 return -ENODEV;
4782 }
ec41c7df
MC
4783
4784 /* Clear firmware's nvram arbitration. */
4785 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4786 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
1da177e4
LT
4787 return 0;
4788}
4789
4790struct fw_info {
4791 unsigned int text_base;
4792 unsigned int text_len;
4793 u32 *text_data;
4794 unsigned int rodata_base;
4795 unsigned int rodata_len;
4796 u32 *rodata_data;
4797 unsigned int data_base;
4798 unsigned int data_len;
4799 u32 *data_data;
4800};
4801
4802/* tp->lock is held. */
4803static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4804 int cpu_scratch_size, struct fw_info *info)
4805{
ec41c7df 4806 int err, lock_err, i;
1da177e4
LT
4807 void (*write_op)(struct tg3 *, u32, u32);
4808
4809 if (cpu_base == TX_CPU_BASE &&
4810 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4811 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4812 "TX cpu firmware on %s which is 5705.\n",
4813 tp->dev->name);
4814 return -EINVAL;
4815 }
4816
4817 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4818 write_op = tg3_write_mem;
4819 else
4820 write_op = tg3_write_indirect_reg32;
4821
1b628151
MC
4822 /* It is possible that bootcode is still loading at this point.
4823 * Get the nvram lock first before halting the cpu.
4824 */
ec41c7df 4825 lock_err = tg3_nvram_lock(tp);
1da177e4 4826 err = tg3_halt_cpu(tp, cpu_base);
ec41c7df
MC
4827 if (!lock_err)
4828 tg3_nvram_unlock(tp);
1da177e4
LT
4829 if (err)
4830 goto out;
4831
4832 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4833 write_op(tp, cpu_scratch_base + i, 0);
4834 tw32(cpu_base + CPU_STATE, 0xffffffff);
4835 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4836 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4837 write_op(tp, (cpu_scratch_base +
4838 (info->text_base & 0xffff) +
4839 (i * sizeof(u32))),
4840 (info->text_data ?
4841 info->text_data[i] : 0));
4842 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4843 write_op(tp, (cpu_scratch_base +
4844 (info->rodata_base & 0xffff) +
4845 (i * sizeof(u32))),
4846 (info->rodata_data ?
4847 info->rodata_data[i] : 0));
4848 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4849 write_op(tp, (cpu_scratch_base +
4850 (info->data_base & 0xffff) +
4851 (i * sizeof(u32))),
4852 (info->data_data ?
4853 info->data_data[i] : 0));
4854
4855 err = 0;
4856
4857out:
1da177e4
LT
4858 return err;
4859}
4860
4861/* tp->lock is held. */
4862static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4863{
4864 struct fw_info info;
4865 int err, i;
4866
4867 info.text_base = TG3_FW_TEXT_ADDR;
4868 info.text_len = TG3_FW_TEXT_LEN;
4869 info.text_data = &tg3FwText[0];
4870 info.rodata_base = TG3_FW_RODATA_ADDR;
4871 info.rodata_len = TG3_FW_RODATA_LEN;
4872 info.rodata_data = &tg3FwRodata[0];
4873 info.data_base = TG3_FW_DATA_ADDR;
4874 info.data_len = TG3_FW_DATA_LEN;
4875 info.data_data = NULL;
4876
4877 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4878 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4879 &info);
4880 if (err)
4881 return err;
4882
4883 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4884 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4885 &info);
4886 if (err)
4887 return err;
4888
4889 /* Now startup only the RX cpu. */
4890 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4891 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4892
4893 for (i = 0; i < 5; i++) {
4894 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4895 break;
4896 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4897 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4898 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4899 udelay(1000);
4900 }
4901 if (i >= 5) {
4902 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4903 "to set RX CPU PC, is %08x should be %08x\n",
4904 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4905 TG3_FW_TEXT_ADDR);
4906 return -ENODEV;
4907 }
4908 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4909 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4910
4911 return 0;
4912}
4913
4914#if TG3_TSO_SUPPORT != 0
4915
/* Memory layout of the built-in TSO firmware image.
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR mirrors the same
 * historical misspelling as TG3_FW_RELASE_MINOR; kept as-is.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
4930
4931static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4932 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4933 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4934 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4935 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4936 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4937 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4938 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4939 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4940 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4941 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4942 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4943 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4944 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4945 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4946 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4947 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4948 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4949 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4950 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4951 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4952 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4953 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4954 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4955 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4956 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4957 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4958 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4959 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4960 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4961 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4962 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4963 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4964 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4965 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4966 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4967 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4968 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4969 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4970 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4971 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4972 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4973 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4974 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4975 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4976 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4977 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4978 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4979 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4980 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4981 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4982 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4983 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4984 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4985 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4986 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4987 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4988 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4989 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4990 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4991 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4992 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4993 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4994 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4995 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4996 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4997 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4998 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4999 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5000 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5001 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5002 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5003 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5004 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5005 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5006 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5007 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5008 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5009 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5010 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5011 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5012 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5013 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5014 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5015 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5016 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5017 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5018 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5019 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5020 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5021 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5022 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5023 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5024 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5025 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5026 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5027 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5028 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5029 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5030 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5031 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5032 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5033 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5034 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5035 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5036 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5037 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5038 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5039 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5040 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5041 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5042 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5043 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5044 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5045 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5046 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5047 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5048 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5049 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5050 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5051 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5052 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5053 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5054 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5055 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5056 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5057 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5058 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5059 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5060 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5061 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5062 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5063 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5064 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5065 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5066 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5067 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5068 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5069 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5070 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5071 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5072 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5073 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5074 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5075 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5076 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5077 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5078 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5079 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5080 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5081 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5082 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5083 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5084 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5085 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5086 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5087 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5088 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5089 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5090 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5091 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5092 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5093 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5094 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5095 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5096 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5097 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5098 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5099 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5100 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5101 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5102 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5103 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5104 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5105 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5106 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5107 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5108 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5109 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5110 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5111 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5112 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5113 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5114 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5115 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5116 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5117 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5118 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5119 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5120 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5121 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5122 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5123 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5124 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5125 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5126 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5127 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5128 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5129 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5130 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5131 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5132 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5133 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5134 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5135 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5136 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5137 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5138 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5139 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5140 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5141 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5142 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5143 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5144 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5145 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5146 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5147 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5148 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5149 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5150 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5151 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5152 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5153 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5154 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5155 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5156 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5157 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5158 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5159 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5160 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5161 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5162 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5163 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5164 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5165 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5166 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5167 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5168 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5169 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5170 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5171 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5172 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5173 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5174 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5175 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5176 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5177 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5178 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5179 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5180 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5181 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5182 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5183 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5184 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5185 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5186 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5187 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5188 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5189 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5190 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5191 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5192 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5193 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5194 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5195 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5196 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5197 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5198 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5199 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5200 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5201 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5202 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5203 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5204 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5205 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5206 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5207 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5208 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5209 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5210 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5211 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5212 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5213 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5214 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5215 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5216};
5217
/* Read-only data segment of the standard (non-5705) TSO firmware image.
 * The words are opaque firmware data; most decode as ASCII tags embedded
 * in the image ("Main"/"CpuB", "stkoffld", "SwEvent0", "fatalErr", ...).
 * Loaded at TG3_TSO_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5225
/* Initialized data segment of the standard TSO firmware; the non-zero
 * words decode to the ASCII version tag "stkoffld_v1.6.0".
 * Loaded at TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware().
 */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5231
/* 5705 needs a special version of the TSO firmware.
 *
 * Layout constants for that image: release identification, the load
 * addresses and byte lengths of the text/rodata/data segments, and the
 * sbss/bss sizes that tg3_load_tso_firmware() adds up to compute the
 * scratch-memory footprint on the RX cpu.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
/* NOTE(review): "RELASE" below is a historical typo for "RELEASE"; the
 * macro name is kept as-is because other code may reference this exact
 * spelling.
 */
#define TG3_TSO5_FW_RELASE_MINOR 0x2
#define TG3_TSO5_FW_RELEASE_FIX 0x0
#define TG3_TSO5_FW_START_ADDR 0x00010000
#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
#define TG3_TSO5_FW_TEXT_LEN 0xe90
#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
#define TG3_TSO5_FW_RODATA_LEN 0x50
#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
#define TG3_TSO5_FW_DATA_LEN 0x20
#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
#define TG3_TSO5_FW_SBSS_LEN 0x28
#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
#define TG3_TSO5_FW_BSS_LEN 0x88
5247
/* MIPS .text segment of the 5705-specific TSO firmware (opaque binary
 * firmware words; see the copyright/permission notice at the top of this
 * file).  Loaded at TG3_TSO5_FW_TEXT_ADDR onto the RX cpu by
 * tg3_load_tso_firmware().  The array is sized to TEXT_LEN/4 plus one
 * trailing word.
 */
static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
5406
/* Read-only data segment of the 5705 TSO firmware (opaque firmware words;
 * most decode as ASCII tags: "Main"/"CpuB", "stkoffld", "fatalErr").
 * Loaded at TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5413
/* Initialized data segment of the 5705 TSO firmware; the non-zero words
 * decode to the ASCII version tag "stkoffld_v1.2.0".
 * Loaded at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5418
5419/* tp->lock is held. */
5420static int tg3_load_tso_firmware(struct tg3 *tp)
5421{
5422 struct fw_info info;
5423 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5424 int err, i;
5425
5426 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5427 return 0;
5428
5429 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5430 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5431 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5432 info.text_data = &tg3Tso5FwText[0];
5433 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5434 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5435 info.rodata_data = &tg3Tso5FwRodata[0];
5436 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5437 info.data_len = TG3_TSO5_FW_DATA_LEN;
5438 info.data_data = &tg3Tso5FwData[0];
5439 cpu_base = RX_CPU_BASE;
5440 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5441 cpu_scratch_size = (info.text_len +
5442 info.rodata_len +
5443 info.data_len +
5444 TG3_TSO5_FW_SBSS_LEN +
5445 TG3_TSO5_FW_BSS_LEN);
5446 } else {
5447 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5448 info.text_len = TG3_TSO_FW_TEXT_LEN;
5449 info.text_data = &tg3TsoFwText[0];
5450 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5451 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5452 info.rodata_data = &tg3TsoFwRodata[0];
5453 info.data_base = TG3_TSO_FW_DATA_ADDR;
5454 info.data_len = TG3_TSO_FW_DATA_LEN;
5455 info.data_data = &tg3TsoFwData[0];
5456 cpu_base = TX_CPU_BASE;
5457 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5458 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5459 }
5460
5461 err = tg3_load_firmware_cpu(tp, cpu_base,
5462 cpu_scratch_base, cpu_scratch_size,
5463 &info);
5464 if (err)
5465 return err;
5466
5467 /* Now startup the cpu. */
5468 tw32(cpu_base + CPU_STATE, 0xffffffff);
5469 tw32_f(cpu_base + CPU_PC, info.text_base);
5470
5471 for (i = 0; i < 5; i++) {
5472 if (tr32(cpu_base + CPU_PC) == info.text_base)
5473 break;
5474 tw32(cpu_base + CPU_STATE, 0xffffffff);
5475 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5476 tw32_f(cpu_base + CPU_PC, info.text_base);
5477 udelay(1000);
5478 }
5479 if (i >= 5) {
5480 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5481 "to set CPU PC, is %08x should be %08x\n",
5482 tp->dev->name, tr32(cpu_base + CPU_PC),
5483 info.text_base);
5484 return -ENODEV;
5485 }
5486 tw32(cpu_base + CPU_STATE, 0xffffffff);
5487 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5488 return 0;
5489}
5490
5491#endif /* TG3_TSO_SUPPORT != 0 */
5492
5493/* tp->lock is held. */
5494static void __tg3_set_mac_addr(struct tg3 *tp)
5495{
5496 u32 addr_high, addr_low;
5497 int i;
5498
5499 addr_high = ((tp->dev->dev_addr[0] << 8) |
5500 tp->dev->dev_addr[1]);
5501 addr_low = ((tp->dev->dev_addr[2] << 24) |
5502 (tp->dev->dev_addr[3] << 16) |
5503 (tp->dev->dev_addr[4] << 8) |
5504 (tp->dev->dev_addr[5] << 0));
5505 for (i = 0; i < 4; i++) {
5506 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5507 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5508 }
5509
5510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5512 for (i = 0; i < 12; i++) {
5513 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5514 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5515 }
5516 }
5517
5518 addr_high = (tp->dev->dev_addr[0] +
5519 tp->dev->dev_addr[1] +
5520 tp->dev->dev_addr[2] +
5521 tp->dev->dev_addr[3] +
5522 tp->dev->dev_addr[4] +
5523 tp->dev->dev_addr[5]) &
5524 TX_BACKOFF_SEED_MASK;
5525 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5526}
5527
5528static int tg3_set_mac_addr(struct net_device *dev, void *p)
5529{
5530 struct tg3 *tp = netdev_priv(dev);
5531 struct sockaddr *addr = p;
5532
f9804ddb
MC
5533 if (!is_valid_ether_addr(addr->sa_data))
5534 return -EINVAL;
5535
1da177e4
LT
5536 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5537
f47c11ee 5538 spin_lock_bh(&tp->lock);
1da177e4 5539 __tg3_set_mac_addr(tp);
f47c11ee 5540 spin_unlock_bh(&tp->lock);
1da177e4
LT
5541
5542 return 0;
5543}
5544
5545/* tp->lock is held. */
5546static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5547 dma_addr_t mapping, u32 maxlen_flags,
5548 u32 nic_addr)
5549{
5550 tg3_write_mem(tp,
5551 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5552 ((u64) mapping >> 32));
5553 tg3_write_mem(tp,
5554 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5555 ((u64) mapping & 0xffffffff));
5556 tg3_write_mem(tp,
5557 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5558 maxlen_flags);
5559
5560 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5561 tg3_write_mem(tp,
5562 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5563 nic_addr);
5564}
5565
5566static void __tg3_set_rx_mode(struct net_device *);
d244c892 5567static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
5568{
5569 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5570 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5571 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5572 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5573 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5574 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5575 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5576 }
5577 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5578 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5579 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5580 u32 val = ec->stats_block_coalesce_usecs;
5581
5582 if (!netif_carrier_ok(tp->dev))
5583 val = 0;
5584
5585 tw32(HOSTCC_STAT_COAL_TICKS, val);
5586 }
5587}
1da177e4
LT
5588
5589/* tp->lock is held. */
5590static int tg3_reset_hw(struct tg3 *tp)
5591{
5592 u32 val, rdmac_mode;
5593 int i, err, limit;
5594
5595 tg3_disable_ints(tp);
5596
5597 tg3_stop_fw(tp);
5598
5599 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5600
5601 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 5602 tg3_abort_hw(tp, 1);
1da177e4
LT
5603 }
5604
d4d2c558
MC
5605 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5606 tg3_phy_reset(tp);
5607
1da177e4
LT
5608 err = tg3_chip_reset(tp);
5609 if (err)
5610 return err;
5611
5612 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5613
5614 /* This works around an issue with Athlon chipsets on
5615 * B3 tigon3 silicon. This bit has no effect on any
5616 * other revision. But do not set this on PCI Express
5617 * chips.
5618 */
5619 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5620 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5621 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5622
5623 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5624 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5625 val = tr32(TG3PCI_PCISTATE);
5626 val |= PCISTATE_RETRY_SAME_DMA;
5627 tw32(TG3PCI_PCISTATE, val);
5628 }
5629
5630 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5631 /* Enable some hw fixes. */
5632 val = tr32(TG3PCI_MSI_DATA);
5633 val |= (1 << 26) | (1 << 28) | (1 << 29);
5634 tw32(TG3PCI_MSI_DATA, val);
5635 }
5636
5637 /* Descriptor ring init may make accesses to the
5638 * NIC SRAM area to setup the TX descriptors, so we
5639 * can only do this after the hardware has been
5640 * successfully reset.
5641 */
5642 tg3_init_rings(tp);
5643
5644 /* This value is determined during the probe time DMA
5645 * engine test, tg3_test_dma.
5646 */
5647 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5648
5649 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5650 GRC_MODE_4X_NIC_SEND_RINGS |
5651 GRC_MODE_NO_TX_PHDR_CSUM |
5652 GRC_MODE_NO_RX_PHDR_CSUM);
5653 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5654 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5655 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5656 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5657 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5658
5659 tw32(GRC_MODE,
5660 tp->grc_mode |
5661 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5662
5663 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5664 val = tr32(GRC_MISC_CFG);
5665 val &= ~0xff;
5666 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5667 tw32(GRC_MISC_CFG, val);
5668
5669 /* Initialize MBUF/DESC pool. */
cbf46853 5670 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
5671 /* Do nothing. */
5672 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5673 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5675 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5676 else
5677 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5678 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5679 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5680 }
5681#if TG3_TSO_SUPPORT != 0
5682 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5683 int fw_len;
5684
5685 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5686 TG3_TSO5_FW_RODATA_LEN +
5687 TG3_TSO5_FW_DATA_LEN +
5688 TG3_TSO5_FW_SBSS_LEN +
5689 TG3_TSO5_FW_BSS_LEN);
5690 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5691 tw32(BUFMGR_MB_POOL_ADDR,
5692 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5693 tw32(BUFMGR_MB_POOL_SIZE,
5694 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5695 }
5696#endif
5697
0f893dc6 5698 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
5699 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5700 tp->bufmgr_config.mbuf_read_dma_low_water);
5701 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5702 tp->bufmgr_config.mbuf_mac_rx_low_water);
5703 tw32(BUFMGR_MB_HIGH_WATER,
5704 tp->bufmgr_config.mbuf_high_water);
5705 } else {
5706 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5707 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5708 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5709 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5710 tw32(BUFMGR_MB_HIGH_WATER,
5711 tp->bufmgr_config.mbuf_high_water_jumbo);
5712 }
5713 tw32(BUFMGR_DMA_LOW_WATER,
5714 tp->bufmgr_config.dma_low_water);
5715 tw32(BUFMGR_DMA_HIGH_WATER,
5716 tp->bufmgr_config.dma_high_water);
5717
5718 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5719 for (i = 0; i < 2000; i++) {
5720 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5721 break;
5722 udelay(10);
5723 }
5724 if (i >= 2000) {
5725 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5726 tp->dev->name);
5727 return -ENODEV;
5728 }
5729
5730 /* Setup replenish threshold. */
5731 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5732
5733 /* Initialize TG3_BDINFO's at:
5734 * RCVDBDI_STD_BD: standard eth size rx ring
5735 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5736 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5737 *
5738 * like so:
5739 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5740 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5741 * ring attribute flags
5742 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5743 *
5744 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5745 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5746 *
5747 * The size of each ring is fixed in the firmware, but the location is
5748 * configurable.
5749 */
5750 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5751 ((u64) tp->rx_std_mapping >> 32));
5752 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5753 ((u64) tp->rx_std_mapping & 0xffffffff));
5754 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5755 NIC_SRAM_RX_BUFFER_DESC);
5756
5757 /* Don't even try to program the JUMBO/MINI buffer descriptor
5758 * configs on 5705.
5759 */
5760 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5761 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5762 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5763 } else {
5764 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5765 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5766
5767 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5768 BDINFO_FLAGS_DISABLED);
5769
5770 /* Setup replenish threshold. */
5771 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5772
0f893dc6 5773 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
5774 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5775 ((u64) tp->rx_jumbo_mapping >> 32));
5776 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5777 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5778 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5779 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5780 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5781 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5782 } else {
5783 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5784 BDINFO_FLAGS_DISABLED);
5785 }
5786
5787 }
5788
5789 /* There is only one send ring on 5705/5750, no need to explicitly
5790 * disable the others.
5791 */
5792 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5793 /* Clear out send RCB ring in SRAM. */
5794 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5795 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5796 BDINFO_FLAGS_DISABLED);
5797 }
5798
5799 tp->tx_prod = 0;
5800 tp->tx_cons = 0;
5801 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5802 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5803
5804 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5805 tp->tx_desc_mapping,
5806 (TG3_TX_RING_SIZE <<
5807 BDINFO_FLAGS_MAXLEN_SHIFT),
5808 NIC_SRAM_TX_BUFFER_DESC);
5809
5810 /* There is only one receive return ring on 5705/5750, no need
5811 * to explicitly disable the others.
5812 */
5813 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5814 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5815 i += TG3_BDINFO_SIZE) {
5816 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5817 BDINFO_FLAGS_DISABLED);
5818 }
5819 }
5820
5821 tp->rx_rcb_ptr = 0;
5822 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5823
5824 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5825 tp->rx_rcb_mapping,
5826 (TG3_RX_RCB_RING_SIZE(tp) <<
5827 BDINFO_FLAGS_MAXLEN_SHIFT),
5828 0);
5829
5830 tp->rx_std_ptr = tp->rx_pending;
5831 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5832 tp->rx_std_ptr);
5833
0f893dc6 5834 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
5835 tp->rx_jumbo_pending : 0;
5836 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5837 tp->rx_jumbo_ptr);
5838
5839 /* Initialize MAC address and backoff seed. */
5840 __tg3_set_mac_addr(tp);
5841
5842 /* MTU + ethernet header + FCS + optional VLAN tag */
5843 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5844
5845 /* The slot time is changed by tg3_setup_phy if we
5846 * run at gigabit with half duplex.
5847 */
5848 tw32(MAC_TX_LENGTHS,
5849 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5850 (6 << TX_LENGTHS_IPG_SHIFT) |
5851 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5852
5853 /* Receive rules. */
5854 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5855 tw32(RCVLPC_CONFIG, 0x0181);
5856
5857 /* Calculate RDMAC_MODE setting early, we need it to determine
5858 * the RCVLPC_STATE_ENABLE mask.
5859 */
5860 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5861 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5862 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5863 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5864 RDMAC_MODE_LNGREAD_ENAB);
5865 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5866 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
5867
5868 /* If statement applies to 5705 and 5750 PCI devices only */
5869 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5870 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5871 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
5872 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5873 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5874 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5875 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5876 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5877 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5878 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5879 }
5880 }
5881
85e94ced
MC
5882 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5883 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5884
1da177e4
LT
5885#if TG3_TSO_SUPPORT != 0
5886 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5887 rdmac_mode |= (1 << 27);
5888#endif
5889
5890 /* Receive/send statistics. */
5891 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5892 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5893 val = tr32(RCVLPC_STATS_ENABLE);
5894 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5895 tw32(RCVLPC_STATS_ENABLE, val);
5896 } else {
5897 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5898 }
5899 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5900 tw32(SNDDATAI_STATSENAB, 0xffffff);
5901 tw32(SNDDATAI_STATSCTRL,
5902 (SNDDATAI_SCTRL_ENABLE |
5903 SNDDATAI_SCTRL_FASTUPD));
5904
5905 /* Setup host coalescing engine. */
5906 tw32(HOSTCC_MODE, 0);
5907 for (i = 0; i < 2000; i++) {
5908 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5909 break;
5910 udelay(10);
5911 }
5912
d244c892 5913 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
5914
5915 /* set status block DMA address */
5916 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5917 ((u64) tp->status_mapping >> 32));
5918 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5919 ((u64) tp->status_mapping & 0xffffffff));
5920
5921 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5922 /* Status/statistics block address. See tg3_timer,
5923 * the tg3_periodic_fetch_stats call there, and
5924 * tg3_get_stats to see how this works for 5705/5750 chips.
5925 */
1da177e4
LT
5926 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5927 ((u64) tp->stats_mapping >> 32));
5928 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5929 ((u64) tp->stats_mapping & 0xffffffff));
5930 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5931 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5932 }
5933
5934 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5935
5936 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5937 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5938 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5939 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5940
5941 /* Clear statistics/status block in chip, and status block in ram. */
5942 for (i = NIC_SRAM_STATS_BLK;
5943 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5944 i += sizeof(u32)) {
5945 tg3_write_mem(tp, i, 0);
5946 udelay(40);
5947 }
5948 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5949
c94e3941
MC
5950 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5951 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5952 /* reset to prevent losing 1st rx packet intermittently */
5953 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5954 udelay(10);
5955 }
5956
1da177e4
LT
5957 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5958 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5959 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5960 udelay(40);
5961
314fba34
MC
5962 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5963 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5964 * register to preserve the GPIO settings for LOMs. The GPIOs,
5965 * whether used as inputs or outputs, are set by boot code after
5966 * reset.
5967 */
5968 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5969 u32 gpio_mask;
5970
5971 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5972 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
5973
5974 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5975 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5976 GRC_LCLCTRL_GPIO_OUTPUT3;
5977
314fba34
MC
5978 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5979
5980 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
5981 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5982 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 5983 }
1da177e4
LT
5984 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5985 udelay(100);
5986
09ee929c 5987 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 5988 tp->last_tag = 0;
1da177e4
LT
5989
5990 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5991 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5992 udelay(40);
5993 }
5994
5995 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5996 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5997 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5998 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5999 WDMAC_MODE_LNGREAD_ENAB);
6000
85e94ced
MC
6001 /* If statement applies to 5705 and 5750 PCI devices only */
6002 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6003 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6005 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6006 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6007 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6008 /* nothing */
6009 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6010 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6011 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6012 val |= WDMAC_MODE_RX_ACCEL;
6013 }
6014 }
6015
6016 tw32_f(WDMAC_MODE, val);
6017 udelay(40);
6018
6019 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6020 val = tr32(TG3PCI_X_CAPS);
6021 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6022 val &= ~PCIX_CAPS_BURST_MASK;
6023 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6024 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6025 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6026 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6027 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6028 val |= (tp->split_mode_max_reqs <<
6029 PCIX_CAPS_SPLIT_SHIFT);
6030 }
6031 tw32(TG3PCI_X_CAPS, val);
6032 }
6033
6034 tw32_f(RDMAC_MODE, rdmac_mode);
6035 udelay(40);
6036
6037 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6038 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6039 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6040 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6041 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6042 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6043 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6044 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6045#if TG3_TSO_SUPPORT != 0
6046 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6047 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6048#endif
6049 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6050 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6051
6052 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6053 err = tg3_load_5701_a0_firmware_fix(tp);
6054 if (err)
6055 return err;
6056 }
6057
6058#if TG3_TSO_SUPPORT != 0
6059 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6060 err = tg3_load_tso_firmware(tp);
6061 if (err)
6062 return err;
6063 }
6064#endif
6065
6066 tp->tx_mode = TX_MODE_ENABLE;
6067 tw32_f(MAC_TX_MODE, tp->tx_mode);
6068 udelay(100);
6069
6070 tp->rx_mode = RX_MODE_ENABLE;
6071 tw32_f(MAC_RX_MODE, tp->rx_mode);
6072 udelay(10);
6073
6074 if (tp->link_config.phy_is_low_power) {
6075 tp->link_config.phy_is_low_power = 0;
6076 tp->link_config.speed = tp->link_config.orig_speed;
6077 tp->link_config.duplex = tp->link_config.orig_duplex;
6078 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6079 }
6080
6081 tp->mi_mode = MAC_MI_MODE_BASE;
6082 tw32_f(MAC_MI_MODE, tp->mi_mode);
6083 udelay(80);
6084
6085 tw32(MAC_LED_CTRL, tp->led_ctrl);
6086
6087 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6088 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6089 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6090 udelay(10);
6091 }
6092 tw32_f(MAC_RX_MODE, tp->rx_mode);
6093 udelay(10);
6094
6095 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6096 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6097 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6098 /* Set drive transmission level to 1.2V */
6099 /* only if the signal pre-emphasis bit is not set */
6100 val = tr32(MAC_SERDES_CFG);
6101 val &= 0xfffff000;
6102 val |= 0x880;
6103 tw32(MAC_SERDES_CFG, val);
6104 }
6105 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6106 tw32(MAC_SERDES_CFG, 0x616000);
6107 }
6108
6109 /* Prevent chip from dropping frames when flow control
6110 * is enabled.
6111 */
6112 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6113
6114 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6115 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6116 /* Use hardware link auto-negotiation */
6117 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6118 }
6119
d4d2c558
MC
6120 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6121 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6122 u32 tmp;
6123
6124 tmp = tr32(SERDES_RX_CTRL);
6125 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6126 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6127 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6128 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6129 }
6130
1da177e4
LT
6131 err = tg3_setup_phy(tp, 1);
6132 if (err)
6133 return err;
6134
6135 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6136 u32 tmp;
6137
6138 /* Clear CRC stats. */
6139 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6140 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6141 tg3_readphy(tp, 0x14, &tmp);
6142 }
6143 }
6144
6145 __tg3_set_rx_mode(tp->dev);
6146
6147 /* Initialize receive rules. */
6148 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6149 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6150 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6151 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6152
4cf78e4f 6153 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6154 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6155 limit = 8;
6156 else
6157 limit = 16;
6158 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6159 limit -= 4;
6160 switch (limit) {
6161 case 16:
6162 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6163 case 15:
6164 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6165 case 14:
6166 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6167 case 13:
6168 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6169 case 12:
6170 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6171 case 11:
6172 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6173 case 10:
6174 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6175 case 9:
6176 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6177 case 8:
6178 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6179 case 7:
6180 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6181 case 6:
6182 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6183 case 5:
6184 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6185 case 4:
6186 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6187 case 3:
6188 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6189 case 2:
6190 case 1:
6191
6192 default:
6193 break;
6194 };
6195
6196 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6197
1da177e4
LT
6198 return 0;
6199}
6200
6201/* Called at device open time to get the chip ready for
6202 * packet processing. Invoked with tp->lock held.
6203 */
6204static int tg3_init_hw(struct tg3 *tp)
6205{
6206 int err;
6207
6208 /* Force the chip into D0. */
6209 err = tg3_set_power_state(tp, 0);
6210 if (err)
6211 goto out;
6212
6213 tg3_switch_clocks(tp);
6214
6215 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6216
6217 err = tg3_reset_hw(tp);
6218
6219out:
6220 return err;
6221}
6222
/* Fold the 32-bit hardware counter REG into the 64-bit accumulator
 * PSTAT, carrying into .high when the .low half wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG)		\
do {						\
	u32 __v = tr32(REG);			\
	(PSTAT)->low += __v;			\
	if ((PSTAT)->low < __v)			\
		(PSTAT)->high += 1;		\
} while (0)
6229
6230static void tg3_periodic_fetch_stats(struct tg3 *tp)
6231{
6232 struct tg3_hw_stats *sp = tp->hw_stats;
6233
6234 if (!netif_carrier_ok(tp->dev))
6235 return;
6236
6237 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6238 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6239 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6240 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6241 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6242 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6243 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6244 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6245 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6246 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6247 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6248 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6249 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6250
6251 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6252 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6253 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6254 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6255 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6256 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6257 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6258 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6259 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6260 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6261 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6262 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6263 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6264 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6265}
6266
6267static void tg3_timer(unsigned long __opaque)
6268{
6269 struct tg3 *tp = (struct tg3 *) __opaque;
1da177e4 6270
f47c11ee 6271 spin_lock(&tp->lock);
1da177e4 6272
fac9b83e
DM
6273 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6274 /* All of this garbage is because when using non-tagged
6275 * IRQ status the mailbox/status_block protocol the chip
6276 * uses with the cpu is race prone.
6277 */
6278 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6279 tw32(GRC_LOCAL_CTRL,
6280 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6281 } else {
6282 tw32(HOSTCC_MODE, tp->coalesce_mode |
6283 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6284 }
1da177e4 6285
fac9b83e
DM
6286 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6287 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
f47c11ee 6288 spin_unlock(&tp->lock);
fac9b83e
DM
6289 schedule_work(&tp->reset_task);
6290 return;
6291 }
1da177e4
LT
6292 }
6293
1da177e4
LT
6294 /* This part only runs once per second. */
6295 if (!--tp->timer_counter) {
fac9b83e
DM
6296 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6297 tg3_periodic_fetch_stats(tp);
6298
1da177e4
LT
6299 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6300 u32 mac_stat;
6301 int phy_event;
6302
6303 mac_stat = tr32(MAC_STATUS);
6304
6305 phy_event = 0;
6306 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6307 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6308 phy_event = 1;
6309 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6310 phy_event = 1;
6311
6312 if (phy_event)
6313 tg3_setup_phy(tp, 0);
6314 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6315 u32 mac_stat = tr32(MAC_STATUS);
6316 int need_setup = 0;
6317
6318 if (netif_carrier_ok(tp->dev) &&
6319 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6320 need_setup = 1;
6321 }
6322 if (! netif_carrier_ok(tp->dev) &&
6323 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6324 MAC_STATUS_SIGNAL_DET))) {
6325 need_setup = 1;
6326 }
6327 if (need_setup) {
6328 tw32_f(MAC_MODE,
6329 (tp->mac_mode &
6330 ~MAC_MODE_PORT_MODE_MASK));
6331 udelay(40);
6332 tw32_f(MAC_MODE, tp->mac_mode);
6333 udelay(40);
6334 tg3_setup_phy(tp, 0);
6335 }
747e8f8b
MC
6336 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6337 tg3_serdes_parallel_detect(tp);
1da177e4
LT
6338
6339 tp->timer_counter = tp->timer_multiplier;
6340 }
6341
28fbef78 6342 /* Heartbeat is only sent once every 2 seconds. */
1da177e4
LT
6343 if (!--tp->asf_counter) {
6344 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6345 u32 val;
6346
28fbef78
MC
6347 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6348 FWCMD_NICDRV_ALIVE2);
6349 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6350 /* 5 seconds timeout */
6351 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
1da177e4
LT
6352 val = tr32(GRC_RX_CPU_EVENT);
6353 val |= (1 << 14);
6354 tw32(GRC_RX_CPU_EVENT, val);
6355 }
6356 tp->asf_counter = tp->asf_multiplier;
6357 }
6358
f47c11ee 6359 spin_unlock(&tp->lock);
1da177e4
LT
6360
6361 tp->timer.expires = jiffies + tp->timer_offset;
6362 add_timer(&tp->timer);
6363}
6364
7938109f
MC
6365static int tg3_test_interrupt(struct tg3 *tp)
6366{
6367 struct net_device *dev = tp->dev;
6368 int err, i;
6369 u32 int_mbox = 0;
6370
d4bc3927
MC
6371 if (!netif_running(dev))
6372 return -ENODEV;
6373
7938109f
MC
6374 tg3_disable_ints(tp);
6375
6376 free_irq(tp->pdev->irq, dev);
6377
6378 err = request_irq(tp->pdev->irq, tg3_test_isr,
f4d0ee98 6379 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
7938109f
MC
6380 if (err)
6381 return err;
6382
38f3843e 6383 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
6384 tg3_enable_ints(tp);
6385
6386 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6387 HOSTCC_MODE_NOW);
6388
6389 for (i = 0; i < 5; i++) {
09ee929c
MC
6390 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6391 TG3_64BIT_REG_LOW);
7938109f
MC
6392 if (int_mbox != 0)
6393 break;
6394 msleep(10);
6395 }
6396
6397 tg3_disable_ints(tp);
6398
6399 free_irq(tp->pdev->irq, dev);
6400
6401 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6402 err = request_irq(tp->pdev->irq, tg3_msi,
f4d0ee98 6403 SA_SAMPLE_RANDOM, dev->name, dev);
fac9b83e
DM
6404 else {
6405 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6406 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6407 fn = tg3_interrupt_tagged;
6408 err = request_irq(tp->pdev->irq, fn,
f4d0ee98 6409 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
fac9b83e 6410 }
7938109f
MC
6411
6412 if (err)
6413 return err;
6414
6415 if (int_mbox != 0)
6416 return 0;
6417
6418 return -EIO;
6419}
6420
6421/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6422 * successfully restored
6423 */
6424static int tg3_test_msi(struct tg3 *tp)
6425{
6426 struct net_device *dev = tp->dev;
6427 int err;
6428 u16 pci_cmd;
6429
6430 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6431 return 0;
6432
6433 /* Turn off SERR reporting in case MSI terminates with Master
6434 * Abort.
6435 */
6436 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6437 pci_write_config_word(tp->pdev, PCI_COMMAND,
6438 pci_cmd & ~PCI_COMMAND_SERR);
6439
6440 err = tg3_test_interrupt(tp);
6441
6442 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6443
6444 if (!err)
6445 return 0;
6446
6447 /* other failures */
6448 if (err != -EIO)
6449 return err;
6450
6451 /* MSI test failed, go back to INTx mode */
6452 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6453 "switching to INTx mode. Please report this failure to "
6454 "the PCI maintainer and include system chipset information.\n",
6455 tp->dev->name);
6456
6457 free_irq(tp->pdev->irq, dev);
6458 pci_disable_msi(tp->pdev);
6459
6460 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6461
fac9b83e
DM
6462 {
6463 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6464 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6465 fn = tg3_interrupt_tagged;
7938109f 6466
fac9b83e
DM
6467 err = request_irq(tp->pdev->irq, fn,
6468 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6469 }
7938109f
MC
6470 if (err)
6471 return err;
6472
6473 /* Need to reset the chip because the MSI cycle may have terminated
6474 * with Master Abort.
6475 */
f47c11ee 6476 tg3_full_lock(tp, 1);
7938109f 6477
944d980e 6478 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f
MC
6479 err = tg3_init_hw(tp);
6480
f47c11ee 6481 tg3_full_unlock(tp);
7938109f
MC
6482
6483 if (err)
6484 free_irq(tp->pdev->irq, dev);
6485
6486 return err;
6487}
6488
1da177e4
LT
/* net_device open() method: allocate DMA-consistent rings, choose and
 * request the interrupt (MSI when the chip revision allows it, INTx
 * otherwise), bring up the hardware, verify MSI delivery, and start the
 * maintenance timer and TX queue.
 *
 * Returns 0 on success or a negative errno; on every failure path all
 * resources acquired so far (IRQ, MSI state, consistent memory, rings)
 * are released before returning.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	/* MSI is only attempted on 5750+ parts, excluding the A/B revs and
	 * single-port 5714 (tp->pdev_peer == tp->pdev) — presumably known
	 * MSI problems on those; TODO confirm against Broadcom errata.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
	    !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
	      (tp->pdev_peer == tp->pdev))) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Enable the chip's message-interrupt block. */
			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else {
		/* Legacy INTx: pick tagged-status handler if available. */
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;

		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status lets us poll less often (1 Hz vs 10 Hz). */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		/* Timer is armed later (add_timer below) only on success. */
		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	/* Hardware is up; make sure MSI interrupts really arrive, and tear
	 * everything down if tg3_test_msi()'s INTx fallback also failed.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			return err;
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
6616
#if 0
/* Debug-only dumper, compiled out in normal builds (#if 0).  Spews PCI
 * config state, every major MAC/DMA/host-coalescing block's MODE/STATUS
 * registers, the NIC SRAM ring control blocks, the host status and
 * statistics blocks, and the first few NIC-side TX/RX descriptors to the
 * kernel log.  No locking is taken here; the caller must make the device
 * quiescent/accessible first.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* NIC SRAM ring control blocks (read via the memory window). */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
6844
6845static struct net_device_stats *tg3_get_stats(struct net_device *);
6846static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6847
/* net_device stop() method: quiesce and shut down the interface.  Waits
 * for any in-flight reset task, stops the TX queue and timer, halts the
 * chip, releases the IRQ/MSI, snapshots the hardware counters into the
 * *_prev accumulators (so totals survive the ring memory being freed),
 * and finally frees the DMA-consistent memory.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);
	netif_carrier_off(tp->dev);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Fold the final hardware counters into the saved totals before
	 * tg3_free_consistent() releases the stats block.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	return 0;
}
6894
6895static inline unsigned long get_stat64(tg3_stat64_t *val)
6896{
6897 unsigned long ret;
6898
6899#if (BITS_PER_LONG == 32)
6900 ret = val->low;
6901#else
6902 ret = ((u64)val->high << 32) | ((u64)val->low);
6903#endif
6904 return ret;
6905}
6906
/* Return the cumulative RX CRC error count.  On 5700/5701 copper parts
 * the MAC statistic is not used; instead the PHY's error counter is read
 * (via an expansion register at 0x1e/0x14) and accumulated in software.
 * All other parts report the MAC's rx_fcs_errors counter directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY access must be serialized against other MII users. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			/* Setting bit 15 presumably selects/latches the PHY
			 * CRC counter page — TODO confirm against PHY docs.
			 */
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		/* PHY counter clears on read, so accumulate in software. */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
6931
/* Add the live hardware counter for @member to the value saved at the
 * last close (estats_prev), storing the running total in tp->estats.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)

/* Populate and return the driver's ethtool statistics block.  Each field
 * is old (pre-last-close) total + current hardware counter.  Falls back
 * to the saved totals if the stats block is not mapped (device closed).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* RX counters */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* TX counters */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Internal DMA / queueing counters */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7023
/* net_device get_stats() method: synthesize the generic netdev counters
 * from the chip's hardware statistics block, added to the totals saved at
 * the last close.  Returns the saved totals unchanged when the stats
 * block is not mapped (device closed).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701, see
	 * calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7083
7084static inline u32 calc_crc(unsigned char *buf, int len)
7085{
7086 u32 reg;
7087 u32 tmp;
7088 int j, k;
7089
7090 reg = 0xffffffff;
7091
7092 for (j = 0; j < len; j++) {
7093 reg ^= buf[j];
7094
7095 for (k = 0; k < 8; k++) {
7096 tmp = reg & 0x01;
7097
7098 reg >>= 1;
7099
7100 if (tmp) {
7101 reg ^= 0xedb88320;
7102 }
7103 }
7104 }
7105
7106 return ~reg;
7107}
7108
7109static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7110{
7111 /* accept or reject all multicast frames */
7112 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7113 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7114 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7115 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7116}
7117
/* Program the RX filtering state (promiscuous, all-multi, multicast hash,
 * VLAN tag stripping) from the netdev flags and multicast list.  Caller
 * must hold the driver lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of 128 filter bits:
			 * low 7 bits of the inverted CRC select register
			 * (bits 6:5) and bit position (bits 4:0).
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7181
/* net_device set_multicast_list() entry point: take the driver lock and
 * reprogram the RX filters via __tg3_set_rx_mode().
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
7190
/* Size of the ethtool register dump buffer (covers the 32 KB register
 * window captured by tg3_get_regs()).
 */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len() method. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7197
/* ethtool get_regs() method: copy the interesting register ranges into
 * the user-visible dump buffer.  Each register lands at its own offset
 * within the buffer (the GET_REG32_* helpers re-seat the write pointer),
 * so undumped holes stay zero from the initial memset.
 */
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	tg3_full_lock(tp, 0);

/* Read one register into the buffer and advance the write pointer. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump `len` bytes of registers starting at offset `base`, placed at
 * the same offset inside the dump buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only dumped when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7267
/* ethtool get_eeprom_len() method: size of the NVRAM as detected at
 * probe time.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
7274
7275static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7276
/* ethtool get_eeprom() method: read an arbitrary byte range from NVRAM.
 * The NVRAM interface works in aligned 32-bit words, so an unaligned
 * head and/or tail is handled by reading the containing word and copying
 * only the requested bytes.  eeprom->len is updated to the number of
 * bytes actually delivered, even on a partial failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NVRAM words are little-endian in the dump format. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7335
7336static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7337
/* ethtool set_eeprom() method: write an arbitrary byte range to NVRAM.
 * Writes must be whole aligned 32-bit words, so when the requested range
 * is unaligned at either end the bordering words are read first and a
 * temporary buffer is assembled: [saved head bytes][user data][saved
 * tail bytes].  Returns 0 on success or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		/* A sub-word write still programs one full word. */
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Assemble an aligned scratch buffer with the preserved
		 * border bytes around the caller's data.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (buf == 0)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
7393
/* ethtool get_settings() method: report supported modes, current
 * advertisement, and (when the interface is running) the negotiated
 * speed/duplex.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless this is a 10/100-only part. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper parts also do 10/100 over MII; SerDes parts are fibre. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
	else
		cmd->supported |= SUPPORTED_FIBRE;

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->port = 0;
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
7426
7427static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7428{
7429 struct tg3 *tp = netdev_priv(dev);
7430
37ff238d 7431 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
7432 /* These are the only valid advertisement bits allowed. */
7433 if (cmd->autoneg == AUTONEG_ENABLE &&
7434 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7435 ADVERTISED_1000baseT_Full |
7436 ADVERTISED_Autoneg |
7437 ADVERTISED_FIBRE)))
7438 return -EINVAL;
37ff238d
MC
7439 /* Fiber can only do SPEED_1000. */
7440 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7441 (cmd->speed != SPEED_1000))
7442 return -EINVAL;
7443 /* Copper cannot force SPEED_1000. */
7444 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7445 (cmd->speed == SPEED_1000))
7446 return -EINVAL;
7447 else if ((cmd->speed == SPEED_1000) &&
7448 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7449 return -EINVAL;
1da177e4 7450
f47c11ee 7451 tg3_full_lock(tp, 0);
1da177e4
LT
7452
7453 tp->link_config.autoneg = cmd->autoneg;
7454 if (cmd->autoneg == AUTONEG_ENABLE) {
7455 tp->link_config.advertising = cmd->advertising;
7456 tp->link_config.speed = SPEED_INVALID;
7457 tp->link_config.duplex = DUPLEX_INVALID;
7458 } else {
7459 tp->link_config.advertising = 0;
7460 tp->link_config.speed = cmd->speed;
7461 tp->link_config.duplex = cmd->duplex;
7462 }
7463
7464 if (netif_running(dev))
7465 tg3_setup_phy(tp, 1);
7466
f47c11ee 7467 tg3_full_unlock(tp);
1da177e4
LT
7468
7469 return 0;
7470}
7471
/* ethtool get_drvinfo() method: fill in driver name, version and PCI
 * bus address.  The driver/version macros are short literals that fit
 * the fixed-size ethtool_drvinfo fields.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
7480
7481static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7482{
7483 struct tg3 *tp = netdev_priv(dev);
7484
7485 wol->supported = WAKE_MAGIC;
7486 wol->wolopts = 0;
7487 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7488 wol->wolopts = WAKE_MAGIC;
7489 memset(&wol->sopass, 0, sizeof(wol->sopass));
7490}
7491
/* ethtool set_wol() method: enable or disable magic-packet wakeup.
 * Rejects any other wake option, and rejects WoL on SerDes parts that
 * are not WoL-capable.  Returns 0 or -EINVAL.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
		return -EINVAL;

	/* Flag word is shared; update it under the driver lock. */
	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}
7512
7513static u32 tg3_get_msglevel(struct net_device *dev)
7514{
7515 struct tg3 *tp = netdev_priv(dev);
7516 return tp->msg_enable;
7517}
7518
7519static void tg3_set_msglevel(struct net_device *dev, u32 value)
7520{
7521 struct tg3 *tp = netdev_priv(dev);
7522 tp->msg_enable = value;
7523}
7524
#if TG3_TSO_SUPPORT != 0
/* ethtool ->set_tso: delegate to the generic helper on TSO-capable chips;
 * otherwise only "off" is acceptable.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
		return ethtool_op_set_tso(dev, value);

	/* Hardware cannot do TSO: enabling it is an error, disabling is a no-op. */
	return value ? -EINVAL : 0;
}
#endif
7538
/* ethtool ->nway_reset: restart autonegotiation on the copper PHY.
 * Returns 0 on success, -EAGAIN if the interface is down, -EINVAL on
 * SERDES boards or when autoneg is not enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* No MII PHY to poke on SERDES boards. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice and the first result discarded —
	 * presumably to flush latched status; TODO confirm this is intentional.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick off a fresh autonegotiation cycle. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7565
7566static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7567{
7568 struct tg3 *tp = netdev_priv(dev);
7569
7570 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7571 ering->rx_mini_max_pending = 0;
7572 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7573
7574 ering->rx_pending = tp->rx_pending;
7575 ering->rx_mini_pending = 0;
7576 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7577 ering->tx_pending = tp->tx_pending;
7578}
7579
/* ethtool ->set_ringparam: resize the RX/jumbo/TX rings.
 * If the interface is running, the chip is halted and reinitialized so the
 * new sizes take effect.  Returns 0 or -EINVAL for out-of-range sizes.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;

	/* Reject sizes beyond what the descriptor rings can hold. */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	/* Quiesce NAPI/interrupts before taking the full lock. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	/* Restart the chip so the new ring sizes are programmed. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
}
7615
7616static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7617{
7618 struct tg3 *tp = netdev_priv(dev);
7619
7620 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7621 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7622 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7623}
7624
/* ethtool ->set_pauseparam: update the flow-control flags and, if the
 * interface is up, halt and reinitialize the chip so they take effect.
 * Always returns 0.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;

	/* Quiesce NAPI/interrupts before taking the full lock. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	/* Restart the chip so the new flow-control settings are programmed. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
}
7660
7661static u32 tg3_get_rx_csum(struct net_device *dev)
7662{
7663 struct tg3 *tp = netdev_priv(dev);
7664 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7665}
7666
7667static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7668{
7669 struct tg3 *tp = netdev_priv(dev);
7670
7671 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7672 if (data != 0)
7673 return -EINVAL;
7674 return 0;
7675 }
7676
f47c11ee 7677 spin_lock_bh(&tp->lock);
1da177e4
LT
7678 if (data)
7679 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7680 else
7681 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 7682 spin_unlock_bh(&tp->lock);
1da177e4
LT
7683
7684 return 0;
7685}
7686
7687static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7688{
7689 struct tg3 *tp = netdev_priv(dev);
7690
7691 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7692 if (data != 0)
7693 return -EINVAL;
7694 return 0;
7695 }
7696
7697 if (data)
7698 dev->features |= NETIF_F_IP_CSUM;
7699 else
7700 dev->features &= ~NETIF_F_IP_CSUM;
7701
7702 return 0;
7703}
7704
/* ethtool ->get_stats_count: number of u64 statistics we export. */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
7709
4cafd3f5
MC
/* ethtool ->self_test_count: number of self-test result slots. */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
7714
1da177e4
LT
/* ethtool ->get_strings: copy out the statistics or self-test name table
 * for the requested string set.
 */
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
7729
4009a93d
MC
/* ethtool ->phys_id: blink the port LEDs so an operator can locate the NIC.
 * @data: number of seconds to blink; 0 selects the 2-second default.
 * Returns 0, or -EAGAIN if the interface is down.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	/* Alternate between "all LEDs forced on" and "all off" every 500ms. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		/* Stop early if a signal interrupts the sleep. */
		if (msleep_interruptible(500))
			break;
	}
	/* Restore normal LED behavior. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
7761
1da177e4
LT
/* ethtool ->get_ethtool_stats: snapshot the driver-maintained counters
 * into the caller-provided u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
7768
566f86ad
MC
#define NVRAM_TEST_SIZE 0x100

/* Self-test: read the first 256 bytes of NVRAM and verify the magic
 * signature, the bootstrap checksum (offset 0x10) and the manufacturing
 * block checksum (offset 0xfc).  Returns 0 on success, -ENOMEM, -EIO, or
 * the tg3_nvram_read() error code.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum;
	int i, j, err = 0;

	buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read NVRAM one 32-bit word at a time into buf. */
	for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	/* A short read means the loop broke out with err set. */
	if (i < NVRAM_TEST_SIZE)
		goto out;

	err = -EIO;
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
		goto out;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
7810
ca43007a
MC
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

/* Self-test: wait up to a few seconds for the link to come up.
 * Returns 0 once carrier is detected, -ENODEV if the interface is down,
 * -EIO on timeout or interrupted sleep.
 */
static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	/* SERDES links settle faster than copper autonegotiation. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	/* Poll carrier once per second. */
	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
7836
a71116d1 7837/* Only test the commonly used registers */
f71e1309 7838static const int tg3_test_registers(struct tg3 *tp)
a71116d1
MC
7839{
7840 int i, is_5705;
7841 u32 offset, read_mask, write_mask, val, save_val, read_val;
7842 static struct {
7843 u16 offset;
7844 u16 flags;
7845#define TG3_FL_5705 0x1
7846#define TG3_FL_NOT_5705 0x2
7847#define TG3_FL_NOT_5788 0x4
7848 u32 read_mask;
7849 u32 write_mask;
7850 } reg_tbl[] = {
7851 /* MAC Control Registers */
7852 { MAC_MODE, TG3_FL_NOT_5705,
7853 0x00000000, 0x00ef6f8c },
7854 { MAC_MODE, TG3_FL_5705,
7855 0x00000000, 0x01ef6b8c },
7856 { MAC_STATUS, TG3_FL_NOT_5705,
7857 0x03800107, 0x00000000 },
7858 { MAC_STATUS, TG3_FL_5705,
7859 0x03800100, 0x00000000 },
7860 { MAC_ADDR_0_HIGH, 0x0000,
7861 0x00000000, 0x0000ffff },
7862 { MAC_ADDR_0_LOW, 0x0000,
7863 0x00000000, 0xffffffff },
7864 { MAC_RX_MTU_SIZE, 0x0000,
7865 0x00000000, 0x0000ffff },
7866 { MAC_TX_MODE, 0x0000,
7867 0x00000000, 0x00000070 },
7868 { MAC_TX_LENGTHS, 0x0000,
7869 0x00000000, 0x00003fff },
7870 { MAC_RX_MODE, TG3_FL_NOT_5705,
7871 0x00000000, 0x000007fc },
7872 { MAC_RX_MODE, TG3_FL_5705,
7873 0x00000000, 0x000007dc },
7874 { MAC_HASH_REG_0, 0x0000,
7875 0x00000000, 0xffffffff },
7876 { MAC_HASH_REG_1, 0x0000,
7877 0x00000000, 0xffffffff },
7878 { MAC_HASH_REG_2, 0x0000,
7879 0x00000000, 0xffffffff },
7880 { MAC_HASH_REG_3, 0x0000,
7881 0x00000000, 0xffffffff },
7882
7883 /* Receive Data and Receive BD Initiator Control Registers. */
7884 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7885 0x00000000, 0xffffffff },
7886 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7887 0x00000000, 0xffffffff },
7888 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7889 0x00000000, 0x00000003 },
7890 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7891 0x00000000, 0xffffffff },
7892 { RCVDBDI_STD_BD+0, 0x0000,
7893 0x00000000, 0xffffffff },
7894 { RCVDBDI_STD_BD+4, 0x0000,
7895 0x00000000, 0xffffffff },
7896 { RCVDBDI_STD_BD+8, 0x0000,
7897 0x00000000, 0xffff0002 },
7898 { RCVDBDI_STD_BD+0xc, 0x0000,
7899 0x00000000, 0xffffffff },
7900
7901 /* Receive BD Initiator Control Registers. */
7902 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7903 0x00000000, 0xffffffff },
7904 { RCVBDI_STD_THRESH, TG3_FL_5705,
7905 0x00000000, 0x000003ff },
7906 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7907 0x00000000, 0xffffffff },
7908
7909 /* Host Coalescing Control Registers. */
7910 { HOSTCC_MODE, TG3_FL_NOT_5705,
7911 0x00000000, 0x00000004 },
7912 { HOSTCC_MODE, TG3_FL_5705,
7913 0x00000000, 0x000000f6 },
7914 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7915 0x00000000, 0xffffffff },
7916 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7917 0x00000000, 0x000003ff },
7918 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7919 0x00000000, 0xffffffff },
7920 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7921 0x00000000, 0x000003ff },
7922 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7923 0x00000000, 0xffffffff },
7924 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7925 0x00000000, 0x000000ff },
7926 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7927 0x00000000, 0xffffffff },
7928 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7929 0x00000000, 0x000000ff },
7930 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7931 0x00000000, 0xffffffff },
7932 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7933 0x00000000, 0xffffffff },
7934 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7935 0x00000000, 0xffffffff },
7936 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7937 0x00000000, 0x000000ff },
7938 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7939 0x00000000, 0xffffffff },
7940 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7941 0x00000000, 0x000000ff },
7942 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7943 0x00000000, 0xffffffff },
7944 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7945 0x00000000, 0xffffffff },
7946 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7947 0x00000000, 0xffffffff },
7948 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7949 0x00000000, 0xffffffff },
7950 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7951 0x00000000, 0xffffffff },
7952 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7953 0xffffffff, 0x00000000 },
7954 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7955 0xffffffff, 0x00000000 },
7956
7957 /* Buffer Manager Control Registers. */
7958 { BUFMGR_MB_POOL_ADDR, 0x0000,
7959 0x00000000, 0x007fff80 },
7960 { BUFMGR_MB_POOL_SIZE, 0x0000,
7961 0x00000000, 0x007fffff },
7962 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7963 0x00000000, 0x0000003f },
7964 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7965 0x00000000, 0x000001ff },
7966 { BUFMGR_MB_HIGH_WATER, 0x0000,
7967 0x00000000, 0x000001ff },
7968 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7969 0xffffffff, 0x00000000 },
7970 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7971 0xffffffff, 0x00000000 },
7972
7973 /* Mailbox Registers */
7974 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7975 0x00000000, 0x000001ff },
7976 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7977 0x00000000, 0x000001ff },
7978 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7979 0x00000000, 0x000007ff },
7980 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7981 0x00000000, 0x000001ff },
7982
7983 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7984 };
7985
7986 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7987 is_5705 = 1;
7988 else
7989 is_5705 = 0;
7990
7991 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7992 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7993 continue;
7994
7995 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7996 continue;
7997
7998 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7999 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8000 continue;
8001
8002 offset = (u32) reg_tbl[i].offset;
8003 read_mask = reg_tbl[i].read_mask;
8004 write_mask = reg_tbl[i].write_mask;
8005
8006 /* Save the original register content */
8007 save_val = tr32(offset);
8008
8009 /* Determine the read-only value. */
8010 read_val = save_val & read_mask;
8011
8012 /* Write zero to the register, then make sure the read-only bits
8013 * are not changed and the read/write bits are all zeros.
8014 */
8015 tw32(offset, 0);
8016
8017 val = tr32(offset);
8018
8019 /* Test the read-only and read/write bits. */
8020 if (((val & read_mask) != read_val) || (val & write_mask))
8021 goto out;
8022
8023 /* Write ones to all the bits defined by RdMask and WrMask, then
8024 * make sure the read-only bits are not changed and the
8025 * read/write bits are all ones.
8026 */
8027 tw32(offset, read_mask | write_mask);
8028
8029 val = tr32(offset);
8030
8031 /* Test the read-only bits. */
8032 if ((val & read_mask) != read_val)
8033 goto out;
8034
8035 /* Test the read/write bits. */
8036 if ((val & write_mask) != write_mask)
8037 goto out;
8038
8039 tw32(offset, save_val);
8040 }
8041
8042 return 0;
8043
8044out:
8045 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8046 tw32(offset, save_val);
8047 return -EIO;
8048}
8049
7942e1db
MC
/* Write each test pattern to every 32-bit word in [offset, offset+len) of
 * on-chip memory and read it back.  Returns 0 on success, -EIO on the
 * first mismatch.
 */
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}
8068
/* Self-test: exercise the chip's internal SRAM regions.  The region map
 * differs between 5705-and-later chips and the older 570x family.
 * Returns 0 on success, or the first tg3_do_mem_test() error.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}	/* sentinel */
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}	/* sentinel */
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
8104
9f40dead
MC
8105#define TG3_MAC_LOOPBACK 0
8106#define TG3_PHY_LOOPBACK 1
8107
8108static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
c76949a6 8109{
9f40dead 8110 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
c76949a6
MC
8111 u32 desc_idx;
8112 struct sk_buff *skb, *rx_skb;
8113 u8 *tx_data;
8114 dma_addr_t map;
8115 int num_pkts, tx_len, rx_len, i, err;
8116 struct tg3_rx_buffer_desc *desc;
8117
9f40dead 8118 if (loopback_mode == TG3_MAC_LOOPBACK) {
c94e3941
MC
8119 /* HW errata - mac loopback fails in some cases on 5780.
8120 * Normal traffic and PHY loopback are not affected by
8121 * errata.
8122 */
8123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8124 return 0;
8125
9f40dead
MC
8126 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8127 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8128 MAC_MODE_PORT_MODE_GMII;
8129 tw32(MAC_MODE, mac_mode);
8130 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
c94e3941
MC
8131 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8132 BMCR_SPEED1000);
8133 udelay(40);
8134 /* reset to prevent losing 1st rx packet intermittently */
8135 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8136 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8137 udelay(10);
8138 tw32_f(MAC_RX_MODE, tp->rx_mode);
8139 }
9f40dead
MC
8140 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8141 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8142 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8143 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8144 tw32(MAC_MODE, mac_mode);
9f40dead
MC
8145 }
8146 else
8147 return -EINVAL;
c76949a6
MC
8148
8149 err = -EIO;
8150
c76949a6
MC
8151 tx_len = 1514;
8152 skb = dev_alloc_skb(tx_len);
8153 tx_data = skb_put(skb, tx_len);
8154 memcpy(tx_data, tp->dev->dev_addr, 6);
8155 memset(tx_data + 6, 0x0, 8);
8156
8157 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8158
8159 for (i = 14; i < tx_len; i++)
8160 tx_data[i] = (u8) (i & 0xff);
8161
8162 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8163
8164 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8165 HOSTCC_MODE_NOW);
8166
8167 udelay(10);
8168
8169 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8170
c76949a6
MC
8171 num_pkts = 0;
8172
9f40dead 8173 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
c76949a6 8174
9f40dead 8175 tp->tx_prod++;
c76949a6
MC
8176 num_pkts++;
8177
9f40dead
MC
8178 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8179 tp->tx_prod);
09ee929c 8180 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
c76949a6
MC
8181
8182 udelay(10);
8183
8184 for (i = 0; i < 10; i++) {
8185 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8186 HOSTCC_MODE_NOW);
8187
8188 udelay(10);
8189
8190 tx_idx = tp->hw_status->idx[0].tx_consumer;
8191 rx_idx = tp->hw_status->idx[0].rx_producer;
9f40dead 8192 if ((tx_idx == tp->tx_prod) &&
c76949a6
MC
8193 (rx_idx == (rx_start_idx + num_pkts)))
8194 break;
8195 }
8196
8197 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8198 dev_kfree_skb(skb);
8199
9f40dead 8200 if (tx_idx != tp->tx_prod)
c76949a6
MC
8201 goto out;
8202
8203 if (rx_idx != rx_start_idx + num_pkts)
8204 goto out;
8205
8206 desc = &tp->rx_rcb[rx_start_idx];
8207 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8208 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8209 if (opaque_key != RXD_OPAQUE_RING_STD)
8210 goto out;
8211
8212 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8213 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8214 goto out;
8215
8216 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8217 if (rx_len != tx_len)
8218 goto out;
8219
8220 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8221
8222 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8223 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8224
8225 for (i = 14; i < tx_len; i++) {
8226 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8227 goto out;
8228 }
8229 err = 0;
8230
8231 /* tg3_free_rings will unmap and free the rx_skb */
8232out:
8233 return err;
8234}
8235
9f40dead
MC
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED | \
					 TG3_PHY_LOOPBACK_FAILED)

/* Self-test: reset the hardware, then run MAC loopback and (for non-SERDES
 * boards) PHY loopback.  Returns a bitmask of TG3_*_LOOPBACK_FAILED flags;
 * 0 means all applicable loopbacks passed.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	tg3_reset_hw(tp);

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;
	/* PHY loopback only makes sense on copper PHYs. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
8259
4cafd3f5
MC
/* ethtool ->self_test: run the NVRAM and link tests always; when offline
 * testing is requested, additionally halt the chip and run the register,
 * memory, loopback and interrupt tests, then restore normal operation.
 * Per-test pass/fail results are written into data[0..5].
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* data[0]: NVRAM checksum test. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* data[1]: link-up test. */
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its internal CPUs before poking at
		 * registers and memory directly.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		/* data[2]: register read/write test. */
		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		/* data[3]: internal memory test. */
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4]: loopback test bitmask (0 = pass). */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* data[5]: interrupt test; runs without the full lock. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Put the chip back into its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp);
			tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
}
8323
1da177e4
LT
/* net_device ->do_ioctl: MII PHY register access
 * (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  SERDES boards have no MII
 * PHY and fall through to -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writing PHY registers requires admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8369
8370#if TG3_VLAN_TAG_USED
/* VLAN hook: record the new vlan_group and reprogram the RX mode so the
 * chip keeps or strips VLAN tags as appropriate.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);
}
8384
/* VLAN hook: drop our reference to the per-VID device entry. */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);
}
8394#endif
8395
15f9850d
DM
/* ethtool ->get_coalesce: copy out the cached coalescing parameters. */
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
8403
d244c892
MC
/* ethtool ->set_coalesce: validate and apply interrupt-coalescing settings.
 * On 5705-and-later chips the "irq" and statistics-tick parameters are not
 * supported, so their limits are forced to zero.  Returns 0 or -EINVAL.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	/* Range-check every parameter we honor. */
	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	/* Program the hardware immediately if the interface is up. */
	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
8457
1da177e4
LT
/* ethtool operations exported by this driver. */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
8497
/* Probe a non-flash EEPROM's size: read at doubling offsets until the
 * magic signature reappears, indicating the address space wrapped.
 * Leaves tp->nvram_size at the default if the magic is never found.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &val) != 0)
		return;

	if (swab32(val) != TG3_EEPROM_MAGIC)
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x800;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (swab32(val) == TG3_EEPROM_MAGIC)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
8529
/* Read the NVRAM size (upper 16 bits, in KB) from the directory entry at
 * offset 0xf0; fall back to the 128KB default if absent or unreadable.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			tp->nvram_size = (val >> 16) * 1024;
			return;
		}
	}
	tp->nvram_size = 0x20000;
}
8542
/* Decode NVRAM_CFG1 to determine the NVRAM part's vendor, page size and
 * buffering, and set the corresponding tg3_flags.  Only 5750/5780-class
 * chips encode a vendor field; older parts default to buffered Atmel.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* Non-flash parts must not bypass the compatibility logic. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	}
	else {
		/* Older chips: assume buffered Atmel AT45DB0x1B. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
8595
361b4ac2
MC
/* 5752-specific NVRAM probing: decode vendor, buffering, TPM protection
 * and page size from NVRAM_CFG1.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts encode their page size in CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
		case FLASH_5752PAGE_SIZE_256:
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5752PAGE_SIZE_512:
			tp->nvram_pagesize = 512;
			break;
		case FLASH_5752PAGE_SIZE_1K:
			tp->nvram_pagesize = 1024;
			break;
		case FLASH_5752PAGE_SIZE_2K:
			tp->nvram_pagesize = 2048;
			break;
		case FLASH_5752PAGE_SIZE_4K:
			tp->nvram_pagesize = 4096;
			break;
		case FLASH_5752PAGE_SIZE_264:
			tp->nvram_pagesize = 264;
			break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
8656
1da177e4
LT
8657/* Chips other than 5700/5701 use the NVRAM for fetching info. */
8658static void __devinit tg3_nvram_init(struct tg3 *tp)
8659{
8660 int j;
8661
8662 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8663 return;
8664
8665 tw32_f(GRC_EEPROM_ADDR,
8666 (EEPROM_ADDR_FSM_RESET |
8667 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8668 EEPROM_ADDR_CLKPERD_SHIFT)));
8669
8670 /* XXX schedule_timeout() ... */
8671 for (j = 0; j < 100; j++)
8672 udelay(10);
8673
8674 /* Enable seeprom accesses. */
8675 tw32_f(GRC_LOCAL_CTRL,
8676 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8677 udelay(100);
8678
8679 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8680 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8681 tp->tg3_flags |= TG3_FLAG_NVRAM;
8682
ec41c7df
MC
8683 if (tg3_nvram_lock(tp)) {
8684 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
8685 "tg3_nvram_init failed.\n", tp->dev->name);
8686 return;
8687 }
e6af301b 8688 tg3_enable_nvram_access(tp);
1da177e4 8689
361b4ac2
MC
8690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8691 tg3_get_5752_nvram_info(tp);
8692 else
8693 tg3_get_nvram_info(tp);
8694
1da177e4
LT
8695 tg3_get_nvram_size(tp);
8696
e6af301b 8697 tg3_disable_nvram_access(tp);
381291b7 8698 tg3_nvram_unlock(tp);
1da177e4
LT
8699
8700 } else {
8701 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8702
8703 tg3_get_eeprom_size(tp);
8704 }
8705}
8706
8707static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8708 u32 offset, u32 *val)
8709{
8710 u32 tmp;
8711 int i;
8712
8713 if (offset > EEPROM_ADDR_ADDR_MASK ||
8714 (offset % 4) != 0)
8715 return -EINVAL;
8716
8717 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8718 EEPROM_ADDR_DEVID_MASK |
8719 EEPROM_ADDR_READ);
8720 tw32(GRC_EEPROM_ADDR,
8721 tmp |
8722 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8723 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8724 EEPROM_ADDR_ADDR_MASK) |
8725 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8726
8727 for (i = 0; i < 10000; i++) {
8728 tmp = tr32(GRC_EEPROM_ADDR);
8729
8730 if (tmp & EEPROM_ADDR_COMPLETE)
8731 break;
8732 udelay(100);
8733 }
8734 if (!(tmp & EEPROM_ADDR_COMPLETE))
8735 return -EBUSY;
8736
8737 *val = tr32(GRC_EEPROM_DATA);
8738 return 0;
8739}
8740
8741#define NVRAM_CMD_TIMEOUT 10000
8742
8743static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8744{
8745 int i;
8746
8747 tw32(NVRAM_CMD, nvram_cmd);
8748 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8749 udelay(10);
8750 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8751 udelay(10);
8752 break;
8753 }
8754 }
8755 if (i == NVRAM_CMD_TIMEOUT) {
8756 return -EBUSY;
8757 }
8758 return 0;
8759}
8760
8761static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8762{
8763 int ret;
8764
8765 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8766 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8767 return -EINVAL;
8768 }
8769
8770 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8771 return tg3_nvram_read_using_eeprom(tp, offset, val);
8772
8773 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8774 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8775 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8776
8777 offset = ((offset / tp->nvram_pagesize) <<
8778 ATMEL_AT45DB0X1B_PAGE_POS) +
8779 (offset % tp->nvram_pagesize);
8780 }
8781
8782 if (offset > NVRAM_ADDR_MSK)
8783 return -EINVAL;
8784
ec41c7df
MC
8785 ret = tg3_nvram_lock(tp);
8786 if (ret)
8787 return ret;
1da177e4 8788
e6af301b 8789 tg3_enable_nvram_access(tp);
1da177e4
LT
8790
8791 tw32(NVRAM_ADDR, offset);
8792 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8793 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8794
8795 if (ret == 0)
8796 *val = swab32(tr32(NVRAM_RDDATA));
8797
e6af301b 8798 tg3_disable_nvram_access(tp);
1da177e4 8799
381291b7
MC
8800 tg3_nvram_unlock(tp);
8801
1da177e4
LT
8802 return ret;
8803}
8804
8805static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8806 u32 offset, u32 len, u8 *buf)
8807{
8808 int i, j, rc = 0;
8809 u32 val;
8810
8811 for (i = 0; i < len; i += 4) {
8812 u32 addr, data;
8813
8814 addr = offset + i;
8815
8816 memcpy(&data, buf + i, 4);
8817
8818 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8819
8820 val = tr32(GRC_EEPROM_ADDR);
8821 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8822
8823 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8824 EEPROM_ADDR_READ);
8825 tw32(GRC_EEPROM_ADDR, val |
8826 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8827 (addr & EEPROM_ADDR_ADDR_MASK) |
8828 EEPROM_ADDR_START |
8829 EEPROM_ADDR_WRITE);
8830
8831 for (j = 0; j < 10000; j++) {
8832 val = tr32(GRC_EEPROM_ADDR);
8833
8834 if (val & EEPROM_ADDR_COMPLETE)
8835 break;
8836 udelay(100);
8837 }
8838 if (!(val & EEPROM_ADDR_COMPLETE)) {
8839 rc = -EBUSY;
8840 break;
8841 }
8842 }
8843
8844 return rc;
8845}
8846
8847/* offset and length are dword aligned */
8848static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8849 u8 *buf)
8850{
8851 int ret = 0;
8852 u32 pagesize = tp->nvram_pagesize;
8853 u32 pagemask = pagesize - 1;
8854 u32 nvram_cmd;
8855 u8 *tmp;
8856
8857 tmp = kmalloc(pagesize, GFP_KERNEL);
8858 if (tmp == NULL)
8859 return -ENOMEM;
8860
8861 while (len) {
8862 int j;
e6af301b 8863 u32 phy_addr, page_off, size;
1da177e4
LT
8864
8865 phy_addr = offset & ~pagemask;
8866
8867 for (j = 0; j < pagesize; j += 4) {
8868 if ((ret = tg3_nvram_read(tp, phy_addr + j,
8869 (u32 *) (tmp + j))))
8870 break;
8871 }
8872 if (ret)
8873 break;
8874
8875 page_off = offset & pagemask;
8876 size = pagesize;
8877 if (len < size)
8878 size = len;
8879
8880 len -= size;
8881
8882 memcpy(tmp + page_off, buf, size);
8883
8884 offset = offset + (pagesize - page_off);
8885
e6af301b 8886 tg3_enable_nvram_access(tp);
1da177e4
LT
8887
8888 /*
8889 * Before we can erase the flash page, we need
8890 * to issue a special "write enable" command.
8891 */
8892 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8893
8894 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8895 break;
8896
8897 /* Erase the target page */
8898 tw32(NVRAM_ADDR, phy_addr);
8899
8900 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8901 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8902
8903 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8904 break;
8905
8906 /* Issue another write enable to start the write. */
8907 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8908
8909 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8910 break;
8911
8912 for (j = 0; j < pagesize; j += 4) {
8913 u32 data;
8914
8915 data = *((u32 *) (tmp + j));
8916 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8917
8918 tw32(NVRAM_ADDR, phy_addr + j);
8919
8920 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8921 NVRAM_CMD_WR;
8922
8923 if (j == 0)
8924 nvram_cmd |= NVRAM_CMD_FIRST;
8925 else if (j == (pagesize - 4))
8926 nvram_cmd |= NVRAM_CMD_LAST;
8927
8928 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8929 break;
8930 }
8931 if (ret)
8932 break;
8933 }
8934
8935 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8936 tg3_nvram_exec_cmd(tp, nvram_cmd);
8937
8938 kfree(tmp);
8939
8940 return ret;
8941}
8942
8943/* offset and length are dword aligned */
8944static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8945 u8 *buf)
8946{
8947 int i, ret = 0;
8948
8949 for (i = 0; i < len; i += 4, offset += 4) {
8950 u32 data, page_off, phy_addr, nvram_cmd;
8951
8952 memcpy(&data, buf + i, 4);
8953 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8954
8955 page_off = offset % tp->nvram_pagesize;
8956
8957 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8958 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8959
8960 phy_addr = ((offset / tp->nvram_pagesize) <<
8961 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8962 }
8963 else {
8964 phy_addr = offset;
8965 }
8966
8967 tw32(NVRAM_ADDR, phy_addr);
8968
8969 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8970
8971 if ((page_off == 0) || (i == 0))
8972 nvram_cmd |= NVRAM_CMD_FIRST;
8973 else if (page_off == (tp->nvram_pagesize - 4))
8974 nvram_cmd |= NVRAM_CMD_LAST;
8975
8976 if (i == (len - 4))
8977 nvram_cmd |= NVRAM_CMD_LAST;
8978
4c987487
MC
8979 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8980 (tp->nvram_jedecnum == JEDEC_ST) &&
8981 (nvram_cmd & NVRAM_CMD_FIRST)) {
1da177e4
LT
8982
8983 if ((ret = tg3_nvram_exec_cmd(tp,
8984 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8985 NVRAM_CMD_DONE)))
8986
8987 break;
8988 }
8989 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8990 /* We always do complete word writes to eeprom. */
8991 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8992 }
8993
8994 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8995 break;
8996 }
8997 return ret;
8998}
8999
9000/* offset and length are dword aligned */
9001static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9002{
9003 int ret;
9004
9005 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9006 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9007 return -EINVAL;
9008 }
9009
9010 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
9011 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9012 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
9013 udelay(40);
9014 }
9015
9016 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9017 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9018 }
9019 else {
9020 u32 grc_mode;
9021
ec41c7df
MC
9022 ret = tg3_nvram_lock(tp);
9023 if (ret)
9024 return ret;
1da177e4 9025
e6af301b
MC
9026 tg3_enable_nvram_access(tp);
9027 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9028 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 9029 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
9030
9031 grc_mode = tr32(GRC_MODE);
9032 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9033
9034 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9035 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9036
9037 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9038 buf);
9039 }
9040 else {
9041 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9042 buf);
9043 }
9044
9045 grc_mode = tr32(GRC_MODE);
9046 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9047
e6af301b 9048 tg3_disable_nvram_access(tp);
1da177e4
LT
9049 tg3_nvram_unlock(tp);
9050 }
9051
9052 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 9053 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
9054 udelay(40);
9055 }
9056
9057 return ret;
9058}
9059
9060struct subsys_tbl_ent {
9061 u16 subsys_vendor, subsys_devid;
9062 u32 phy_id;
9063};
9064
9065static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9066 /* Broadcom boards. */
9067 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9068 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9069 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9070 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9071 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9072 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9073 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9074 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9075 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9076 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9077 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9078
9079 /* 3com boards. */
9080 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9081 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9082 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9083 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9084 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9085
9086 /* DELL boards. */
9087 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9088 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9089 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9090 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9091
9092 /* Compaq boards. */
9093 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9094 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9095 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9096 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9097 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9098
9099 /* IBM boards. */
9100 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9101};
9102
9103static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9104{
9105 int i;
9106
9107 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9108 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9109 tp->pdev->subsystem_vendor) &&
9110 (subsys_id_to_phy_id[i].subsys_devid ==
9111 tp->pdev->subsystem_device))
9112 return &subsys_id_to_phy_id[i];
9113 }
9114 return NULL;
9115}
9116
7d0c41ef
MC
9117/* Since this function may be called in D3-hot power state during
9118 * tg3_init_one(), only config cycles are allowed.
9119 */
9120static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 9121{
1da177e4 9122 u32 val;
7d0c41ef
MC
9123
9124 /* Make sure register accesses (indirect or otherwise)
9125 * will function correctly.
9126 */
9127 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9128 tp->misc_host_ctrl);
1da177e4
LT
9129
9130 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
9131 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9132
72b845e0
DM
9133 /* Do not even try poking around in here on Sun parts. */
9134 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9135 return;
9136
1da177e4
LT
9137 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9138 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9139 u32 nic_cfg, led_cfg;
7d0c41ef
MC
9140 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9141 int eeprom_phy_serdes = 0;
1da177e4
LT
9142
9143 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9144 tp->nic_sram_data_cfg = nic_cfg;
9145
9146 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9147 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9148 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9149 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9150 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9151 (ver > 0) && (ver < 0x100))
9152 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9153
1da177e4
LT
9154 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9155 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9156 eeprom_phy_serdes = 1;
9157
9158 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9159 if (nic_phy_id != 0) {
9160 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9161 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9162
9163 eeprom_phy_id = (id1 >> 16) << 10;
9164 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9165 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9166 } else
9167 eeprom_phy_id = 0;
9168
7d0c41ef 9169 tp->phy_id = eeprom_phy_id;
747e8f8b 9170 if (eeprom_phy_serdes) {
a4e2b347 9171 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
747e8f8b
MC
9172 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9173 else
9174 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9175 }
7d0c41ef 9176
cbf46853 9177 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9178 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9179 SHASTA_EXT_LED_MODE_MASK);
cbf46853 9180 else
1da177e4
LT
9181 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9182
9183 switch (led_cfg) {
9184 default:
9185 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9186 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9187 break;
9188
9189 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9190 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9191 break;
9192
9193 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9194 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
9195
9196 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9197 * read on some older 5700/5701 bootcode.
9198 */
9199 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9200 ASIC_REV_5700 ||
9201 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9202 ASIC_REV_5701)
9203 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9204
1da177e4
LT
9205 break;
9206
9207 case SHASTA_EXT_LED_SHARED:
9208 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9209 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9210 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9211 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9212 LED_CTRL_MODE_PHY_2);
9213 break;
9214
9215 case SHASTA_EXT_LED_MAC:
9216 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9217 break;
9218
9219 case SHASTA_EXT_LED_COMBO:
9220 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9221 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9222 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9223 LED_CTRL_MODE_PHY_2);
9224 break;
9225
9226 };
9227
9228 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9230 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9231 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9232
9233 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9234 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9235 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9236 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9237
9238 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9239 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 9240 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9241 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9242 }
9243 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9244 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9245
9246 if (cfg2 & (1 << 17))
9247 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9248
9249 /* serdes signal pre-emphasis in register 0x590 set by */
9250 /* bootcode if bit 18 is set */
9251 if (cfg2 & (1 << 18))
9252 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9253 }
7d0c41ef
MC
9254}
9255
9256static int __devinit tg3_phy_probe(struct tg3 *tp)
9257{
9258 u32 hw_phy_id_1, hw_phy_id_2;
9259 u32 hw_phy_id, hw_phy_id_masked;
9260 int err;
1da177e4
LT
9261
9262 /* Reading the PHY ID register can conflict with ASF
9263 * firwmare access to the PHY hardware.
9264 */
9265 err = 0;
9266 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9267 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9268 } else {
9269 /* Now read the physical PHY_ID from the chip and verify
9270 * that it is sane. If it doesn't look good, we fall back
9271 * to either the hard-coded table based PHY_ID and failing
9272 * that the value found in the eeprom area.
9273 */
9274 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9275 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9276
9277 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9278 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9279 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9280
9281 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9282 }
9283
9284 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9285 tp->phy_id = hw_phy_id;
9286 if (hw_phy_id_masked == PHY_ID_BCM8002)
9287 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
da6b2d01
MC
9288 else
9289 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
1da177e4 9290 } else {
7d0c41ef
MC
9291 if (tp->phy_id != PHY_ID_INVALID) {
9292 /* Do nothing, phy ID already set up in
9293 * tg3_get_eeprom_hw_cfg().
9294 */
1da177e4
LT
9295 } else {
9296 struct subsys_tbl_ent *p;
9297
9298 /* No eeprom signature? Try the hardcoded
9299 * subsys device table.
9300 */
9301 p = lookup_by_subsys(tp);
9302 if (!p)
9303 return -ENODEV;
9304
9305 tp->phy_id = p->phy_id;
9306 if (!tp->phy_id ||
9307 tp->phy_id == PHY_ID_BCM8002)
9308 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9309 }
9310 }
9311
747e8f8b 9312 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
1da177e4
LT
9313 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9314 u32 bmsr, adv_reg, tg3_ctrl;
9315
9316 tg3_readphy(tp, MII_BMSR, &bmsr);
9317 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9318 (bmsr & BMSR_LSTATUS))
9319 goto skip_phy_reset;
9320
9321 err = tg3_phy_reset(tp);
9322 if (err)
9323 return err;
9324
9325 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9326 ADVERTISE_100HALF | ADVERTISE_100FULL |
9327 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9328 tg3_ctrl = 0;
9329 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9330 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9331 MII_TG3_CTRL_ADV_1000_FULL);
9332 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9333 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9334 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9335 MII_TG3_CTRL_ENABLE_AS_MASTER);
9336 }
9337
9338 if (!tg3_copper_is_advertising_all(tp)) {
9339 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9340
9341 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9342 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9343
9344 tg3_writephy(tp, MII_BMCR,
9345 BMCR_ANENABLE | BMCR_ANRESTART);
9346 }
9347 tg3_phy_set_wirespeed(tp);
9348
9349 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9350 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9351 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9352 }
9353
9354skip_phy_reset:
9355 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9356 err = tg3_init_5401phy_dsp(tp);
9357 if (err)
9358 return err;
9359 }
9360
9361 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9362 err = tg3_init_5401phy_dsp(tp);
9363 }
9364
747e8f8b 9365 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1da177e4
LT
9366 tp->link_config.advertising =
9367 (ADVERTISED_1000baseT_Half |
9368 ADVERTISED_1000baseT_Full |
9369 ADVERTISED_Autoneg |
9370 ADVERTISED_FIBRE);
9371 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9372 tp->link_config.advertising &=
9373 ~(ADVERTISED_1000baseT_Half |
9374 ADVERTISED_1000baseT_Full);
9375
9376 return err;
9377}
9378
9379static void __devinit tg3_read_partno(struct tg3 *tp)
9380{
9381 unsigned char vpd_data[256];
9382 int i;
9383
9384 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9385 /* Sun decided not to put the necessary bits in the
9386 * NVRAM of their onboard tg3 parts :(
9387 */
9388 strcpy(tp->board_part_number, "Sun 570X");
9389 return;
9390 }
9391
9392 for (i = 0; i < 256; i += 4) {
9393 u32 tmp;
9394
9395 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9396 goto out_not_found;
9397
9398 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9399 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9400 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9401 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9402 }
9403
9404 /* Now parse and find the part number. */
9405 for (i = 0; i < 256; ) {
9406 unsigned char val = vpd_data[i];
9407 int block_end;
9408
9409 if (val == 0x82 || val == 0x91) {
9410 i = (i + 3 +
9411 (vpd_data[i + 1] +
9412 (vpd_data[i + 2] << 8)));
9413 continue;
9414 }
9415
9416 if (val != 0x90)
9417 goto out_not_found;
9418
9419 block_end = (i + 3 +
9420 (vpd_data[i + 1] +
9421 (vpd_data[i + 2] << 8)));
9422 i += 3;
9423 while (i < block_end) {
9424 if (vpd_data[i + 0] == 'P' &&
9425 vpd_data[i + 1] == 'N') {
9426 int partno_len = vpd_data[i + 2];
9427
9428 if (partno_len > 24)
9429 goto out_not_found;
9430
9431 memcpy(tp->board_part_number,
9432 &vpd_data[i + 3],
9433 partno_len);
9434
9435 /* Success. */
9436 return;
9437 }
9438 }
9439
9440 /* Part number not found. */
9441 goto out_not_found;
9442 }
9443
9444out_not_found:
9445 strcpy(tp->board_part_number, "none");
9446}
9447
#ifdef CONFIG_SPARC64
/* Return 1 if this device is a Sun onboard 570X part (identified via
 * OpenFirmware properties), 0 otherwise.
 */
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
{
	struct pcidev_cookie *pcp = tp->pdev->sysdata;

	if (pcp != NULL) {
		u32 venid;
		int err;

		err = prom_getproperty(pcp->prom_node,
				       "subsystem-vendor-id",
				       (char *) &venid, sizeof(venid));
		if (err == 0 || err == -1)
			return 0;
		if (venid == PCI_VENDOR_ID_SUN)
			return 1;

		/* TG3 chips onboard the SunBlade-2500 don't have the
		 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
		 * are distinguishable from non-Sun variants by being
		 * named "network" by the firmware.  Non-Sun cards will
		 * show up as being named "ethernet".
		 */
		if (!strcmp(pcp->prom_name, "network"))
			return 1;
	}
	return 0;
}
#endif
9478
9479static int __devinit tg3_get_invariants(struct tg3 *tp)
9480{
9481 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
9482 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9483 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
399de50b
MC
9484 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9485 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
9486 { },
9487 };
9488 u32 misc_ctrl_reg;
9489 u32 cacheline_sz_reg;
9490 u32 pci_state_reg, grc_misc_cfg;
9491 u32 val;
9492 u16 pci_cmd;
9493 int err;
9494
9495#ifdef CONFIG_SPARC64
9496 if (tg3_is_sun_570X(tp))
9497 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9498#endif
9499
1da177e4
LT
9500 /* Force memory write invalidate off. If we leave it on,
9501 * then on 5700_BX chips we have to enable a workaround.
9502 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9503 * to match the cacheline size. The Broadcom driver have this
9504 * workaround but turns MWI off all the times so never uses
9505 * it. This seems to suggest that the workaround is insufficient.
9506 */
9507 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9508 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9509 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9510
9511 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9512 * has the register indirect write enable bit set before
9513 * we try to access any of the MMIO registers. It is also
9514 * critical that the PCI-X hw workaround situation is decided
9515 * before that as well.
9516 */
9517 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9518 &misc_ctrl_reg);
9519
9520 tp->pci_chip_rev_id = (misc_ctrl_reg >>
9521 MISC_HOST_CTRL_CHIPREV_SHIFT);
9522
ff645bec
MC
9523 /* Wrong chip ID in 5752 A0. This code can be removed later
9524 * as A0 is not in production.
9525 */
9526 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9527 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9528
6892914f
MC
9529 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9530 * we need to disable memory and use config. cycles
9531 * only to access all registers. The 5702/03 chips
9532 * can mistakenly decode the special cycles from the
9533 * ICH chipsets as memory write cycles, causing corruption
9534 * of register and memory space. Only certain ICH bridges
9535 * will drive special cycles with non-zero data during the
9536 * address phase which can fall within the 5703's address
9537 * range. This is not an ICH bug as the PCI spec allows
9538 * non-zero address during special cycles. However, only
9539 * these ICH bridges are known to drive non-zero addresses
9540 * during special cycles.
9541 *
9542 * Since special cycles do not cross PCI bridges, we only
9543 * enable this workaround if the 5703 is on the secondary
9544 * bus of these ICH bridges.
9545 */
9546 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9547 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9548 static struct tg3_dev_id {
9549 u32 vendor;
9550 u32 device;
9551 u32 rev;
9552 } ich_chipsets[] = {
9553 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9554 PCI_ANY_ID },
9555 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9556 PCI_ANY_ID },
9557 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9558 0xa },
9559 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9560 PCI_ANY_ID },
9561 { },
9562 };
9563 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9564 struct pci_dev *bridge = NULL;
9565
9566 while (pci_id->vendor != 0) {
9567 bridge = pci_get_device(pci_id->vendor, pci_id->device,
9568 bridge);
9569 if (!bridge) {
9570 pci_id++;
9571 continue;
9572 }
9573 if (pci_id->rev != PCI_ANY_ID) {
9574 u8 rev;
9575
9576 pci_read_config_byte(bridge, PCI_REVISION_ID,
9577 &rev);
9578 if (rev > pci_id->rev)
9579 continue;
9580 }
9581 if (bridge->subordinate &&
9582 (bridge->subordinate->number ==
9583 tp->pdev->bus->number)) {
9584
9585 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9586 pci_dev_put(bridge);
9587 break;
9588 }
9589 }
9590 }
9591
4a29cc2e
MC
9592 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9593 * DMA addresses > 40-bit. This bridge may have other additional
9594 * 57xx devices behind it in some 4-port NIC designs for example.
9595 * Any tg3 device found behind the bridge will also need the 40-bit
9596 * DMA workaround.
9597 */
a4e2b347
MC
9598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9600 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 9601 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 9602 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 9603 }
4a29cc2e
MC
9604 else {
9605 struct pci_dev *bridge = NULL;
9606
9607 do {
9608 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
9609 PCI_DEVICE_ID_SERVERWORKS_EPB,
9610 bridge);
9611 if (bridge && bridge->subordinate &&
9612 (bridge->subordinate->number <=
9613 tp->pdev->bus->number) &&
9614 (bridge->subordinate->subordinate >=
9615 tp->pdev->bus->number)) {
9616 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9617 pci_dev_put(bridge);
9618 break;
9619 }
9620 } while (bridge);
9621 }
4cf78e4f 9622
1da177e4
LT
9623 /* Initialize misc host control in PCI block. */
9624 tp->misc_host_ctrl |= (misc_ctrl_reg &
9625 MISC_HOST_CTRL_CHIPREV);
9626 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9627 tp->misc_host_ctrl);
9628
9629 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9630 &cacheline_sz_reg);
9631
9632 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
9633 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
9634 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
9635 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
9636
6708e5cc 9637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 9638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
a4e2b347 9639 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
9640 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9641
1b440c56
JL
9642 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9643 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9644 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9645
bb7064dc 9646 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9647 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9648
0f893dc6
MC
9649 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9650 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9651 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9652 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9653
1da177e4
LT
9654 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9655 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9656
399de50b
MC
9657 /* If we have an AMD 762 or VIA K8T800 chipset, write
9658 * reordering to the mailbox registers done by the host
9659 * controller can cause major troubles. We read back from
9660 * every mailbox register write to force the writes to be
9661 * posted to the chip in order.
9662 */
9663 if (pci_dev_present(write_reorder_chipsets) &&
9664 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9665 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9666
1da177e4
LT
9667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9668 tp->pci_lat_timer < 64) {
9669 tp->pci_lat_timer = 64;
9670
9671 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
9672 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
9673 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
9674 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
9675
9676 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9677 cacheline_sz_reg);
9678 }
9679
9680 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9681 &pci_state_reg);
9682
9683 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9684 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9685
9686 /* If this is a 5700 BX chipset, and we are in PCI-X
9687 * mode, enable register write workaround.
9688 *
9689 * The workaround is to use indirect register accesses
9690 * for all chip writes not to mailbox registers.
9691 */
9692 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9693 u32 pm_reg;
9694 u16 pci_cmd;
9695
9696 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9697
9698 /* The chip can have it's power management PCI config
9699 * space registers clobbered due to this bug.
9700 * So explicitly force the chip into D0 here.
9701 */
9702 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9703 &pm_reg);
9704 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9705 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9706 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9707 pm_reg);
9708
9709 /* Also, force SERR#/PERR# in PCI command. */
9710 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9711 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9712 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9713 }
9714 }
9715
087fe256
MC
9716 /* 5700 BX chips need to have their TX producer index mailboxes
9717 * written twice to workaround a bug.
9718 */
9719 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9720 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9721
1da177e4
LT
9722 /* Back to back register writes can cause problems on this chip,
9723 * the workaround is to read back all reg writes except those to
9724 * mailbox regs. See tg3_write_indirect_reg32().
9725 *
9726 * PCI Express 5750_A0 rev chips need this workaround too.
9727 */
9728 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9729 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9730 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9731 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9732
9733 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9734 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9735 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9736 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9737
9738 /* Chip-specific fixup from Broadcom driver */
9739 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9740 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9741 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9742 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9743 }
9744
1ee582d8 9745 /* Default fast path register access methods */
20094930 9746 tp->read32 = tg3_read32;
1ee582d8 9747 tp->write32 = tg3_write32;
09ee929c 9748 tp->read32_mbox = tg3_read32;
20094930 9749 tp->write32_mbox = tg3_write32;
1ee582d8
MC
9750 tp->write32_tx_mbox = tg3_write32;
9751 tp->write32_rx_mbox = tg3_write32;
9752
9753 /* Various workaround register access methods */
9754 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9755 tp->write32 = tg3_write_indirect_reg32;
9756 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9757 tp->write32 = tg3_write_flush_reg32;
9758
9759 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9760 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9761 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9762 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9763 tp->write32_rx_mbox = tg3_write_flush_reg32;
9764 }
20094930 9765
6892914f
MC
9766 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9767 tp->read32 = tg3_read_indirect_reg32;
9768 tp->write32 = tg3_write_indirect_reg32;
9769 tp->read32_mbox = tg3_read_indirect_mbox;
9770 tp->write32_mbox = tg3_write_indirect_mbox;
9771 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9772 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9773
9774 iounmap(tp->regs);
22abe310 9775 tp->regs = NULL;
6892914f
MC
9776
9777 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9778 pci_cmd &= ~PCI_COMMAND_MEMORY;
9779 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9780 }
9781
7d0c41ef
MC
9782 /* Get eeprom hw config before calling tg3_set_power_state().
9783 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9784 * determined before calling tg3_set_power_state() so that
9785 * we know whether or not to switch out of Vaux power.
9786 * When the flag is set, it means that GPIO1 is used for eeprom
9787 * write protect and also implies that it is a LOM where GPIOs
9788 * are not used to switch power.
9789 */
9790 tg3_get_eeprom_hw_cfg(tp);
9791
314fba34
MC
9792 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9793 * GPIO1 driven high will bring 5700's external PHY out of reset.
9794 * It is also used as eeprom write protect on LOMs.
9795 */
9796 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9797 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9798 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9799 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9800 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
9801 /* Unused GPIO3 must be driven as output on 5752 because there
9802 * are no pull-up resistors on unused GPIO pins.
9803 */
9804 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9805 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 9806
1da177e4
LT
9807 /* Force the chip into D0. */
9808 err = tg3_set_power_state(tp, 0);
9809 if (err) {
9810 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9811 pci_name(tp->pdev));
9812 return err;
9813 }
9814
9815 /* 5700 B0 chips do not support checksumming correctly due
9816 * to hardware bugs.
9817 */
9818 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9819 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9820
9821 /* Pseudo-header checksum is done by hardware logic and not
9822 * the offload processers, so make the chip do the pseudo-
9823 * header checksums on receive. For transmit it is more
9824 * convenient to do the pseudo-header checksum in software
9825 * as Linux does that on transmit for us in all cases.
9826 */
9827 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9828 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9829
9830 /* Derive initial jumbo mode from MTU assigned in
9831 * ether_setup() via the alloc_etherdev() call
9832 */
0f893dc6 9833 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 9834 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 9835 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
9836
9837 /* Determine WakeOnLan speed to use. */
9838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9839 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9840 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9841 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9842 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9843 } else {
9844 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9845 }
9846
9847 /* A few boards don't want Ethernet@WireSpeed phy feature */
9848 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9849 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9850 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b
MC
9851 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9852 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
9853 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9854
9855 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9856 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9857 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9858 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9859 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9860
bb7064dc 9861 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
1da177e4
LT
9862 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9863
1da177e4 9864 tp->coalesce_mode = 0;
1da177e4
LT
9865 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9866 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9867 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9868
9869 /* Initialize MAC MI mode, polling disabled. */
9870 tw32_f(MAC_MI_MODE, tp->mi_mode);
9871 udelay(80);
9872
9873 /* Initialize data/descriptor byte/word swapping. */
9874 val = tr32(GRC_MODE);
9875 val &= GRC_MODE_HOST_STACKUP;
9876 tw32(GRC_MODE, val | tp->grc_mode);
9877
9878 tg3_switch_clocks(tp);
9879
9880 /* Clear this out for sanity. */
9881 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9882
9883 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9884 &pci_state_reg);
9885 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9886 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9887 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9888
9889 if (chiprevid == CHIPREV_ID_5701_A0 ||
9890 chiprevid == CHIPREV_ID_5701_B0 ||
9891 chiprevid == CHIPREV_ID_5701_B2 ||
9892 chiprevid == CHIPREV_ID_5701_B5) {
9893 void __iomem *sram_base;
9894
9895 /* Write some dummy words into the SRAM status block
9896 * area, see if it reads back correctly. If the return
9897 * value is bad, force enable the PCIX workaround.
9898 */
9899 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9900
9901 writel(0x00000000, sram_base);
9902 writel(0x00000000, sram_base + 4);
9903 writel(0xffffffff, sram_base + 4);
9904 if (readl(sram_base) != 0x00000000)
9905 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9906 }
9907 }
9908
9909 udelay(50);
9910 tg3_nvram_init(tp);
9911
9912 grc_misc_cfg = tr32(GRC_MISC_CFG);
9913 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9914
9915 /* Broadcom's driver says that CIOBE multisplit has a bug */
9916#if 0
9917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9918 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9919 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9920 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9921 }
9922#endif
9923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9924 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9925 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9926 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9927
fac9b83e
DM
9928 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9929 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9930 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9931 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9932 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9933 HOSTCC_MODE_CLRTICK_TXBD);
9934
9935 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9936 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9937 tp->misc_host_ctrl);
9938 }
9939
1da177e4
LT
9940 /* these are limited to 10/100 only */
9941 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9942 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9943 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9944 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9945 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9946 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9947 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9948 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9949 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9950 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9951 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9952
9953 err = tg3_phy_probe(tp);
9954 if (err) {
9955 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9956 pci_name(tp->pdev), err);
9957 /* ... but do not return immediately ... */
9958 }
9959
9960 tg3_read_partno(tp);
9961
9962 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9963 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9964 } else {
9965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9966 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9967 else
9968 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9969 }
9970
9971 /* 5700 {AX,BX} chips have a broken status block link
9972 * change bit implementation, so we must use the
9973 * status register in those cases.
9974 */
9975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9976 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9977 else
9978 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9979
9980 /* The led_ctrl is set during tg3_phy_probe, here we might
9981 * have to force the link status polling mechanism based
9982 * upon subsystem IDs.
9983 */
9984 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9985 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9986 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9987 TG3_FLAG_USE_LINKCHG_REG);
9988 }
9989
9990 /* For all SERDES we poll the MAC status register. */
9991 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9992 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9993 else
9994 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9995
1da177e4
LT
9996 /* It seems all chips can get confused if TX buffers
9997 * straddle the 4GB address boundary in some cases.
9998 */
9999 tp->dev->hard_start_xmit = tg3_start_xmit;
10000
10001 tp->rx_offset = 2;
10002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10003 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10004 tp->rx_offset = 0;
10005
10006 /* By default, disable wake-on-lan. User can change this
10007 * using ETHTOOL_SWOL.
10008 */
10009 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10010
10011 return err;
10012}
10013
10014#ifdef CONFIG_SPARC64
10015static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10016{
10017 struct net_device *dev = tp->dev;
10018 struct pci_dev *pdev = tp->pdev;
10019 struct pcidev_cookie *pcp = pdev->sysdata;
10020
10021 if (pcp != NULL) {
10022 int node = pcp->prom_node;
10023
10024 if (prom_getproplen(node, "local-mac-address") == 6) {
10025 prom_getproperty(node, "local-mac-address",
10026 dev->dev_addr, 6);
2ff43697 10027 memcpy(dev->perm_addr, dev->dev_addr, 6);
1da177e4
LT
10028 return 0;
10029 }
10030 }
10031 return -ENODEV;
10032}
10033
10034static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10035{
10036 struct net_device *dev = tp->dev;
10037
10038 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 10039 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
10040 return 0;
10041}
10042#endif
10043
10044static int __devinit tg3_get_device_address(struct tg3 *tp)
10045{
10046 struct net_device *dev = tp->dev;
10047 u32 hi, lo, mac_offset;
10048
10049#ifdef CONFIG_SPARC64
10050 if (!tg3_get_macaddr_sparc(tp))
10051 return 0;
10052#endif
10053
10054 mac_offset = 0x7c;
4cf78e4f
MC
10055 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10056 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
a4e2b347 10057 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
10058 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10059 mac_offset = 0xcc;
10060 if (tg3_nvram_lock(tp))
10061 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10062 else
10063 tg3_nvram_unlock(tp);
10064 }
10065
10066 /* First try to get it from MAC address mailbox. */
10067 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10068 if ((hi >> 16) == 0x484b) {
10069 dev->dev_addr[0] = (hi >> 8) & 0xff;
10070 dev->dev_addr[1] = (hi >> 0) & 0xff;
10071
10072 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10073 dev->dev_addr[2] = (lo >> 24) & 0xff;
10074 dev->dev_addr[3] = (lo >> 16) & 0xff;
10075 dev->dev_addr[4] = (lo >> 8) & 0xff;
10076 dev->dev_addr[5] = (lo >> 0) & 0xff;
10077 }
10078 /* Next, try NVRAM. */
10079 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
10080 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10081 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10082 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10083 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10084 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10085 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10086 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10087 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10088 }
10089 /* Finally just fetch it out of the MAC control regs. */
10090 else {
10091 hi = tr32(MAC_ADDR_0_HIGH);
10092 lo = tr32(MAC_ADDR_0_LOW);
10093
10094 dev->dev_addr[5] = lo & 0xff;
10095 dev->dev_addr[4] = (lo >> 8) & 0xff;
10096 dev->dev_addr[3] = (lo >> 16) & 0xff;
10097 dev->dev_addr[2] = (lo >> 24) & 0xff;
10098 dev->dev_addr[1] = hi & 0xff;
10099 dev->dev_addr[0] = (hi >> 8) & 0xff;
10100 }
10101
10102 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10103#ifdef CONFIG_SPARC64
10104 if (!tg3_get_default_macaddr_sparc(tp))
10105 return 0;
10106#endif
10107 return -EINVAL;
10108 }
2ff43697 10109 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
10110 return 0;
10111}
10112
59e6b434
DM
10113#define BOUNDARY_SINGLE_CACHELINE 1
10114#define BOUNDARY_MULTI_CACHELINE 2
10115
/* Compute the DMA read/write boundary bits to merge into the
 * TG3PCI_DMA_RW_CTRL value 'val', based on the host cacheline size
 * and the bus type (PCI / PCI-X / PCI Express).
 *
 * Returns 'val' with the appropriate DMA_RWCTRL_*_BNDRY_* bits set,
 * or unchanged when boundary tuning does not apply to this chip or
 * architecture.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI config reports the cacheline size in 4-byte units;
	 * a value of 0 means it was never programmed, so assume the
	 * largest boundary (1024).
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Pick a per-architecture burst policy; 0 means leave the
	 * chip defaults alone.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: only 128/256/384-byte encodings exist. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		};
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI Express: only the write boundary is controllable,
		 * and only 64- or 128-byte encodings exist.
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		};
	} else {
		/* Conventional PCI: the full set of boundary encodings
		 * (16 through 1024) is available.  For single-cacheline
		 * goals, match the boundary to the cacheline size; for
		 * multi-cacheline goals fall through to a larger value.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		};
	}

out:
	return val;
}
10252
1da177e4
LT
/* Run one DMA transfer through the chip's internal DMA engine to or
 * from the host buffer 'buf' (bus address 'buf_dma', 'size' bytes).
 * 'to_device' selects direction: non-zero = host-to-NIC (read DMA),
 * zero = NIC-to-host (write DMA).
 *
 * A single internal buffer descriptor is written into NIC SRAM via
 * the PCI memory window, queued on the appropriate FTQ, and then the
 * completion FIFO is polled (up to 40 x 100us = 4ms).
 *
 * Returns 0 when the descriptor completes, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the relevant FIFOs and DMA status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer; 0x2100 is the NIC-side mbuf the
	 * data is staged through.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Completion queue 13, submission queue 2 (read DMA). */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Completion queue 16, submission queue 7 (write DMA). */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI memory window, then close the window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for our descriptor address. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
10333
ded7340d 10334#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
10335
/* Select and program the DMA read/write control value, then (on
 * 5700/5701 only) run a loopback DMA test to detect the write DMA
 * bug those chips exhibit at large write burst sizes, reducing the
 * write boundary to 16 bytes if corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA fails even with the workaround.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI command codes for DMA writes (0x7) and reads (0x6). */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus-type and chip-specific watermark bits.  The magic values
	 * below come from Broadcom's reference driver.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (reassigned bits on these
	 * chips -- see the ASSERT_ALL_BE note below).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 have the write DMA bug; everything else is
	 * done once the control register is programmed.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: DMA a known pattern to the chip and back; on the first
	 * corruption retry once with the 16-byte write boundary, on a
	 * second corruption give up.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: retry with the 16-byte
				 * write-boundary workaround.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				/* Failed even with the workaround. */
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
10516
10517static void __devinit tg3_init_link_config(struct tg3 *tp)
10518{
10519 tp->link_config.advertising =
10520 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10521 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10522 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10523 ADVERTISED_Autoneg | ADVERTISED_MII);
10524 tp->link_config.speed = SPEED_INVALID;
10525 tp->link_config.duplex = DUPLEX_INVALID;
10526 tp->link_config.autoneg = AUTONEG_ENABLE;
10527 netif_carrier_off(tp->dev);
10528 tp->link_config.active_speed = SPEED_INVALID;
10529 tp->link_config.active_duplex = DUPLEX_INVALID;
10530 tp->link_config.phy_is_low_power = 0;
10531 tp->link_config.orig_speed = SPEED_INVALID;
10532 tp->link_config.orig_duplex = DUPLEX_INVALID;
10533 tp->link_config.orig_autoneg = AUTONEG_INVALID;
10534}
10535
10536static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10537{
fdfec172
MC
10538 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10539 tp->bufmgr_config.mbuf_read_dma_low_water =
10540 DEFAULT_MB_RDMA_LOW_WATER_5705;
10541 tp->bufmgr_config.mbuf_mac_rx_low_water =
10542 DEFAULT_MB_MACRX_LOW_WATER_5705;
10543 tp->bufmgr_config.mbuf_high_water =
10544 DEFAULT_MB_HIGH_WATER_5705;
10545
10546 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10547 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10548 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10549 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10550 tp->bufmgr_config.mbuf_high_water_jumbo =
10551 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10552 } else {
10553 tp->bufmgr_config.mbuf_read_dma_low_water =
10554 DEFAULT_MB_RDMA_LOW_WATER;
10555 tp->bufmgr_config.mbuf_mac_rx_low_water =
10556 DEFAULT_MB_MACRX_LOW_WATER;
10557 tp->bufmgr_config.mbuf_high_water =
10558 DEFAULT_MB_HIGH_WATER;
10559
10560 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10561 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10562 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10563 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10564 tp->bufmgr_config.mbuf_high_water_jumbo =
10565 DEFAULT_MB_HIGH_WATER_JUMBO;
10566 }
1da177e4
LT
10567
10568 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10569 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10570}
10571
10572static char * __devinit tg3_phy_string(struct tg3 *tp)
10573{
10574 switch (tp->phy_id & PHY_ID_MASK) {
10575 case PHY_ID_BCM5400: return "5400";
10576 case PHY_ID_BCM5401: return "5401";
10577 case PHY_ID_BCM5411: return "5411";
10578 case PHY_ID_BCM5701: return "5701";
10579 case PHY_ID_BCM5703: return "5703";
10580 case PHY_ID_BCM5704: return "5704";
10581 case PHY_ID_BCM5705: return "5705";
10582 case PHY_ID_BCM5750: return "5750";
85e94ced 10583 case PHY_ID_BCM5752: return "5752";
a4e2b347 10584 case PHY_ID_BCM5714: return "5714";
4cf78e4f 10585 case PHY_ID_BCM5780: return "5780";
1da177e4
LT
10586 case PHY_ID_BCM8002: return "8002/serdes";
10587 case 0: return "serdes";
10588 default: return "unknown";
10589 };
10590}
10591
f9804ddb
MC
10592static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10593{
10594 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10595 strcpy(str, "PCI Express");
10596 return str;
10597 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10598 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10599
10600 strcpy(str, "PCIX:");
10601
10602 if ((clock_ctrl == 7) ||
10603 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10604 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10605 strcat(str, "133MHz");
10606 else if (clock_ctrl == 0)
10607 strcat(str, "33MHz");
10608 else if (clock_ctrl == 2)
10609 strcat(str, "50MHz");
10610 else if (clock_ctrl == 4)
10611 strcat(str, "66MHz");
10612 else if (clock_ctrl == 6)
10613 strcat(str, "100MHz");
f9804ddb
MC
10614 } else {
10615 strcpy(str, "PCI:");
10616 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10617 strcat(str, "66MHz");
10618 else
10619 strcat(str, "33MHz");
10620 }
10621 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10622 strcat(str, ":32-bit");
10623 else
10624 strcat(str, ":64-bit");
10625 return str;
10626}
10627
/* Locate the sibling PCI function of a dual-port chip (5704/5714).
 * Scans all eight functions of this device's slot for a pci_dev
 * other than our own.  Returns tp->pdev itself when the chip is
 * strapped for single-port operation, so callers always receive a
 * valid pointer.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference pci_get_slot took; a NULL
		 * argument is a no-op.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
10655
15f9850d
DM
/* Initialize the default ethtool interrupt-coalescing parameters.
 *
 * Chips whose host coalescing engine supports the clear-ticks-on-BD
 * modes use slightly different tick values.  The 5705+ family lacks
 * the per-interrupt tick/frame limits and the statistics block
 * timer, so those fields are forced to zero there.
 */
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	/* CLRTICK modes need the alternate tick values. */
	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	/* 5705+ has no per-irq limits and no stats block timer. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
10686
1da177e4
LT
/* PCI probe entry point: bring up one Tigon3 device.
 *
 * Enables and maps the device, allocates the net_device, reads the
 * chip invariants, configures DMA masks (32/40/64-bit depending on
 * the bridge/ASIC quirks), resets any firmware-left DMA state, runs
 * the DMA engine self-test and finally registers the netdev.
 *
 * Returns 0 on success or a negative errno; on failure every
 * resource acquired so far is released via the goto-unwind labels
 * at the bottom (each label undoes one acquisition stage, in
 * reverse order).
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;	/* print banner only once */
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the netdev entry points. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide TSO capability: HW TSO always qualifies; firmware
	 * TSO is disabled on chips/configs known to be problematic.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is off by default, user can enable using ethtool. */
#if 0
	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
		dev->features |= NETIF_F_TSO;
#endif

#endif

	/* 5705_A1 without TSO on a slow bus needs a small RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips need to know their sibling function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Probe banner: part number, chip rev, PHY, bus and speed. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwind: each label releases one acquisition stage,
	 * in the reverse order of the happy path above.
	 */
err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11004
11005static void __devexit tg3_remove_one(struct pci_dev *pdev)
11006{
11007 struct net_device *dev = pci_get_drvdata(pdev);
11008
11009 if (dev) {
11010 struct tg3 *tp = netdev_priv(dev);
11011
7faa006f 11012 flush_scheduled_work();
1da177e4 11013 unregister_netdev(dev);
6892914f
MC
11014 if (tp->regs) {
11015 iounmap(tp->regs);
22abe310 11016 tp->regs = NULL;
6892914f 11017 }
1da177e4
LT
11018 free_netdev(dev);
11019 pci_release_regions(pdev);
11020 pci_disable_device(pdev);
11021 pci_set_drvdata(pdev, NULL);
11022 }
11023}
11024
/* Legacy PCI suspend hook.
 *
 * Quiesces the interface (stop queues, kill the timer, disable
 * interrupts, halt the chip) and then drops the device into the
 * requested PCI power state.  If the power transition fails, the
 * device is fully re-initialized and restarted so the system is
 * left in a working state, and the error is propagated.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	/* Make sure no reset_task runs while we tear down. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed -- bring the device back up. */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		tg3_init_hw(tp);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}
11068
/* Legacy PCI resume hook.
 *
 * Restores the PCI config space saved at probe/suspend time, powers
 * the chip back to D0, re-initializes the hardware and restarts the
 * timer and the network interface.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(tp->pdev);

	/* Back to full power (state 0) before touching registers. */
	err = tg3_set_power_state(tp, 0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_init_hw(tp);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
11100
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11109
/* Module entry point: register the PCI driver. */
static int __init tg3_init(void)
{
	return pci_module_init(&tg3_driver);
}
11114
/* Module exit point: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
11119
/* Hook the init/exit functions into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);