]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Call netif_carrier_off() during phy reset
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
14c85021 27#include <linux/in.h>
1da177e4
LT
28#include <linux/init.h>
29#include <linux/ioport.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/if_vlan.h>
37#include <linux/ip.h>
38#include <linux/tcp.h>
39#include <linux/workqueue.h>
61487480 40#include <linux/prefetch.h>
f9a5f7d3 41#include <linux/dma-mapping.h>
1da177e4
LT
42
43#include <net/checksum.h>
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
50#ifdef CONFIG_SPARC64
51#include <asm/idprom.h>
52#include <asm/oplib.h>
53#include <asm/pbm.h>
54#endif
55
56#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57#define TG3_VLAN_TAG_USED 1
58#else
59#define TG3_VLAN_TAG_USED 0
60#endif
61
62#ifdef NETIF_F_TSO
63#define TG3_TSO_SUPPORT 1
64#else
65#define TG3_TSO_SUPPORT 0
66#endif
67
68#include "tg3.h"
69
70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": "
5c516c10
DM
72#define DRV_MODULE_VERSION "3.56"
73#define DRV_MODULE_RELDATE "Apr 1, 2006"
1da177e4
LT
74
75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0
77#define TG3_DEF_TX_MODE 0
78#define TG3_DEF_MSG_ENABLE \
79 (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | \
83 NETIF_MSG_IFDOWN | \
84 NETIF_MSG_IFUP | \
85 NETIF_MSG_RX_ERR | \
86 NETIF_MSG_TX_ERR)
87
88/* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
90 */
91#define TG3_TX_TIMEOUT (5 * HZ)
92
93/* hardware minimum and maximum for a single frame's data payload */
94#define TG3_MIN_MTU 60
95#define TG3_MAX_MTU(tp) \
0f893dc6 96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
97
98/* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
101 */
102#define TG3_RX_RING_SIZE 512
103#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JUMBO_RING_SIZE 256
105#define TG3_DEF_RX_JUMBO_RING_PENDING 100
106
107/* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
112 */
113#define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115
116#define TG3_TX_RING_SIZE 512
117#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118
119#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RING_SIZE)
121#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE)
1da177e4 127#define TX_BUFFS_AVAIL(TP) \
51b91468
MC
128 ((TP)->tx_pending - \
129 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
1da177e4
LT
130#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134
135/* minimum number of free TX descriptors required to wake up TX process */
136#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137
138/* number of ETHTOOL_GSTATS u64's */
139#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
4cafd3f5
MC
141#define TG3_NUM_TEST 6
142
1da177e4
LT
143static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148MODULE_LICENSE("GPL");
149MODULE_VERSION(DRV_MODULE_VERSION);
150
151static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152module_param(tg3_debug, int, 0);
153MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155static struct pci_device_id tg3_pci_tbl[] = {
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
6e9017a7 214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
af2bcd97 215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d8659255
XVP
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1da177e4
LT
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d9ab5ad1
MC
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
af36e6b6
MC
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d9ab5ad1
MC
232 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
a4e2b347
MC
236 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d4d2c558
MC
238 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
a4e2b347
MC
240 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d4d2c558
MC
242 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
4cf78e4f
MC
244 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
245 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
247 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1da177e4
LT
248 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
249 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
251 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
253 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
255 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
257 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
261 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
263 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264 { 0, }
265};
266
267MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
268
269static struct {
270 const char string[ETH_GSTRING_LEN];
271} ethtool_stats_keys[TG3_NUM_STATS] = {
272 { "rx_octets" },
273 { "rx_fragments" },
274 { "rx_ucast_packets" },
275 { "rx_mcast_packets" },
276 { "rx_bcast_packets" },
277 { "rx_fcs_errors" },
278 { "rx_align_errors" },
279 { "rx_xon_pause_rcvd" },
280 { "rx_xoff_pause_rcvd" },
281 { "rx_mac_ctrl_rcvd" },
282 { "rx_xoff_entered" },
283 { "rx_frame_too_long_errors" },
284 { "rx_jabbers" },
285 { "rx_undersize_packets" },
286 { "rx_in_length_errors" },
287 { "rx_out_length_errors" },
288 { "rx_64_or_less_octet_packets" },
289 { "rx_65_to_127_octet_packets" },
290 { "rx_128_to_255_octet_packets" },
291 { "rx_256_to_511_octet_packets" },
292 { "rx_512_to_1023_octet_packets" },
293 { "rx_1024_to_1522_octet_packets" },
294 { "rx_1523_to_2047_octet_packets" },
295 { "rx_2048_to_4095_octet_packets" },
296 { "rx_4096_to_8191_octet_packets" },
297 { "rx_8192_to_9022_octet_packets" },
298
299 { "tx_octets" },
300 { "tx_collisions" },
301
302 { "tx_xon_sent" },
303 { "tx_xoff_sent" },
304 { "tx_flow_control" },
305 { "tx_mac_errors" },
306 { "tx_single_collisions" },
307 { "tx_mult_collisions" },
308 { "tx_deferred" },
309 { "tx_excessive_collisions" },
310 { "tx_late_collisions" },
311 { "tx_collide_2times" },
312 { "tx_collide_3times" },
313 { "tx_collide_4times" },
314 { "tx_collide_5times" },
315 { "tx_collide_6times" },
316 { "tx_collide_7times" },
317 { "tx_collide_8times" },
318 { "tx_collide_9times" },
319 { "tx_collide_10times" },
320 { "tx_collide_11times" },
321 { "tx_collide_12times" },
322 { "tx_collide_13times" },
323 { "tx_collide_14times" },
324 { "tx_collide_15times" },
325 { "tx_ucast_packets" },
326 { "tx_mcast_packets" },
327 { "tx_bcast_packets" },
328 { "tx_carrier_sense_errors" },
329 { "tx_discards" },
330 { "tx_errors" },
331
332 { "dma_writeq_full" },
333 { "dma_write_prioq_full" },
334 { "rxbds_empty" },
335 { "rx_discards" },
336 { "rx_errors" },
337 { "rx_threshold_hit" },
338
339 { "dma_readq_full" },
340 { "dma_read_prioq_full" },
341 { "tx_comp_queue_full" },
342
343 { "ring_set_send_prod_index" },
344 { "ring_status_update" },
345 { "nic_irqs" },
346 { "nic_avoided_irqs" },
347 { "nic_tx_threshold_hit" }
348};
349
4cafd3f5
MC
350static struct {
351 const char string[ETH_GSTRING_LEN];
352} ethtool_test_keys[TG3_NUM_TEST] = {
353 { "nvram test (online) " },
354 { "link test (online) " },
355 { "register test (offline)" },
356 { "memory test (offline)" },
357 { "loopback test (offline)" },
358 { "interrupt test (offline)" },
359};
360
b401e9e2
MC
361static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
362{
363 writel(val, tp->regs + off);
364}
365
366static u32 tg3_read32(struct tg3 *tp, u32 off)
367{
368 return (readl(tp->regs + off));
369}
370
1da177e4
LT
371static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
372{
6892914f
MC
373 unsigned long flags;
374
375 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
376 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
377 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 378 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
379}
380
381static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
382{
383 writel(val, tp->regs + off);
384 readl(tp->regs + off);
1da177e4
LT
385}
386
6892914f 387static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 388{
6892914f
MC
389 unsigned long flags;
390 u32 val;
391
392 spin_lock_irqsave(&tp->indirect_lock, flags);
393 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
394 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395 spin_unlock_irqrestore(&tp->indirect_lock, flags);
396 return val;
397}
398
399static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
400{
401 unsigned long flags;
402
403 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
404 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
405 TG3_64BIT_REG_LOW, val);
406 return;
407 }
408 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
409 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
410 TG3_64BIT_REG_LOW, val);
411 return;
1da177e4 412 }
6892914f
MC
413
414 spin_lock_irqsave(&tp->indirect_lock, flags);
415 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
416 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
417 spin_unlock_irqrestore(&tp->indirect_lock, flags);
418
419 /* In indirect mode when disabling interrupts, we also need
420 * to clear the interrupt bit in the GRC local ctrl register.
421 */
422 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
423 (val == 0x1)) {
424 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
425 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
426 }
427}
428
429static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
430{
431 unsigned long flags;
432 u32 val;
433
434 spin_lock_irqsave(&tp->indirect_lock, flags);
435 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
436 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
437 spin_unlock_irqrestore(&tp->indirect_lock, flags);
438 return val;
439}
440
b401e9e2
MC
441/* usec_wait specifies the wait time in usec when writing to certain registers
442 * where it is unsafe to read back the register without some delay.
443 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
444 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
445 */
446static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
6892914f 447{
b401e9e2
MC
448 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
449 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
450 /* Non-posted methods */
451 tp->write32(tp, off, val);
452 else {
453 /* Posted method */
454 tg3_write32(tp, off, val);
455 if (usec_wait)
456 udelay(usec_wait);
457 tp->read32(tp, off);
458 }
459 /* Wait again after the read for the posted method to guarantee that
460 * the wait time is met.
461 */
462 if (usec_wait)
463 udelay(usec_wait);
1da177e4
LT
464}
465
09ee929c
MC
466static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
467{
468 tp->write32_mbox(tp, off, val);
6892914f
MC
469 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
470 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
471 tp->read32_mbox(tp, off);
09ee929c
MC
472}
473
20094930 474static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
475{
476 void __iomem *mbox = tp->regs + off;
477 writel(val, mbox);
478 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
479 writel(val, mbox);
480 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
481 readl(mbox);
482}
483
20094930 484#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 485#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
486#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
487#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 488#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
489
490#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
491#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
492#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 493#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
494
495static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
496{
6892914f
MC
497 unsigned long flags;
498
499 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
500 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 503
bbadf503
MC
504 /* Always leave this as zero. */
505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
506 } else {
507 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 509
bbadf503
MC
510 /* Always leave this as zero. */
511 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
512 }
513 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
514}
515
1da177e4
LT
516static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
517{
6892914f
MC
518 unsigned long flags;
519
520 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
521 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
522 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
523 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 524
bbadf503
MC
525 /* Always leave this as zero. */
526 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
527 } else {
528 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
529 *val = tr32(TG3PCI_MEM_WIN_DATA);
530
531 /* Always leave this as zero. */
532 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
533 }
6892914f 534 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
535}
536
537static void tg3_disable_ints(struct tg3 *tp)
538{
539 tw32(TG3PCI_MISC_HOST_CTRL,
540 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c 541 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1da177e4
LT
542}
543
544static inline void tg3_cond_int(struct tg3 *tp)
545{
38f3843e
MC
546 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
547 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4
LT
548 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
549}
550
551static void tg3_enable_ints(struct tg3 *tp)
552{
bbe832c0
MC
553 tp->irq_sync = 0;
554 wmb();
555
1da177e4
LT
556 tw32(TG3PCI_MISC_HOST_CTRL,
557 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c
MC
558 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
559 (tp->last_tag << 24));
fcfa0a32
MC
560 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
561 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
562 (tp->last_tag << 24));
1da177e4
LT
563 tg3_cond_int(tp);
564}
565
04237ddd
MC
566static inline unsigned int tg3_has_work(struct tg3 *tp)
567{
568 struct tg3_hw_status *sblk = tp->hw_status;
569 unsigned int work_exists = 0;
570
571 /* check for phy events */
572 if (!(tp->tg3_flags &
573 (TG3_FLAG_USE_LINKCHG_REG |
574 TG3_FLAG_POLL_SERDES))) {
575 if (sblk->status & SD_STATUS_LINK_CHG)
576 work_exists = 1;
577 }
578 /* check for RX/TX work to do */
579 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
580 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
581 work_exists = 1;
582
583 return work_exists;
584}
585
1da177e4 586/* tg3_restart_ints
04237ddd
MC
587 * similar to tg3_enable_ints, but it accurately determines whether there
588 * is new work pending and can return without flushing the PIO write
589 * which reenables interrupts
1da177e4
LT
590 */
591static void tg3_restart_ints(struct tg3 *tp)
592{
fac9b83e
DM
593 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
594 tp->last_tag << 24);
1da177e4
LT
595 mmiowb();
596
fac9b83e
DM
597 /* When doing tagged status, this work check is unnecessary.
598 * The last_tag we write above tells the chip which piece of
599 * work we've completed.
600 */
601 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
602 tg3_has_work(tp))
04237ddd
MC
603 tw32(HOSTCC_MODE, tp->coalesce_mode |
604 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
605}
606
607static inline void tg3_netif_stop(struct tg3 *tp)
608{
bbe832c0 609 tp->dev->trans_start = jiffies; /* prevent tx timeout */
1da177e4
LT
610 netif_poll_disable(tp->dev);
611 netif_tx_disable(tp->dev);
612}
613
614static inline void tg3_netif_start(struct tg3 *tp)
615{
616 netif_wake_queue(tp->dev);
617 /* NOTE: unconditional netif_wake_queue is only appropriate
618 * so long as all callers are assured to have free tx slots
619 * (such as after tg3_init_hw)
620 */
621 netif_poll_enable(tp->dev);
f47c11ee
DM
622 tp->hw_status->status |= SD_STATUS_UPDATED;
623 tg3_enable_ints(tp);
1da177e4
LT
624}
625
626static void tg3_switch_clocks(struct tg3 *tp)
627{
628 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
629 u32 orig_clock_ctrl;
630
a4e2b347 631 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4cf78e4f
MC
632 return;
633
1da177e4
LT
634 orig_clock_ctrl = clock_ctrl;
635 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
636 CLOCK_CTRL_CLKRUN_OENABLE |
637 0x1f);
638 tp->pci_clock_ctrl = clock_ctrl;
639
640 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
641 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
b401e9e2
MC
642 tw32_wait_f(TG3PCI_CLOCK_CTRL,
643 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1da177e4
LT
644 }
645 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
b401e9e2
MC
646 tw32_wait_f(TG3PCI_CLOCK_CTRL,
647 clock_ctrl |
648 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
649 40);
650 tw32_wait_f(TG3PCI_CLOCK_CTRL,
651 clock_ctrl | (CLOCK_CTRL_ALTCLK),
652 40);
1da177e4 653 }
b401e9e2 654 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1da177e4
LT
655}
656
657#define PHY_BUSY_LOOPS 5000
658
659static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
660{
661 u32 frame_val;
662 unsigned int loops;
663 int ret;
664
665 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
666 tw32_f(MAC_MI_MODE,
667 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
668 udelay(80);
669 }
670
671 *val = 0x0;
672
673 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
674 MI_COM_PHY_ADDR_MASK);
675 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
676 MI_COM_REG_ADDR_MASK);
677 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
678
679 tw32_f(MAC_MI_COM, frame_val);
680
681 loops = PHY_BUSY_LOOPS;
682 while (loops != 0) {
683 udelay(10);
684 frame_val = tr32(MAC_MI_COM);
685
686 if ((frame_val & MI_COM_BUSY) == 0) {
687 udelay(5);
688 frame_val = tr32(MAC_MI_COM);
689 break;
690 }
691 loops -= 1;
692 }
693
694 ret = -EBUSY;
695 if (loops != 0) {
696 *val = frame_val & MI_COM_DATA_MASK;
697 ret = 0;
698 }
699
700 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
701 tw32_f(MAC_MI_MODE, tp->mi_mode);
702 udelay(80);
703 }
704
705 return ret;
706}
707
708static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
709{
710 u32 frame_val;
711 unsigned int loops;
712 int ret;
713
714 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
715 tw32_f(MAC_MI_MODE,
716 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
717 udelay(80);
718 }
719
720 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
721 MI_COM_PHY_ADDR_MASK);
722 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
723 MI_COM_REG_ADDR_MASK);
724 frame_val |= (val & MI_COM_DATA_MASK);
725 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
726
727 tw32_f(MAC_MI_COM, frame_val);
728
729 loops = PHY_BUSY_LOOPS;
730 while (loops != 0) {
731 udelay(10);
732 frame_val = tr32(MAC_MI_COM);
733 if ((frame_val & MI_COM_BUSY) == 0) {
734 udelay(5);
735 frame_val = tr32(MAC_MI_COM);
736 break;
737 }
738 loops -= 1;
739 }
740
741 ret = -EBUSY;
742 if (loops != 0)
743 ret = 0;
744
745 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
746 tw32_f(MAC_MI_MODE, tp->mi_mode);
747 udelay(80);
748 }
749
750 return ret;
751}
752
753static void tg3_phy_set_wirespeed(struct tg3 *tp)
754{
755 u32 val;
756
757 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
758 return;
759
760 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
761 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
762 tg3_writephy(tp, MII_TG3_AUX_CTRL,
763 (val | (1 << 15) | (1 << 4)));
764}
765
766static int tg3_bmcr_reset(struct tg3 *tp)
767{
768 u32 phy_control;
769 int limit, err;
770
771 /* OK, reset it, and poll the BMCR_RESET bit until it
772 * clears or we time out.
773 */
774 phy_control = BMCR_RESET;
775 err = tg3_writephy(tp, MII_BMCR, phy_control);
776 if (err != 0)
777 return -EBUSY;
778
779 limit = 5000;
780 while (limit--) {
781 err = tg3_readphy(tp, MII_BMCR, &phy_control);
782 if (err != 0)
783 return -EBUSY;
784
785 if ((phy_control & BMCR_RESET) == 0) {
786 udelay(40);
787 break;
788 }
789 udelay(10);
790 }
791 if (limit <= 0)
792 return -EBUSY;
793
794 return 0;
795}
796
797static int tg3_wait_macro_done(struct tg3 *tp)
798{
799 int limit = 100;
800
801 while (limit--) {
802 u32 tmp32;
803
804 if (!tg3_readphy(tp, 0x16, &tmp32)) {
805 if ((tmp32 & 0x1000) == 0)
806 break;
807 }
808 }
809 if (limit <= 0)
810 return -EBUSY;
811
812 return 0;
813}
814
815static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
816{
817 static const u32 test_pat[4][6] = {
818 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
819 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
820 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
821 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
822 };
823 int chan;
824
825 for (chan = 0; chan < 4; chan++) {
826 int i;
827
828 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
829 (chan * 0x2000) | 0x0200);
830 tg3_writephy(tp, 0x16, 0x0002);
831
832 for (i = 0; i < 6; i++)
833 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
834 test_pat[chan][i]);
835
836 tg3_writephy(tp, 0x16, 0x0202);
837 if (tg3_wait_macro_done(tp)) {
838 *resetp = 1;
839 return -EBUSY;
840 }
841
842 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
843 (chan * 0x2000) | 0x0200);
844 tg3_writephy(tp, 0x16, 0x0082);
845 if (tg3_wait_macro_done(tp)) {
846 *resetp = 1;
847 return -EBUSY;
848 }
849
850 tg3_writephy(tp, 0x16, 0x0802);
851 if (tg3_wait_macro_done(tp)) {
852 *resetp = 1;
853 return -EBUSY;
854 }
855
856 for (i = 0; i < 6; i += 2) {
857 u32 low, high;
858
859 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
860 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
861 tg3_wait_macro_done(tp)) {
862 *resetp = 1;
863 return -EBUSY;
864 }
865 low &= 0x7fff;
866 high &= 0x000f;
867 if (low != test_pat[chan][i] ||
868 high != test_pat[chan][i+1]) {
869 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
870 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
871 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
872
873 return -EBUSY;
874 }
875 }
876 }
877
878 return 0;
879}
880
881static int tg3_phy_reset_chanpat(struct tg3 *tp)
882{
883 int chan;
884
885 for (chan = 0; chan < 4; chan++) {
886 int i;
887
888 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
889 (chan * 0x2000) | 0x0200);
890 tg3_writephy(tp, 0x16, 0x0002);
891 for (i = 0; i < 6; i++)
892 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
893 tg3_writephy(tp, 0x16, 0x0202);
894 if (tg3_wait_macro_done(tp))
895 return -EBUSY;
896 }
897
898 return 0;
899}
900
901static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
902{
903 u32 reg32, phy9_orig;
904 int retries, do_phy_reset, err;
905
906 retries = 10;
907 do_phy_reset = 1;
908 do {
909 if (do_phy_reset) {
910 err = tg3_bmcr_reset(tp);
911 if (err)
912 return err;
913 do_phy_reset = 0;
914 }
915
916 /* Disable transmitter and interrupt. */
917 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
918 continue;
919
920 reg32 |= 0x3000;
921 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
922
923 /* Set full-duplex, 1000 mbps. */
924 tg3_writephy(tp, MII_BMCR,
925 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
926
927 /* Set to master mode. */
928 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
929 continue;
930
931 tg3_writephy(tp, MII_TG3_CTRL,
932 (MII_TG3_CTRL_AS_MASTER |
933 MII_TG3_CTRL_ENABLE_AS_MASTER));
934
935 /* Enable SM_DSP_CLOCK and 6dB. */
936 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
937
938 /* Block the PHY control access. */
939 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
940 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
941
942 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
943 if (!err)
944 break;
945 } while (--retries);
946
947 err = tg3_phy_reset_chanpat(tp);
948 if (err)
949 return err;
950
951 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
952 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
953
954 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
955 tg3_writephy(tp, 0x16, 0x0000);
956
957 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
959 /* Set Extended packet length bit for jumbo frames */
960 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
961 }
962 else {
963 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
964 }
965
966 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
967
968 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
969 reg32 &= ~0x3000;
970 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
971 } else if (!err)
972 err = -EBUSY;
973
974 return err;
975}
976
c8e1e82b
MC
977static void tg3_link_report(struct tg3 *);
978
1da177e4
LT
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Returns 0 on success, -EBUSY if the PHY does not respond, or the
 * error from the chip-specific reset helper.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR is read twice: the register latches link-down events, so
	 * the second read reflects current state; a failure of either
	 * read means the PHY is not responding.
	 */
	err = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The link is lost across the reset; report carrier-off now so
	 * userspace is not left with a stale "link up" indication.
	 */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 require the extended reset-with-DSP-testpattern
	 * sequence instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Post-reset errata workarounds, keyed by flags determined at
	 * probe time.  The magic DSP address/data pairs come from the
	 * vendor; do not reorder them.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1061
/* Program the GRC local-control GPIOs that gate auxiliary (Vaux) power.
 * On dual-port 5704/5714 boards the GPIO pins are shared between both
 * ports, so the peer device's WOL/ASF state is consulted before the
 * pins are touched.  The tw32_wait_f() calls include a 100us settle
 * delay; the write ordering is part of the hardware sequence.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* EEPROM-write-protected boards (treated elsewhere in this file
	 * as the LOM indicator) never have their GPIOs frobbed.
	 */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Either port needing WOL or ASF means aux power must stay on. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* The peer port already configured the shared
			 * pins; do not disturb them.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step pulse sequence: enables, then
			 * OUTPUT0 high, then OUTPUT2 released.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* No WOL/ASF anywhere: pulse GPIO1 to drop aux power. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1157
1158static int tg3_setup_phy(struct tg3 *, int);
1159
1160#define RESET_KIND_SHUTDOWN 0
1161#define RESET_KIND_INIT 1
1162#define RESET_KIND_SUSPEND 2
1163
1164static void tg3_write_sig_post_reset(struct tg3 *, int);
1165static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1166static int tg3_nvram_lock(struct tg3 *);
1167static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1168
15c3b696
MC
1169static void tg3_power_down_phy(struct tg3 *tp)
1170{
1171 /* The PHY should not be powered down on some chips because
1172 * of bugs.
1173 */
1174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1176 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1177 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1178 return;
1179 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1180}
1181
bc1c7567 1182static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1183{
1184 u32 misc_host_ctrl;
1185 u16 power_control, power_caps;
1186 int pm = tp->pm_cap;
1187
1188 /* Make sure register accesses (indirect or otherwise)
1189 * will function correctly.
1190 */
1191 pci_write_config_dword(tp->pdev,
1192 TG3PCI_MISC_HOST_CTRL,
1193 tp->misc_host_ctrl);
1194
1195 pci_read_config_word(tp->pdev,
1196 pm + PCI_PM_CTRL,
1197 &power_control);
1198 power_control |= PCI_PM_CTRL_PME_STATUS;
1199 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1200 switch (state) {
bc1c7567 1201 case PCI_D0:
1da177e4
LT
1202 power_control |= 0;
1203 pci_write_config_word(tp->pdev,
1204 pm + PCI_PM_CTRL,
1205 power_control);
8c6bda1a
MC
1206 udelay(100); /* Delay after power state change */
1207
1208 /* Switch out of Vaux if it is not a LOM */
b401e9e2
MC
1209 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1210 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1211
1212 return 0;
1213
bc1c7567 1214 case PCI_D1:
1da177e4
LT
1215 power_control |= 1;
1216 break;
1217
bc1c7567 1218 case PCI_D2:
1da177e4
LT
1219 power_control |= 2;
1220 break;
1221
bc1c7567 1222 case PCI_D3hot:
1da177e4
LT
1223 power_control |= 3;
1224 break;
1225
1226 default:
1227 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1228 "requested.\n",
1229 tp->dev->name, state);
1230 return -EINVAL;
1231 };
1232
1233 power_control |= PCI_PM_CTRL_PME_ENABLE;
1234
1235 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1236 tw32(TG3PCI_MISC_HOST_CTRL,
1237 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1238
1239 if (tp->link_config.phy_is_low_power == 0) {
1240 tp->link_config.phy_is_low_power = 1;
1241 tp->link_config.orig_speed = tp->link_config.speed;
1242 tp->link_config.orig_duplex = tp->link_config.duplex;
1243 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1244 }
1245
747e8f8b 1246 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1247 tp->link_config.speed = SPEED_10;
1248 tp->link_config.duplex = DUPLEX_HALF;
1249 tp->link_config.autoneg = AUTONEG_ENABLE;
1250 tg3_setup_phy(tp, 0);
1251 }
1252
6921d201
MC
1253 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1254 int i;
1255 u32 val;
1256
1257 for (i = 0; i < 200; i++) {
1258 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1259 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1260 break;
1261 msleep(1);
1262 }
1263 }
1264 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1265 WOL_DRV_STATE_SHUTDOWN |
1266 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1267
1da177e4
LT
1268 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1269
1270 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1271 u32 mac_mode;
1272
1273 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1274 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1275 udelay(40);
1276
1277 mac_mode = MAC_MODE_PORT_MODE_MII;
1278
1279 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1280 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1281 mac_mode |= MAC_MODE_LINK_POLARITY;
1282 } else {
1283 mac_mode = MAC_MODE_PORT_MODE_TBI;
1284 }
1285
cbf46853 1286 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1287 tw32(MAC_LED_CTRL, tp->led_ctrl);
1288
1289 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1290 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1291 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1292
1293 tw32_f(MAC_MODE, mac_mode);
1294 udelay(100);
1295
1296 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1297 udelay(10);
1298 }
1299
1300 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1301 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1302 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1303 u32 base_val;
1304
1305 base_val = tp->pci_clock_ctrl;
1306 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1307 CLOCK_CTRL_TXCLK_DISABLE);
1308
b401e9e2
MC
1309 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1310 CLOCK_CTRL_PWRDOWN_PLL133, 40);
a4e2b347 1311 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4cf78e4f 1312 /* do nothing */
85e94ced 1313 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1314 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1315 u32 newbits1, newbits2;
1316
1317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1318 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1319 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1320 CLOCK_CTRL_TXCLK_DISABLE |
1321 CLOCK_CTRL_ALTCLK);
1322 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1323 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1324 newbits1 = CLOCK_CTRL_625_CORE;
1325 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1326 } else {
1327 newbits1 = CLOCK_CTRL_ALTCLK;
1328 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1329 }
1330
b401e9e2
MC
1331 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1332 40);
1da177e4 1333
b401e9e2
MC
1334 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1335 40);
1da177e4
LT
1336
1337 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1338 u32 newbits3;
1339
1340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1341 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1342 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1343 CLOCK_CTRL_TXCLK_DISABLE |
1344 CLOCK_CTRL_44MHZ_CORE);
1345 } else {
1346 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1347 }
1348
b401e9e2
MC
1349 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1350 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1351 }
1352 }
1353
6921d201
MC
1354 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1355 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1356 /* Turn off the PHY */
1357 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1358 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1359 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1360 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
15c3b696 1361 tg3_power_down_phy(tp);
6921d201
MC
1362 }
1363 }
1364
1da177e4
LT
1365 tg3_frob_aux_power(tp);
1366
1367 /* Workaround for unstable PLL clock */
1368 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1369 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1370 u32 val = tr32(0x7d00);
1371
1372 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1373 tw32(0x7d00, val);
6921d201 1374 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1375 int err;
1376
1377 err = tg3_nvram_lock(tp);
1da177e4 1378 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1379 if (!err)
1380 tg3_nvram_unlock(tp);
6921d201 1381 }
1da177e4
LT
1382 }
1383
bbadf503
MC
1384 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1385
1da177e4
LT
1386 /* Finally, set the new power state. */
1387 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1388 udelay(100); /* Delay after power state change */
1da177e4 1389
1da177e4
LT
1390 return 0;
1391}
1392
1393static void tg3_link_report(struct tg3 *tp)
1394{
1395 if (!netif_carrier_ok(tp->dev)) {
1396 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1397 } else {
1398 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1399 tp->dev->name,
1400 (tp->link_config.active_speed == SPEED_1000 ?
1401 1000 :
1402 (tp->link_config.active_speed == SPEED_100 ?
1403 100 : 10)),
1404 (tp->link_config.active_duplex == DUPLEX_FULL ?
1405 "full" : "half"));
1406
1407 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1408 "%s for RX.\n",
1409 tp->dev->name,
1410 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1411 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1412 }
1413}
1414
1415static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1416{
1417 u32 new_tg3_flags = 0;
1418 u32 old_rx_mode = tp->rx_mode;
1419 u32 old_tx_mode = tp->tx_mode;
1420
1421 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1422
1423 /* Convert 1000BaseX flow control bits to 1000BaseT
1424 * bits before resolving flow control.
1425 */
1426 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1427 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1428 ADVERTISE_PAUSE_ASYM);
1429 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1430
1431 if (local_adv & ADVERTISE_1000XPAUSE)
1432 local_adv |= ADVERTISE_PAUSE_CAP;
1433 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1434 local_adv |= ADVERTISE_PAUSE_ASYM;
1435 if (remote_adv & LPA_1000XPAUSE)
1436 remote_adv |= LPA_PAUSE_CAP;
1437 if (remote_adv & LPA_1000XPAUSE_ASYM)
1438 remote_adv |= LPA_PAUSE_ASYM;
1439 }
1440
1da177e4
LT
1441 if (local_adv & ADVERTISE_PAUSE_CAP) {
1442 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1443 if (remote_adv & LPA_PAUSE_CAP)
1444 new_tg3_flags |=
1445 (TG3_FLAG_RX_PAUSE |
1446 TG3_FLAG_TX_PAUSE);
1447 else if (remote_adv & LPA_PAUSE_ASYM)
1448 new_tg3_flags |=
1449 (TG3_FLAG_RX_PAUSE);
1450 } else {
1451 if (remote_adv & LPA_PAUSE_CAP)
1452 new_tg3_flags |=
1453 (TG3_FLAG_RX_PAUSE |
1454 TG3_FLAG_TX_PAUSE);
1455 }
1456 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1457 if ((remote_adv & LPA_PAUSE_CAP) &&
1458 (remote_adv & LPA_PAUSE_ASYM))
1459 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1460 }
1461
1462 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1463 tp->tg3_flags |= new_tg3_flags;
1464 } else {
1465 new_tg3_flags = tp->tg3_flags;
1466 }
1467
1468 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1469 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1470 else
1471 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1472
1473 if (old_rx_mode != tp->rx_mode) {
1474 tw32_f(MAC_RX_MODE, tp->rx_mode);
1475 }
1476
1477 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1478 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1479 else
1480 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1481
1482 if (old_tx_mode != tp->tx_mode) {
1483 tw32_f(MAC_TX_MODE, tp->tx_mode);
1484 }
1485}
1486
1487static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1488{
1489 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1490 case MII_TG3_AUX_STAT_10HALF:
1491 *speed = SPEED_10;
1492 *duplex = DUPLEX_HALF;
1493 break;
1494
1495 case MII_TG3_AUX_STAT_10FULL:
1496 *speed = SPEED_10;
1497 *duplex = DUPLEX_FULL;
1498 break;
1499
1500 case MII_TG3_AUX_STAT_100HALF:
1501 *speed = SPEED_100;
1502 *duplex = DUPLEX_HALF;
1503 break;
1504
1505 case MII_TG3_AUX_STAT_100FULL:
1506 *speed = SPEED_100;
1507 *duplex = DUPLEX_FULL;
1508 break;
1509
1510 case MII_TG3_AUX_STAT_1000HALF:
1511 *speed = SPEED_1000;
1512 *duplex = DUPLEX_HALF;
1513 break;
1514
1515 case MII_TG3_AUX_STAT_1000FULL:
1516 *speed = SPEED_1000;
1517 *duplex = DUPLEX_FULL;
1518 break;
1519
1520 default:
1521 *speed = SPEED_INVALID;
1522 *duplex = DUPLEX_INVALID;
1523 break;
1524 };
1525}
1526
/* Program the copper PHY's advertisement registers according to
 * tp->link_config and kick off (re)negotiation or force the requested
 * speed/duplex.  Three configurations are handled: low-power (10bT,
 * optionally 100bT), "advertise everything" (speed == SPEED_INVALID),
 * and a specific user-requested link mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific mode requested: advertise everything the
		 * device supports.
		 */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 must negotiate as master. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Force the mode: drop into loopback first and wait
		 * (up to 15ms) for the link to go down before writing
		 * the new BMCR value.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1665
1666static int tg3_init_5401phy_dsp(struct tg3 *tp)
1667{
1668 int err;
1669
1670 /* Turn off tap power management. */
1671 /* Set Extended packet length bit */
1672 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1673
1674 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1675 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1676
1677 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1678 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1679
1680 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1681 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1682
1683 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1684 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1685
1686 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1687 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1688
1689 udelay(40);
1690
1691 return err;
1692}
1693
1694static int tg3_copper_is_advertising_all(struct tg3 *tp)
1695{
1696 u32 adv_reg, all_mask;
1697
1698 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1699 return 0;
1700
1701 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1702 ADVERTISE_100HALF | ADVERTISE_100FULL);
1703 if ((adv_reg & all_mask) != all_mask)
1704 return 0;
1705 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1706 u32 tg3_ctrl;
1707
1708 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1709 return 0;
1710
1711 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1712 MII_TG3_CTRL_ADV_1000_FULL);
1713 if ((tg3_ctrl & all_mask) != all_mask)
1714 return 0;
1715 }
1716 return 1;
1717}
1718
/* Establish / re-evaluate the link on a copper PHY: clear latched MAC
 * status, apply chip- and PHY-specific workarounds, poll the PHY for
 * link, resolve speed/duplex and flow control, program MAC_MODE, and
 * update the netif carrier state (reporting changes via
 * tg3_link_report()).  Returns 0, or a negative error if the 5401 DSP
 * initialization fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any latched link/config-change status bits. */
	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	/* BCM5401 needs its DSP patched whenever the link is down. */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Wait up to 10ms for the link to return. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit: reset and re-patch once
			 * more if the link did not come back.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Enable bit 10 of AUX_CTRL shadow 0x4007 if not yet
		 * set; skip straight to the relink path when we had to
		 * change it.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll BMSR (latched, hence double read) for link, ~4ms max. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for AUX_STAT to become non-zero, then decode
		 * the negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane BMCR value (0x7fff means the read
		 * glitched).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: the PHY must match the requested
			 * settings exactly for the link to count.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	/* No link (or exiting low power): reprogram advertisements and
	 * restart negotiation, then re-check for an immediate link.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC to match the negotiated link parameters. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: notify firmware via the
	 * mailbox after the link settles.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
1997
/* Software state for the fiber (1000BaseX) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;		/* MR_* control/status bits below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters maintained by the state machine. */
	unsigned long link_time, cur_time;

	/* Last config word seen, and how many times in a row. */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match flags derived from the received config stream. */
	char ability_match, idle_match, ack_match;

	/* Raw TX/RX autonegotiation config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes for tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks the link must be stable before a state transition settles. */
#define ANEG_STATE_SETTLE_TIME	10000
2061
/* Run one tick of the clause-37 style fiber autonegotiation state machine.
 *
 * Called repeatedly by fiber_autoneg() (roughly once per microsecond).
 * Each call samples the MAC receive-config status, updates the debounce
 * counters in @ap, then advances @ap->state.
 *
 * Returns:
 *   ANEG_OK         - nothing to report this tick
 *   ANEG_TIMER_ENAB - still negotiating, caller should keep ticking
 *   ANEG_DONE       - negotiation finished (see ap->flags for result)
 *   ANEG_FAILED     - negotiation failed
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First tick: start from a clean slate. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		/* Link partner is sending config code words; debounce the
		 * advertised ability: it must repeat at least twice before
		 * ability_match is asserted.
		 */
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words received: line is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart negotiation. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart for the settle time before probing. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait until the partner's (non-zero) ability debounces. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's ability word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner ACKed; make sure the ability word did not
			 * change under us, else renegotiate from scratch.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner restarted negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject config words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the link partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)	/* toggle bit in rx word */
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is unimplemented (see
				 * NEXT_PAGE_WAIT states below), so it can
				 * only succeed if neither side needs it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};	/* NOTE(review): stray ';' after switch — harmless null stmt */

	return ret;
}
2309
/* Perform software ("by hand") fiber autonegotiation by ticking
 * tg3_fiber_aneg_smachine() for up to ~195ms (195000 x udelay(1) ticks).
 *
 * On return *flags holds the final MR_* flag word from the state machine.
 * Returns 1 if negotiation completed and the link partner advertised
 * full duplex (or link-ok/complete flags are set), 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear our advertised config word, force GMII port mode, and
	 * start sending config code words.
	 */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop sending config words regardless of outcome. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2353
/* Initialize the BCM8002 SerDes PHY via raw MII register writes.
 *
 * The numeric register/value pairs below are vendor-specific BCM8002
 * magic taken from Broadcom reference code; their exact semantics are
 * only partially known (see inline comments).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2403
/* Bring up the fiber link using the on-chip SG_DIG hardware
 * autonegotiation block (used when TG3_FLG2_HW_AUTONEG is set).
 *
 * @mac_status: snapshot of MAC_STATUS taken by the caller.
 * Returns 1 if the link is up after this attempt, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All 5704 revisions except A0/A1 need the SERDES_CFG workaround
	 * writes performed around SG_DIG_CTRL changes.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg is currently enabled
		 * (bit 31), shut it off, then report link up if we have
		 * PCS sync.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg: pulse the restart bit (30). */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Giver time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: pick up the partner's pause
			 * bits (19/20) and program flow control.
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  Give a freshly
			 * restarted negotiation one more timer period,
			 * otherwise fall back to parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2528
/* Bring up the fiber link without the SG_DIG hardware autoneg block,
 * using the software state machine (fiber_autoneg) or a forced-mode
 * 1000FD configuration.
 *
 * @mac_status: snapshot of MAC_STATUS taken by the caller.
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack sync/config-changed events until they stop
		 * re-asserting (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but parallel detection applies: PCS is
		 * synced and the partner is not sending config words.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2586
/* Top-level link setup for TBI/fiber ports (TG3_FLG2_PHY_SERDES).
 *
 * Re-checks or re-negotiates the fiber link, updates active speed/duplex
 * and the link LED, and synchronizes netif_carrier state with the result
 * (reporting changes via tg3_link_report).  Always returns 0.
 * @force_reset is unused here (present to match the other setup_*_phy
 * function signatures).
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-call link parameters so we can report a
	 * change even when carrier state itself does not flip.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg port that already has carrier and a
	 * clean PCS-synced, signal-detected status needs no renegotiation;
	 * just ack the change bits and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI (fiber) port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear any stale link-change indication in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config-changed until they stop re-asserting. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		/* Lost PCS sync: declare link down and, in autoneg mode,
		 * pulse SEND_CONFIGS to kick off a fresh negotiation.
		 */
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only run 1000FD; program the LED accordingly. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged, but report if pause/speed/duplex
		 * settings moved underneath it.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2700
747e8f8b
MC
/* Link setup for serdes ports controlled through an MII-register PHY
 * interface (TG3_FLG2_MII_SERDES), e.g. 5714-class parts.
 *
 * Handles three modes: parallel-detect monitoring, 1000BASE-X autoneg
 * via MII_ADVERTISE/BMCR, and forced speed/duplex.  Updates active
 * speed/duplex and carrier state, reporting changes via tg3_link_report.
 * Returns the OR of tg3_readphy() error results accumulated in @err.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending MAC status change events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR is latched-low; read twice to get current link state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: BMSR link bit is unreliable; trust the MAC's
		 * TX status link indication instead.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement from link_config. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart negotiation and return early; link will
			 * be evaluated on a later poll.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: program duplex directly. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear the advertisement and restart
				 * autoneg so the link drops cleanly
				 * before we force the new settings.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
						BMCR_ANRESTART |
						BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low BMSR: read twice (and apply the
			 * 5714 MAC_TX_STATUS override again).
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex/pause from the intersection of
			 * our advertisement and the partner's.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2867
/* Periodic poller implementing parallel detection for MII-serdes ports.
 *
 * If autoneg has not produced a link but the PHY sees signal detect and
 * no incoming config code words, force 1000FD ("parallel detection").
 * Conversely, if we are running on a parallel-detected link and config
 * words start arriving, re-enable autoneg.  The 0x1c/0x17/0x15 register
 * accesses select vendor-specific shadow/expansion registers — exact
 * bit semantics per the inline comments; confirm against Broadcom docs.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2925
1da177e4
LT
/* Top-level PHY/link setup: dispatch to the fiber, MII-serdes, or copper
 * setup routine, then program link-dependent MAC parameters (TX slot
 * time for 1000/half, statistics coalescing on pre-5705 parts).
 *
 * @force_reset: passed through to the selected setup routine.
 * Returns the error code from that routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000/half needs an extended slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: only coalesce statistics while carrier is up. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	return err;
}
2961
2962/* Tigon3 never reports partial packet sends. So we do not
2963 * need special logic to handle SKBs that have not had all
2964 * of their frags sent yet, like SunGEM does.
2965 */
/* Reclaim completed TX descriptors.
 *
 * Walks the TX ring from tp->tx_cons up to the hardware's consumer
 * index, unmapping the head buffer and every fragment of each completed
 * skb, then freeing it.  Finally wakes the TX queue if it was stopped
 * and enough descriptors are now available.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i;

		/* Only the first descriptor of a packet owns the skb. */
		BUG_ON(skb == NULL);
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Each page fragment occupies its own descriptor. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			BUG_ON(sw_idx == hw_idx);

			ri = &tp->tx_buffers[sw_idx];
			BUG_ON(ri->skb != NULL);

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);
	}

	tp->tx_cons = sw_idx;

	/* Re-check queue state under tx_lock to avoid racing with the
	 * xmit path stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev))) {
		spin_lock(&tp->tx_lock);
		if (netif_queue_stopped(tp->dev) &&
		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		spin_unlock(&tp->tx_lock);
	}
}
3013
3014/* Returns size of skb allocated or < 0 on error.
3015 *
3016 * We only need to fill in the address because the other members
3017 * of the RX descriptor are invariant, see tg3_init_rings.
3018 *
3019 * Note the purposeful assymetry of cpu vs. chip accesses. For
3020 * posting buffers we only dirty the first cache line of the RX
3021 * descriptor (containing the address). Whereas for the RX status
3022 * buffers the cpu only reads the last cacheline of the RX descriptor
3023 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3024 */
/* Allocate and DMA-map a fresh receive skb for one RX ring slot.
 *
 * @opaque_key:        selects standard vs jumbo ring.
 * @src_idx:           if >= 0, the ring slot whose skb pointer should be
 *                     cleared once the new buffer is committed (used when
 *                     a buffer is being moved rather than replenished).
 * @dest_idx_unmasked: destination slot, masked to the ring size here.
 *
 * Returns the allocated skb size on success, -EINVAL for an unknown
 * ring key, or -ENOMEM on allocation failure.  On failure nothing is
 * modified (callers rely on this).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = dev_alloc_skb(skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb->dev = tp->dev;
	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address fields of the descriptor change; the rest
	 * are invariant (see tg3_init_rings).
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3086
3087/* We only need to move over in the address because the other
3088 * members of the RX descriptor are invariant. See notes above
3089 * tg3_alloc_rx_skb for full details.
3090 */
/* Recycle an RX buffer: move the skb, its DMA mapping, and the
 * descriptor address from ring slot @src_idx to @dest_idx_unmasked
 * (masked to the ring size) without reallocating.  See notes above
 * tg3_alloc_rx_skb for why only the address fields move.
 * Silently does nothing for an unknown @opaque_key.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		return;
	};

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Source slot no longer owns the buffer. */
	src_map->skb = NULL;
}
3127
#if TG3_VLAN_TAG_USED
/* Hand a received skb with a hardware-extracted VLAN tag to the stack. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3134
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask;
	u32 sw_idx = tp->rx_rcb_ptr;	/* next status ring entry to consume */
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which posting ring (standard
		 * or jumbo) this buffer came from, and its index there.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring key: skip the entry without posting
			 * a replacement buffer.
			 */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: pass the existing buffer up the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the original DMA buffer back onto the ring.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = tp->dev;
			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Chip sets ip_tcp_csum to 0xffff when the TCP/UDP checksum
		 * verified good; only honor it if RX checksumming is enabled.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3305
/* NAPI poll callback: handle link changes, reap TX completions, and
 * process RX packets within the NAPI budget.  Returns 0 when all work
 * is done (and interrupts re-enabled), 1 when more polling is needed.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	/* Record the status tag we have consumed (tagged-status mode),
	 * or clear the updated bit, before re-checking for work.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3362
/* Quiesce interrupt processing: flag the device as synchronizing and
 * wait for any in-flight IRQ handler to finish.  The handlers consult
 * this flag via tg3_irq_sync() and bail out while it is set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();	/* make irq_sync visible before waiting on the IRQ */

	synchronize_irq(tp->pdev->irq);
}
3372
/* Returns non-zero while tg3_irq_quiesce() has IRQ handling quiesced. */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3377
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* Quiesce the IRQ handler first, then take both locks in the
	 * fixed order lock -> tx_lock (matched by tg3_full_unlock).
	 */
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
	spin_lock(&tp->tx_lock);
}
3390
/* Release the locks taken by tg3_full_lock(), in reverse order. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock(&tp->tx_lock);
	spin_unlock_bh(&tp->lock);
}
3396
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_HANDLED;
}
3413
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3438
/* Legacy INTx interrupt handler (non-tagged status mode).  Claims the
 * interrupt if the status block was updated or the PCI State register
 * says INTA# is asserted; otherwise reports it as not ours (shared IRQ).
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);	/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				       0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3481
/* Legacy INTx interrupt handler for chips running in tagged-status
 * mode: new work is detected by comparing the status tag against the
 * last tag acknowledged, rather than the SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen.  Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3523
/* ISR for interrupt test: only acknowledges and claims the interrupt,
 * without scheduling any real work.  Used by the ethtool self-test to
 * verify that interrupt delivery works.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/* Ack the interrupt and keep IRQs disabled in-chip. */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3540
1da177e4 3541static int tg3_init_hw(struct tg3 *);
944d980e 3542static int tg3_halt(struct tg3 *, int, int);
1da177e4
LT
3543
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke the interrupt handler by hand so the
 * device can make progress with IRQs disabled (netconsole, kgdboe).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3552
/* Workqueue handler that fully resets and re-initializes the chip,
 * scheduled from tg3_tx_timeout() and other error paths.  Runs in
 * process context so it may sleep.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	/* Mark the reset as in progress; bail out if the device was
	 * brought down before the work item ran.
	 */
	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* irq_sync=1: quiesce the IRQ handler before touching hardware */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_init_hw(tp);

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3588
/* Net stack TX watchdog callback: log the stall and defer a full chip
 * reset to process context via the reset workqueue.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	schedule_work(&tp->reset_task);
}
3598
c58ec932
MC
3599/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3600static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3601{
3602 u32 base = (u32) mapping & 0xffffffff;
3603
3604 return ((base > 0xffffdcc0) &&
3605 (base + len + 8 < base));
3606}
3607
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
	/* Only relevant on 64-bit builds with highmem, where DMA
	 * addresses can actually exceed 40 bits.
	 */
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3620
1da177e4
LT
3621static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3622
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize the skb into a fresh copy, map it, and rewrite the TX ring
 * entries from *start up to last_plus_one to point at the new buffer.
 * Returns 0 on success, -1 on allocation/mapping failure (in which
 * case the packet is silently dropped by the caller).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 holds the linear head; subsequent entries hold
		 * the page fragments of the original skb.
		 */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3680
3681static void tg3_set_txd(struct tg3 *tp, int entry,
3682 dma_addr_t mapping, int len, u32 flags,
3683 u32 mss_and_is_end)
3684{
3685 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3686 int is_end = (mss_and_is_end & 0x1);
3687 u32 mss = (mss_and_is_end >> 1);
3688 u32 vlan_tag = 0;
3689
3690 if (is_end)
3691 flags |= TXD_FLAG_END;
3692 if (flags & TXD_FLAG_VLAN) {
3693 vlan_tag = flags >> 16;
3694 flags &= 0xffff;
3695 }
3696 vlan_tag |= (mss << TXD_MSS_SHIFT);
3697
3698 txd->addr_hi = ((u64) mapping >> 32);
3699 txd->addr_lo = ((u64) mapping & 0xffffffff);
3700 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3701 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3702}
3703
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* No BH disabling for tx_lock here.  We are running in BH disabled
	 * context and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (!spin_trylock(&tp->tx_lock))
		return NETDEV_TX_LOCKED;

	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		spin_unlock(&tp->tx_lock);
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->tso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: headers must be writable for checksum fixups */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Hardware computes the IP and TCP checksums per segment */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);

		skb->h.th->check = 0;

		mss |= (ip_tcp_len + tcp_opt_len) << 9;
	}
	else if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
	mss = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(dev);
		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();
	spin_unlock(&tp->tx_lock);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3828
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* No BH disabling for tx_lock here.  We are running in BH disabled
	 * context and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (!spin_trylock(&tp->tx_lock))
		return NETDEV_TX_LOCKED;

	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		spin_unlock(&tp->tx_lock);
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->tso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: headers must be writable for checksum fixups */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* HW TSO computes the TCP checksum itself */
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			/* Firmware TSO needs a pseudo-header checksum seed */
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* Encode IP/TCP header-option lengths where the chip
		 * revision expects them (mss field vs base_flags).
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet and let
		 * the workaround linearize/remap the whole thing.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(dev);
		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();
	spin_unlock(&tp->tx_lock);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4009
/* Apply a new MTU to the device and adjust the jumbo-ring / TSO flags
 * accordingly.  5780-class chips cannot do TSO with jumbo frames, so
 * TSO is disabled instead of enabling the jumbo ring on them.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
			ethtool_op_set_tso(dev, 0);
		}
		else
			tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
	} else {
		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
	}
}
4028
/* ndo change_mtu handler.  If the interface is down, only record the
 * new MTU; otherwise halt and re-init the chip so the new buffer
 * sizes take effect.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	/* irq_sync=1: quiesce the IRQ handler before resetting */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_init_hw(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
4060
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Unmap and free every posted standard-ring RX buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Same for the jumbo ring. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX entries: the first ring slot of a packet holds the skb and
	 * its linear head; each fragment occupies one following slot.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4132
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static void tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips with a jumbo MTU use the large buffer size
	 * on the standard ring instead of a separate jumbo ring.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
				     -1, i) < 0)
			break;
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0)
				break;
		}
	}
}
4202
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_std_buffers is the base of one kmalloc'd slab that also
	 * carries rx_jumbo_buffers and tx_buffers (see
	 * tg3_alloc_consistent), so a single kfree releases all three.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
				    tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4242
4243/*
4244 * Must not be invoked with interrupt sources disabled and
4245 * the hardware shutdown down. Can sleep.
4246 */
4247static int tg3_alloc_consistent(struct tg3 *tp)
4248{
4249 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4250 (TG3_RX_RING_SIZE +
4251 TG3_RX_JUMBO_RING_SIZE)) +
4252 (sizeof(struct tx_ring_info) *
4253 TG3_TX_RING_SIZE),
4254 GFP_KERNEL);
4255 if (!tp->rx_std_buffers)
4256 return -ENOMEM;
4257
4258 memset(tp->rx_std_buffers, 0,
4259 (sizeof(struct ring_info) *
4260 (TG3_RX_RING_SIZE +
4261 TG3_RX_JUMBO_RING_SIZE)) +
4262 (sizeof(struct tx_ring_info) *
4263 TG3_TX_RING_SIZE));
4264
4265 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4266 tp->tx_buffers = (struct tx_ring_info *)
4267 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4268
4269 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4270 &tp->rx_std_mapping);
4271 if (!tp->rx_std)
4272 goto err_out;
4273
4274 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4275 &tp->rx_jumbo_mapping);
4276
4277 if (!tp->rx_jumbo)
4278 goto err_out;
4279
4280 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4281 &tp->rx_rcb_mapping);
4282 if (!tp->rx_rcb)
4283 goto err_out;
4284
4285 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4286 &tp->tx_desc_mapping);
4287 if (!tp->tx_ring)
4288 goto err_out;
4289
4290 tp->hw_status = pci_alloc_consistent(tp->pdev,
4291 TG3_HW_STATUS_SIZE,
4292 &tp->status_mapping);
4293 if (!tp->hw_status)
4294 goto err_out;
4295
4296 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4297 sizeof(struct tg3_hw_stats),
4298 &tp->stats_mapping);
4299 if (!tp->hw_stats)
4300 goto err_out;
4301
4302 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4303 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4304
4305 return 0;
4306
4307err_out:
4308 tg3_free_consistent(tp);
4309 return -ENOMEM;
4310}
4311
4312#define MAX_WAIT_CNT 1000
4313
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * Returns 0 when the enable bit cleared (or the block is one the
 * 5705/5750 cannot control), -ENODEV on timeout unless @silent.
 * NOTE(review): a silent timeout also returns 0 — callers OR the
 * results together and only care about loud failures.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		};
	}

	/* Clear the enable bit and flush the posted write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the block to quiesce. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		printk(KERN_ERR PFX "tg3_stop_block timed out, "
		       "ofs=%lx enable_bit=%x\n",
		       ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
4359
/* Quiesce the whole MAC/DMA pipeline in dependency order: receive path
 * first, then transmit path, then host coalescing, DMA and buffer
 * management.  Error codes from individual blocks are OR-ed into @err,
 * so the return value is nonzero if any block failed to stop.
 *
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop MAC receive first so no new frames enter the pipeline. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks, front to back. */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to MAX_WAIT_CNT * 100us for the transmitter to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the hardware-visible status and stats blocks if present. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4422
4423/* tp->lock is held. */
4424static int tg3_nvram_lock(struct tg3 *tp)
4425{
4426 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4427 int i;
4428
ec41c7df
MC
4429 if (tp->nvram_lock_cnt == 0) {
4430 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4431 for (i = 0; i < 8000; i++) {
4432 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4433 break;
4434 udelay(20);
4435 }
4436 if (i == 8000) {
4437 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4438 return -ENODEV;
4439 }
1da177e4 4440 }
ec41c7df 4441 tp->nvram_lock_cnt++;
1da177e4
LT
4442 }
4443 return 0;
4444}
4445
4446/* tp->lock is held. */
4447static void tg3_nvram_unlock(struct tg3 *tp)
4448{
ec41c7df
MC
4449 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4450 if (tp->nvram_lock_cnt > 0)
4451 tp->nvram_lock_cnt--;
4452 if (tp->nvram_lock_cnt == 0)
4453 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4454 }
1da177e4
LT
4455}
4456
e6af301b
MC
4457/* tp->lock is held. */
4458static void tg3_enable_nvram_access(struct tg3 *tp)
4459{
4460 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4461 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4462 u32 nvaccess = tr32(NVRAM_ACCESS);
4463
4464 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4465 }
4466}
4467
4468/* tp->lock is held. */
4469static void tg3_disable_nvram_access(struct tg3 *tp)
4470{
4471 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4472 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4473 u32 nvaccess = tr32(NVRAM_ACCESS);
4474
4475 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4476 }
4477}
4478
1da177e4
LT
4479/* tp->lock is held. */
4480static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4481{
4482 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4483 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4484 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4485
4486 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4487 switch (kind) {
4488 case RESET_KIND_INIT:
4489 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4490 DRV_STATE_START);
4491 break;
4492
4493 case RESET_KIND_SHUTDOWN:
4494 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4495 DRV_STATE_UNLOAD);
4496 break;
4497
4498 case RESET_KIND_SUSPEND:
4499 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4500 DRV_STATE_SUSPEND);
4501 break;
4502
4503 default:
4504 break;
4505 };
4506 }
4507}
4508
4509/* tp->lock is held. */
4510static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4511{
4512 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4513 switch (kind) {
4514 case RESET_KIND_INIT:
4515 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4516 DRV_STATE_START_DONE);
4517 break;
4518
4519 case RESET_KIND_SHUTDOWN:
4520 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4521 DRV_STATE_UNLOAD_DONE);
4522 break;
4523
4524 default:
4525 break;
4526 };
4527 }
4528}
4529
4530/* tp->lock is held. */
4531static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4532{
4533 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4534 switch (kind) {
4535 case RESET_KIND_INIT:
4536 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4537 DRV_STATE_START);
4538 break;
4539
4540 case RESET_KIND_SHUTDOWN:
4541 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4542 DRV_STATE_UNLOAD);
4543 break;
4544
4545 case RESET_KIND_SUSPEND:
4546 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4547 DRV_STATE_SUSPEND);
4548 break;
4549
4550 default:
4551 break;
4552 };
4553 }
4554}
4555
4556static void tg3_stop_fw(struct tg3 *);
4557
/* Perform a full core-clock reset of the chip and bring it back to a
 * state where register and config-space accesses work again, firmware
 * has restarted, and the ASF state has been re-probed.
 *
 * Returns 0 on success, -ENODEV if on-chip firmware never signals
 * completion.  tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i;

	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
		tg3_nvram_lock(tp);
		/* No matching tg3_nvram_unlock() after this because
		 * chip reset below will undo the nvram lock.
		 */
		tp->nvram_lock_cnt = 0;
	}

	/* Park the fastboot program counter on chips that have it. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode for the PHY type in use. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
		/* Wait for firmware initialization to complete. */
		for (i = 0; i < 100000; i++) {
			tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			udelay(10);
		}
		if (i >= 100000) {
			printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
			       "firmware will not restart magic=%08x\n",
			       tp->dev->name, val);
			return -ENODEV;
		}
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state. */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4764
/* Ask the on-chip ASF firmware to pause and wait briefly for its ACK.
 * A no-op when ASF is not enabled.  tp->lock is held.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		u32 val;
		int i;

		/* Post the pause command, then raise RX CPU event bit 14
		 * so the firmware notices it.
		 */
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event.  Give up silently
		 * after 100us — this is best-effort.
		 */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
4785
/* Fully halt the chip: pause firmware, signal the pending reset,
 * quiesce the hardware blocks, reset the chip, then post the
 * after-reset signatures.  Returns the result of tg3_chip_reset().
 * tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);

	err = tg3_chip_reset(tp);

	/* Signatures are written even when the reset failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4806
4807#define TG3_FW_RELEASE_MAJOR 0x0
4808#define TG3_FW_RELASE_MINOR 0x0
4809#define TG3_FW_RELEASE_FIX 0x0
4810#define TG3_FW_START_ADDR 0x08000000
4811#define TG3_FW_TEXT_ADDR 0x08000000
4812#define TG3_FW_TEXT_LEN 0x9c0
4813#define TG3_FW_RODATA_ADDR 0x080009c0
4814#define TG3_FW_RODATA_LEN 0x60
4815#define TG3_FW_DATA_ADDR 0x08000a40
4816#define TG3_FW_DATA_LEN 0x20
4817#define TG3_FW_SBSS_ADDR 0x08000a60
4818#define TG3_FW_SBSS_LEN 0xc
4819#define TG3_FW_BSS_ADDR 0x08000a70
4820#define TG3_FW_BSS_LEN 0x10
4821
/* .text section of the MIPS firmware image used by
 * tg3_load_5701_a0_firmware_fix().  Opaque instruction words —
 * do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
4915
/* .rodata section of the 5701 A0 fix firmware (ASCII message strings
 * packed as words).  Opaque data — do not edit by hand.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
4923
#if 0 /* All zeros, don't eat up space with it. */
/* .data section of the fix firmware — entirely zero, so the loader is
 * handed a NULL pointer instead (see tg3_load_5701_a0_firmware_fix)
 * and writes zeros itself.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
4930
4931#define RX_CPU_SCRATCH_BASE 0x30000
4932#define RX_CPU_SCRATCH_SIZE 0x04000
4933#define TX_CPU_SCRATCH_BASE 0x34000
4934#define TX_CPU_SCRATCH_SIZE 0x04000
4935
/* Halt the embedded RX or TX CPU identified by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  5705+ parts have no TX CPU, hence the BUG_ON.
 * Returns 0 on success, -ENODEV if the CPU never reports halted.
 * tp->lock is held.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE &&
	       (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (offset == RX_CPU_BASE) {
		/* Repeatedly clear state and request halt until the CPU
		 * acknowledges.
		 */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU needs one extra flushed halt write plus a
		 * settling delay.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
4977
/* Describes one firmware image (text/rodata/data sections) for
 * tg3_load_firmware_cpu().  A NULL section pointer means the section
 * is all zeros and the loader writes zeros instead.
 */
struct fw_info {
	unsigned int text_base;		/* load address of .text */
	unsigned int text_len;		/* .text length in bytes */
	u32 *text_data;			/* .text words, or NULL */
	unsigned int rodata_base;	/* load address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	u32 *rodata_data;		/* .rodata words, or NULL */
	unsigned int data_base;		/* load address of .data */
	unsigned int data_len;		/* .data length in bytes */
	u32 *data_data;			/* .data words, or NULL */
};
4989
/* Halt the CPU at @cpu_base and copy the firmware image described by
 * @info into its scratch memory at @cpu_scratch_base.  Does not start
 * the CPU — the caller sets the PC and releases the halt.
 * Returns 0 on success or a negative errno.  tp->lock is held.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s which is 5705.\n",
		       tp->dev->name);
		return -EINVAL;
	}

	/* 5705+ scratch memory is reached through memory-window writes;
	 * older chips use indirect register writes.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the whole scratch area, then write each section at its
	 * base offset.  A NULL section pointer means "all zeros".
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->text_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->text_data ?
			  info->text_data[i] : 0));
	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->rodata_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->rodata_data ?
			  info->rodata_data[i] : 0));
	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->data_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->data_data ?
			  info->data_data[i] : 0));

	err = 0;

out:
	return err;
}
5048
/* Load the 5701 A0 bug-fix firmware into both embedded CPUs, then
 * start only the RX CPU at its entry point, retrying the PC write a
 * few times.  Returns 0 on success or a negative errno.
 * tp->lock is held.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	info.data_data = NULL;		/* .data is all zeros */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);

	/* Retry setting the PC up to 5 times, re-halting in between. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}

	/* Release the halt so the RX CPU starts running the firmware. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);

	return 0;
}
5101
5102#if TG3_TSO_SUPPORT != 0
5103
5104#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5105#define TG3_TSO_FW_RELASE_MINOR 0x6
5106#define TG3_TSO_FW_RELEASE_FIX 0x0
5107#define TG3_TSO_FW_START_ADDR 0x08000000
5108#define TG3_TSO_FW_TEXT_ADDR 0x08000000
5109#define TG3_TSO_FW_TEXT_LEN 0x1aa0
5110#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5111#define TG3_TSO_FW_RODATA_LEN 0x60
5112#define TG3_TSO_FW_DATA_ADDR 0x08001b20
5113#define TG3_TSO_FW_DATA_LEN 0x30
5114#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5115#define TG3_TSO_FW_SBSS_LEN 0x2c
5116#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5117#define TG3_TSO_FW_BSS_LEN 0x894
5118
5119static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5120 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5121 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5122 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5123 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5124 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5125 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5126 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5127 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5128 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5129 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5130 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5131 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5132 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5133 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5134 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5135 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5136 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5137 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5138 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5139 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5140 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5141 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5142 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5143 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5144 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5145 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5146 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5147 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5148 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5149 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5150 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5151 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5152 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5153 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5154 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5155 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5156 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5157 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5158 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5159 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5160 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5161 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5162 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5163 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5164 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5165 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5166 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5167 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5168 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5169 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5170 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5171 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5172 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5173 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5174 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5175 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5176 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5177 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5178 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5179 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5180 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5181 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5182 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5183 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5184 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5185 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5186 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5187 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5188 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5189 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5190 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5191 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5192 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5193 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5194 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5195 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5196 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5197 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5198 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5199 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5200 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5201 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5202 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5203 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5204 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5205 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5206 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5207 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5208 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5209 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5210 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5211 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5212 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5213 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5214 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5215 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5216 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5217 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5218 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5219 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5220 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5221 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5222 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5223 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5224 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5225 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5226 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5227 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5228 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5229 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5230 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5231 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5232 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5233 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5234 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5235 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5236 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5237 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5238 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5239 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5240 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5241 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5242 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5243 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5244 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5245 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5246 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5247 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5248 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5249 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5250 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5251 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5252 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5253 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5254 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5255 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5256 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5257 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5258 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5259 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5260 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5261 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5262 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5263 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5264 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5265 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5266 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5267 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5268 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5269 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5270 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5271 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5272 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5273 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5274 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5275 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5276 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5277 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5278 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5279 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5280 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5281 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5282 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5283 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5284 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5285 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5286 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5287 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5288 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5289 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5290 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5291 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5292 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5293 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5294 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5295 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5296 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5297 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5298 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5299 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5300 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5301 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5302 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5303 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5304 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5305 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5306 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5307 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5308 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5309 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5310 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5311 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5312 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5313 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5314 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5315 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5316 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5317 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5318 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5319 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5320 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5321 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5322 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5323 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5324 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5325 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5326 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5327 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5328 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5329 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5330 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5331 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5332 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5333 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5334 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5335 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5336 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5337 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5338 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5339 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5340 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5341 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5342 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5343 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5344 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5345 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5346 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5347 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5348 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5349 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5350 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5351 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5352 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5353 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5354 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5355 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5356 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5357 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5358 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5359 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5360 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5361 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5362 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5363 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5364 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5365 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5366 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5367 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5368 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5369 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5370 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5371 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5372 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5373 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5374 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5375 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5376 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5377 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5378 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5379 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5380 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5381 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5382 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5383 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5384 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5385 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5386 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5387 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5388 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5389 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5390 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5391 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5392 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5393 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5394 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5395 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5396 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5397 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5398 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5399 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5400 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5401 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5402 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5403 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5404};
5405
5406static u32 tg3TsoFwRodata[] = {
5407 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5408 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5409 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5410 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5411 0x00000000,
5412};
5413
5414static u32 tg3TsoFwData[] = {
5415 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5416 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5417 0x00000000,
5418};
5419
5420/* 5705 needs a special version of the TSO firmware. */
5421#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5422#define TG3_TSO5_FW_RELASE_MINOR 0x2
5423#define TG3_TSO5_FW_RELEASE_FIX 0x0
5424#define TG3_TSO5_FW_START_ADDR 0x00010000
5425#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5426#define TG3_TSO5_FW_TEXT_LEN 0xe90
5427#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5428#define TG3_TSO5_FW_RODATA_LEN 0x50
5429#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5430#define TG3_TSO5_FW_DATA_LEN 0x20
5431#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5432#define TG3_TSO5_FW_SBSS_LEN 0x28
5433#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5434#define TG3_TSO5_FW_BSS_LEN 0x88
5435
5436static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5437 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5438 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5439 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5440 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5441 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5442 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5443 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5444 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5445 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5446 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5447 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5448 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5449 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5450 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5451 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5452 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5453 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5454 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5455 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5456 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5457 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5458 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5459 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5460 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5461 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5462 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5463 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5464 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5465 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5466 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5467 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5468 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5469 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5470 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5471 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5472 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5473 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5474 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5475 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5476 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5477 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5478 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5479 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5480 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5481 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5482 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5483 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5484 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5485 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5486 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5487 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5488 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5489 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5490 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5491 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5492 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5493 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5494 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5495 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5496 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5497 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5498 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5499 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5500 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5501 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5502 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5503 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5504 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5505 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5506 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5507 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5508 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5509 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5510 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5511 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5512 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5513 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5514 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5515 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5516 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5517 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5518 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5519 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5520 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5521 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5522 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5523 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5524 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5525 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5526 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5527 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5528 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5529 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5530 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5531 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5532 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5533 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5534 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5535 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5536 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5537 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5538 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5539 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5540 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5541 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5542 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5543 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5544 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5545 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5546 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5547 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5548 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5549 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5550 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5551 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5552 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5553 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5554 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5555 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5556 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5557 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5558 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5559 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5560 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5561 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5562 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5563 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5564 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5565 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5566 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5567 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5568 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5569 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5570 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5571 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5572 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5573 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5574 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5575 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5576 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5577 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5578 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5579 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5580 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5581 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5582 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5583 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5584 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5585 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5586 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5587 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5588 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5589 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5590 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5591 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5592 0x00000000, 0x00000000, 0x00000000,
5593};
5594
/* Read-only data segment of the 5705 TSO firmware image, loaded at
 * TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().  The words are
 * ASCII tag strings used by the firmware itself ("MainCpuB",
 * "MainCpuA", "stkoffld", "fatalErr").  Do not edit by hand.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5601
/* Initialized data segment of the 5705 TSO firmware image, loaded at
 * TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().  The ASCII decodes
 * to the firmware version tag "stkoffld_v1.2.0".  Do not edit by hand.
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5606
/* tp->lock is held. */
/* Download the TSO (TCP segmentation offload) firmware into one of the
 * chip's on-board CPUs and start it executing.
 *
 * On 5705-class parts the firmware runs on the RX CPU and borrows part
 * of the MBUF-pool SRAM as scratch space; on all other TSO-capable
 * parts it runs on the TX CPU's dedicated scratch memory.  Chips with
 * TSO implemented in hardware (TG3_FLG2_HW_TSO) need no download.
 *
 * Returns 0 on success, -ENODEV if the CPU refuses to start at the
 * firmware entry point, or the error from tg3_load_firmware_cpu().
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-TSO chips segment in silicon; nothing to load. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: TSO firmware runs on the RX CPU; scratch space is
		 * carved out of the MBUF pool SRAM (see the matching
		 * BUFMGR_MB_POOL_* setup in tg3_reset_hw()).
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* All other TSO-capable chips: run on the TX CPU. */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.text_base);

	/* Give the CPU up to five attempts (1 ms apart) to latch the
	 * entry-point PC; re-halt and rewrite the PC between attempts.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear the halt bit to let the firmware run. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5678
5679#endif /* TG3_TSO_SUPPORT != 0 */
5680
5681/* tp->lock is held. */
5682static void __tg3_set_mac_addr(struct tg3 *tp)
5683{
5684 u32 addr_high, addr_low;
5685 int i;
5686
5687 addr_high = ((tp->dev->dev_addr[0] << 8) |
5688 tp->dev->dev_addr[1]);
5689 addr_low = ((tp->dev->dev_addr[2] << 24) |
5690 (tp->dev->dev_addr[3] << 16) |
5691 (tp->dev->dev_addr[4] << 8) |
5692 (tp->dev->dev_addr[5] << 0));
5693 for (i = 0; i < 4; i++) {
5694 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5695 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5696 }
5697
5698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5699 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5700 for (i = 0; i < 12; i++) {
5701 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5702 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5703 }
5704 }
5705
5706 addr_high = (tp->dev->dev_addr[0] +
5707 tp->dev->dev_addr[1] +
5708 tp->dev->dev_addr[2] +
5709 tp->dev->dev_addr[3] +
5710 tp->dev->dev_addr[4] +
5711 tp->dev->dev_addr[5]) &
5712 TX_BACKOFF_SEED_MASK;
5713 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5714}
5715
5716static int tg3_set_mac_addr(struct net_device *dev, void *p)
5717{
5718 struct tg3 *tp = netdev_priv(dev);
5719 struct sockaddr *addr = p;
5720
f9804ddb
MC
5721 if (!is_valid_ether_addr(addr->sa_data))
5722 return -EINVAL;
5723
1da177e4
LT
5724 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5725
e75f7c90
MC
5726 if (!netif_running(dev))
5727 return 0;
5728
f47c11ee 5729 spin_lock_bh(&tp->lock);
1da177e4 5730 __tg3_set_mac_addr(tp);
f47c11ee 5731 spin_unlock_bh(&tp->lock);
1da177e4
LT
5732
5733 return 0;
5734}
5735
5736/* tp->lock is held. */
5737static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5738 dma_addr_t mapping, u32 maxlen_flags,
5739 u32 nic_addr)
5740{
5741 tg3_write_mem(tp,
5742 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5743 ((u64) mapping >> 32));
5744 tg3_write_mem(tp,
5745 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5746 ((u64) mapping & 0xffffffff));
5747 tg3_write_mem(tp,
5748 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5749 maxlen_flags);
5750
5751 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5752 tg3_write_mem(tp,
5753 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5754 nic_addr);
5755}
5756
5757static void __tg3_set_rx_mode(struct net_device *);
d244c892 5758static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
5759{
5760 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5761 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5762 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5763 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5764 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5765 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5766 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5767 }
5768 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5769 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5770 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5771 u32 val = ec->stats_block_coalesce_usecs;
5772
5773 if (!netif_carrier_ok(tp->dev))
5774 val = 0;
5775
5776 tw32(HOSTCC_STAT_COAL_TICKS, val);
5777 }
5778}
1da177e4
LT
5779
5780/* tp->lock is held. */
5781static int tg3_reset_hw(struct tg3 *tp)
5782{
5783 u32 val, rdmac_mode;
5784 int i, err, limit;
5785
5786 tg3_disable_ints(tp);
5787
5788 tg3_stop_fw(tp);
5789
5790 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5791
5792 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 5793 tg3_abort_hw(tp, 1);
1da177e4
LT
5794 }
5795
d4d2c558
MC
5796 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5797 tg3_phy_reset(tp);
5798
1da177e4
LT
5799 err = tg3_chip_reset(tp);
5800 if (err)
5801 return err;
5802
5803 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5804
5805 /* This works around an issue with Athlon chipsets on
5806 * B3 tigon3 silicon. This bit has no effect on any
5807 * other revision. But do not set this on PCI Express
5808 * chips.
5809 */
5810 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5811 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5812 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5813
5814 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5815 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5816 val = tr32(TG3PCI_PCISTATE);
5817 val |= PCISTATE_RETRY_SAME_DMA;
5818 tw32(TG3PCI_PCISTATE, val);
5819 }
5820
5821 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5822 /* Enable some hw fixes. */
5823 val = tr32(TG3PCI_MSI_DATA);
5824 val |= (1 << 26) | (1 << 28) | (1 << 29);
5825 tw32(TG3PCI_MSI_DATA, val);
5826 }
5827
5828 /* Descriptor ring init may make accesses to the
5829 * NIC SRAM area to setup the TX descriptors, so we
5830 * can only do this after the hardware has been
5831 * successfully reset.
5832 */
5833 tg3_init_rings(tp);
5834
5835 /* This value is determined during the probe time DMA
5836 * engine test, tg3_test_dma.
5837 */
5838 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5839
5840 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5841 GRC_MODE_4X_NIC_SEND_RINGS |
5842 GRC_MODE_NO_TX_PHDR_CSUM |
5843 GRC_MODE_NO_RX_PHDR_CSUM);
5844 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
5845
5846 /* Pseudo-header checksum is done by hardware logic and not
5847 * the offload processers, so make the chip do the pseudo-
5848 * header checksums on receive. For transmit it is more
5849 * convenient to do the pseudo-header checksum in software
5850 * as Linux does that on transmit for us in all cases.
5851 */
5852 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
5853
5854 tw32(GRC_MODE,
5855 tp->grc_mode |
5856 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5857
5858 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5859 val = tr32(GRC_MISC_CFG);
5860 val &= ~0xff;
5861 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5862 tw32(GRC_MISC_CFG, val);
5863
5864 /* Initialize MBUF/DESC pool. */
cbf46853 5865 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
5866 /* Do nothing. */
5867 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5868 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5869 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5870 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5871 else
5872 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5873 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5874 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5875 }
5876#if TG3_TSO_SUPPORT != 0
5877 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5878 int fw_len;
5879
5880 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5881 TG3_TSO5_FW_RODATA_LEN +
5882 TG3_TSO5_FW_DATA_LEN +
5883 TG3_TSO5_FW_SBSS_LEN +
5884 TG3_TSO5_FW_BSS_LEN);
5885 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5886 tw32(BUFMGR_MB_POOL_ADDR,
5887 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5888 tw32(BUFMGR_MB_POOL_SIZE,
5889 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5890 }
5891#endif
5892
0f893dc6 5893 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
5894 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5895 tp->bufmgr_config.mbuf_read_dma_low_water);
5896 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5897 tp->bufmgr_config.mbuf_mac_rx_low_water);
5898 tw32(BUFMGR_MB_HIGH_WATER,
5899 tp->bufmgr_config.mbuf_high_water);
5900 } else {
5901 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5902 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5903 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5904 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5905 tw32(BUFMGR_MB_HIGH_WATER,
5906 tp->bufmgr_config.mbuf_high_water_jumbo);
5907 }
5908 tw32(BUFMGR_DMA_LOW_WATER,
5909 tp->bufmgr_config.dma_low_water);
5910 tw32(BUFMGR_DMA_HIGH_WATER,
5911 tp->bufmgr_config.dma_high_water);
5912
5913 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5914 for (i = 0; i < 2000; i++) {
5915 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5916 break;
5917 udelay(10);
5918 }
5919 if (i >= 2000) {
5920 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5921 tp->dev->name);
5922 return -ENODEV;
5923 }
5924
5925 /* Setup replenish threshold. */
5926 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5927
5928 /* Initialize TG3_BDINFO's at:
5929 * RCVDBDI_STD_BD: standard eth size rx ring
5930 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5931 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5932 *
5933 * like so:
5934 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5935 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5936 * ring attribute flags
5937 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5938 *
5939 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5940 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5941 *
5942 * The size of each ring is fixed in the firmware, but the location is
5943 * configurable.
5944 */
5945 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5946 ((u64) tp->rx_std_mapping >> 32));
5947 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5948 ((u64) tp->rx_std_mapping & 0xffffffff));
5949 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5950 NIC_SRAM_RX_BUFFER_DESC);
5951
5952 /* Don't even try to program the JUMBO/MINI buffer descriptor
5953 * configs on 5705.
5954 */
5955 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5956 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5957 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5958 } else {
5959 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5960 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5961
5962 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5963 BDINFO_FLAGS_DISABLED);
5964
5965 /* Setup replenish threshold. */
5966 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5967
0f893dc6 5968 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
5969 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5970 ((u64) tp->rx_jumbo_mapping >> 32));
5971 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5972 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5973 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5974 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5975 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5976 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5977 } else {
5978 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5979 BDINFO_FLAGS_DISABLED);
5980 }
5981
5982 }
5983
5984 /* There is only one send ring on 5705/5750, no need to explicitly
5985 * disable the others.
5986 */
5987 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5988 /* Clear out send RCB ring in SRAM. */
5989 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5990 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5991 BDINFO_FLAGS_DISABLED);
5992 }
5993
5994 tp->tx_prod = 0;
5995 tp->tx_cons = 0;
5996 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5997 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5998
5999 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6000 tp->tx_desc_mapping,
6001 (TG3_TX_RING_SIZE <<
6002 BDINFO_FLAGS_MAXLEN_SHIFT),
6003 NIC_SRAM_TX_BUFFER_DESC);
6004
6005 /* There is only one receive return ring on 5705/5750, no need
6006 * to explicitly disable the others.
6007 */
6008 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6009 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6010 i += TG3_BDINFO_SIZE) {
6011 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6012 BDINFO_FLAGS_DISABLED);
6013 }
6014 }
6015
6016 tp->rx_rcb_ptr = 0;
6017 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6018
6019 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6020 tp->rx_rcb_mapping,
6021 (TG3_RX_RCB_RING_SIZE(tp) <<
6022 BDINFO_FLAGS_MAXLEN_SHIFT),
6023 0);
6024
6025 tp->rx_std_ptr = tp->rx_pending;
6026 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6027 tp->rx_std_ptr);
6028
0f893dc6 6029 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6030 tp->rx_jumbo_pending : 0;
6031 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6032 tp->rx_jumbo_ptr);
6033
6034 /* Initialize MAC address and backoff seed. */
6035 __tg3_set_mac_addr(tp);
6036
6037 /* MTU + ethernet header + FCS + optional VLAN tag */
6038 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6039
6040 /* The slot time is changed by tg3_setup_phy if we
6041 * run at gigabit with half duplex.
6042 */
6043 tw32(MAC_TX_LENGTHS,
6044 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6045 (6 << TX_LENGTHS_IPG_SHIFT) |
6046 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6047
6048 /* Receive rules. */
6049 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6050 tw32(RCVLPC_CONFIG, 0x0181);
6051
6052 /* Calculate RDMAC_MODE setting early, we need it to determine
6053 * the RCVLPC_STATE_ENABLE mask.
6054 */
6055 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6056 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6057 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6058 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6059 RDMAC_MODE_LNGREAD_ENAB);
6060 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6061 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
6062
6063 /* If statement applies to 5705 and 5750 PCI devices only */
6064 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6065 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6066 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
6067 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6068 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6069 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6070 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6071 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6072 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6073 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6074 }
6075 }
6076
85e94ced
MC
6077 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6078 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6079
1da177e4
LT
6080#if TG3_TSO_SUPPORT != 0
6081 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6082 rdmac_mode |= (1 << 27);
6083#endif
6084
6085 /* Receive/send statistics. */
6086 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6087 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6088 val = tr32(RCVLPC_STATS_ENABLE);
6089 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6090 tw32(RCVLPC_STATS_ENABLE, val);
6091 } else {
6092 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6093 }
6094 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6095 tw32(SNDDATAI_STATSENAB, 0xffffff);
6096 tw32(SNDDATAI_STATSCTRL,
6097 (SNDDATAI_SCTRL_ENABLE |
6098 SNDDATAI_SCTRL_FASTUPD));
6099
6100 /* Setup host coalescing engine. */
6101 tw32(HOSTCC_MODE, 0);
6102 for (i = 0; i < 2000; i++) {
6103 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6104 break;
6105 udelay(10);
6106 }
6107
d244c892 6108 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6109
6110 /* set status block DMA address */
6111 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6112 ((u64) tp->status_mapping >> 32));
6113 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6114 ((u64) tp->status_mapping & 0xffffffff));
6115
6116 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6117 /* Status/statistics block address. See tg3_timer,
6118 * the tg3_periodic_fetch_stats call there, and
6119 * tg3_get_stats to see how this works for 5705/5750 chips.
6120 */
1da177e4
LT
6121 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6122 ((u64) tp->stats_mapping >> 32));
6123 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6124 ((u64) tp->stats_mapping & 0xffffffff));
6125 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6126 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6127 }
6128
6129 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6130
6131 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6132 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6133 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6134 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6135
6136 /* Clear statistics/status block in chip, and status block in ram. */
6137 for (i = NIC_SRAM_STATS_BLK;
6138 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6139 i += sizeof(u32)) {
6140 tg3_write_mem(tp, i, 0);
6141 udelay(40);
6142 }
6143 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6144
c94e3941
MC
6145 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6146 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6147 /* reset to prevent losing 1st rx packet intermittently */
6148 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6149 udelay(10);
6150 }
6151
1da177e4
LT
6152 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6153 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6154 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6155 udelay(40);
6156
314fba34
MC
6157 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6158 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6159 * register to preserve the GPIO settings for LOMs. The GPIOs,
6160 * whether used as inputs or outputs, are set by boot code after
6161 * reset.
6162 */
6163 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6164 u32 gpio_mask;
6165
6166 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6167 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
6168
6169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6170 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6171 GRC_LCLCTRL_GPIO_OUTPUT3;
6172
af36e6b6
MC
6173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6174 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6175
314fba34
MC
6176 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6177
6178 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
6179 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6180 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 6181 }
1da177e4
LT
6182 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6183 udelay(100);
6184
09ee929c 6185 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 6186 tp->last_tag = 0;
1da177e4
LT
6187
6188 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6189 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6190 udelay(40);
6191 }
6192
6193 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6194 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6195 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6196 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6197 WDMAC_MODE_LNGREAD_ENAB);
6198
85e94ced
MC
6199 /* If statement applies to 5705 and 5750 PCI devices only */
6200 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6201 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6202 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6203 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6204 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6205 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6206 /* nothing */
6207 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6208 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6209 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6210 val |= WDMAC_MODE_RX_ACCEL;
6211 }
6212 }
6213
d9ab5ad1 6214 /* Enable host coalescing bug fix */
af36e6b6
MC
6215 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6216 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
d9ab5ad1
MC
6217 val |= (1 << 29);
6218
1da177e4
LT
6219 tw32_f(WDMAC_MODE, val);
6220 udelay(40);
6221
6222 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6223 val = tr32(TG3PCI_X_CAPS);
6224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6225 val &= ~PCIX_CAPS_BURST_MASK;
6226 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6227 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6228 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6229 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6230 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6231 val |= (tp->split_mode_max_reqs <<
6232 PCIX_CAPS_SPLIT_SHIFT);
6233 }
6234 tw32(TG3PCI_X_CAPS, val);
6235 }
6236
6237 tw32_f(RDMAC_MODE, rdmac_mode);
6238 udelay(40);
6239
6240 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6241 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6242 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6243 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6244 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6245 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6246 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6247 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6248#if TG3_TSO_SUPPORT != 0
6249 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6250 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6251#endif
6252 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6253 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6254
6255 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6256 err = tg3_load_5701_a0_firmware_fix(tp);
6257 if (err)
6258 return err;
6259 }
6260
6261#if TG3_TSO_SUPPORT != 0
6262 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6263 err = tg3_load_tso_firmware(tp);
6264 if (err)
6265 return err;
6266 }
6267#endif
6268
6269 tp->tx_mode = TX_MODE_ENABLE;
6270 tw32_f(MAC_TX_MODE, tp->tx_mode);
6271 udelay(100);
6272
6273 tp->rx_mode = RX_MODE_ENABLE;
af36e6b6
MC
6274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6275 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6276
1da177e4
LT
6277 tw32_f(MAC_RX_MODE, tp->rx_mode);
6278 udelay(10);
6279
6280 if (tp->link_config.phy_is_low_power) {
6281 tp->link_config.phy_is_low_power = 0;
6282 tp->link_config.speed = tp->link_config.orig_speed;
6283 tp->link_config.duplex = tp->link_config.orig_duplex;
6284 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6285 }
6286
6287 tp->mi_mode = MAC_MI_MODE_BASE;
6288 tw32_f(MAC_MI_MODE, tp->mi_mode);
6289 udelay(80);
6290
6291 tw32(MAC_LED_CTRL, tp->led_ctrl);
6292
6293 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6294 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6295 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6296 udelay(10);
6297 }
6298 tw32_f(MAC_RX_MODE, tp->rx_mode);
6299 udelay(10);
6300
6301 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6302 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6303 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6304 /* Set drive transmission level to 1.2V */
6305 /* only if the signal pre-emphasis bit is not set */
6306 val = tr32(MAC_SERDES_CFG);
6307 val &= 0xfffff000;
6308 val |= 0x880;
6309 tw32(MAC_SERDES_CFG, val);
6310 }
6311 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6312 tw32(MAC_SERDES_CFG, 0x616000);
6313 }
6314
6315 /* Prevent chip from dropping frames when flow control
6316 * is enabled.
6317 */
6318 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6319
6320 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6321 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6322 /* Use hardware link auto-negotiation */
6323 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6324 }
6325
d4d2c558
MC
6326 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6327 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6328 u32 tmp;
6329
6330 tmp = tr32(SERDES_RX_CTRL);
6331 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6332 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6333 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6334 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6335 }
6336
1da177e4
LT
6337 err = tg3_setup_phy(tp, 1);
6338 if (err)
6339 return err;
6340
6341 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6342 u32 tmp;
6343
6344 /* Clear CRC stats. */
6345 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6346 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6347 tg3_readphy(tp, 0x14, &tmp);
6348 }
6349 }
6350
6351 __tg3_set_rx_mode(tp->dev);
6352
6353 /* Initialize receive rules. */
6354 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6355 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6356 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6357 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6358
4cf78e4f 6359 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6360 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6361 limit = 8;
6362 else
6363 limit = 16;
6364 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6365 limit -= 4;
6366 switch (limit) {
6367 case 16:
6368 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6369 case 15:
6370 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6371 case 14:
6372 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6373 case 13:
6374 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6375 case 12:
6376 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6377 case 11:
6378 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6379 case 10:
6380 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6381 case 9:
6382 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6383 case 8:
6384 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6385 case 7:
6386 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6387 case 6:
6388 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6389 case 5:
6390 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6391 case 4:
6392 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6393 case 3:
6394 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6395 case 2:
6396 case 1:
6397
6398 default:
6399 break;
6400 };
6401
6402 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6403
1da177e4
LT
6404 return 0;
6405}
6406
6407/* Called at device open time to get the chip ready for
6408 * packet processing. Invoked with tp->lock held.
6409 */
6410static int tg3_init_hw(struct tg3 *tp)
6411{
6412 int err;
6413
6414 /* Force the chip into D0. */
bc1c7567 6415 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
6416 if (err)
6417 goto out;
6418
6419 tg3_switch_clocks(tp);
6420
6421 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6422
6423 err = tg3_reset_hw(tp);
6424
6425out:
6426 return err;
6427}
6428
/* Accumulate a 32-bit hardware statistics register REG into the 64-bit
 * software counter PSTAT (split into ->low/->high 32-bit words).
 * Carry into the high word is detected via unsigned wraparound of the
 * low-word addition.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6435
/* Fold the MAC's per-counter TX/RX statistics registers into the
 * driver's 64-bit counters in tp->hw_stats.  Called once per second
 * from tg3_timer() on 5705-plus chips, which lack a DMA'd statistics
 * block (see the HOSTCC_STATS_BLK setup in tg3_reset_hw()).
 *
 * NOTE(review): each TG3_STAT_ADD32 performs a register read; the
 * reads appear to be the mechanism that drains the hardware counters,
 * so do not skip or deduplicate entries — confirm against the Tigon3
 * register manual.  Skipped while the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* No link: nothing can have counted; avoid touching the MAC. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
}
6472
6473static void tg3_timer(unsigned long __opaque)
6474{
6475 struct tg3 *tp = (struct tg3 *) __opaque;
1da177e4 6476
f475f163
MC
6477 if (tp->irq_sync)
6478 goto restart_timer;
6479
f47c11ee 6480 spin_lock(&tp->lock);
1da177e4 6481
fac9b83e
DM
6482 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6483 /* All of this garbage is because when using non-tagged
6484 * IRQ status the mailbox/status_block protocol the chip
6485 * uses with the cpu is race prone.
6486 */
6487 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6488 tw32(GRC_LOCAL_CTRL,
6489 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6490 } else {
6491 tw32(HOSTCC_MODE, tp->coalesce_mode |
6492 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6493 }
1da177e4 6494
fac9b83e
DM
6495 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6496 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
f47c11ee 6497 spin_unlock(&tp->lock);
fac9b83e
DM
6498 schedule_work(&tp->reset_task);
6499 return;
6500 }
1da177e4
LT
6501 }
6502
1da177e4
LT
6503 /* This part only runs once per second. */
6504 if (!--tp->timer_counter) {
fac9b83e
DM
6505 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6506 tg3_periodic_fetch_stats(tp);
6507
1da177e4
LT
6508 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6509 u32 mac_stat;
6510 int phy_event;
6511
6512 mac_stat = tr32(MAC_STATUS);
6513
6514 phy_event = 0;
6515 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6516 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6517 phy_event = 1;
6518 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6519 phy_event = 1;
6520
6521 if (phy_event)
6522 tg3_setup_phy(tp, 0);
6523 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6524 u32 mac_stat = tr32(MAC_STATUS);
6525 int need_setup = 0;
6526
6527 if (netif_carrier_ok(tp->dev) &&
6528 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6529 need_setup = 1;
6530 }
6531 if (! netif_carrier_ok(tp->dev) &&
6532 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6533 MAC_STATUS_SIGNAL_DET))) {
6534 need_setup = 1;
6535 }
6536 if (need_setup) {
6537 tw32_f(MAC_MODE,
6538 (tp->mac_mode &
6539 ~MAC_MODE_PORT_MODE_MASK));
6540 udelay(40);
6541 tw32_f(MAC_MODE, tp->mac_mode);
6542 udelay(40);
6543 tg3_setup_phy(tp, 0);
6544 }
747e8f8b
MC
6545 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6546 tg3_serdes_parallel_detect(tp);
1da177e4
LT
6547
6548 tp->timer_counter = tp->timer_multiplier;
6549 }
6550
28fbef78 6551 /* Heartbeat is only sent once every 2 seconds. */
1da177e4
LT
6552 if (!--tp->asf_counter) {
6553 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6554 u32 val;
6555
bbadf503
MC
6556 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6557 FWCMD_NICDRV_ALIVE2);
6558 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
28fbef78 6559 /* 5 seconds timeout */
bbadf503 6560 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
1da177e4
LT
6561 val = tr32(GRC_RX_CPU_EVENT);
6562 val |= (1 << 14);
6563 tw32(GRC_RX_CPU_EVENT, val);
6564 }
6565 tp->asf_counter = tp->asf_multiplier;
6566 }
6567
f47c11ee 6568 spin_unlock(&tp->lock);
1da177e4 6569
f475f163 6570restart_timer:
1da177e4
LT
6571 tp->timer.expires = jiffies + tp->timer_offset;
6572 add_timer(&tp->timer);
6573}
6574
81789ef5 6575static int tg3_request_irq(struct tg3 *tp)
fcfa0a32
MC
6576{
6577 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6578 unsigned long flags;
6579 struct net_device *dev = tp->dev;
6580
6581 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6582 fn = tg3_msi;
6583 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6584 fn = tg3_msi_1shot;
6585 flags = SA_SAMPLE_RANDOM;
6586 } else {
6587 fn = tg3_interrupt;
6588 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6589 fn = tg3_interrupt_tagged;
6590 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6591 }
6592 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6593}
6594
7938109f
MC
6595static int tg3_test_interrupt(struct tg3 *tp)
6596{
6597 struct net_device *dev = tp->dev;
6598 int err, i;
6599 u32 int_mbox = 0;
6600
d4bc3927
MC
6601 if (!netif_running(dev))
6602 return -ENODEV;
6603
7938109f
MC
6604 tg3_disable_ints(tp);
6605
6606 free_irq(tp->pdev->irq, dev);
6607
6608 err = request_irq(tp->pdev->irq, tg3_test_isr,
f4d0ee98 6609 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
7938109f
MC
6610 if (err)
6611 return err;
6612
38f3843e 6613 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
6614 tg3_enable_ints(tp);
6615
6616 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6617 HOSTCC_MODE_NOW);
6618
6619 for (i = 0; i < 5; i++) {
09ee929c
MC
6620 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6621 TG3_64BIT_REG_LOW);
7938109f
MC
6622 if (int_mbox != 0)
6623 break;
6624 msleep(10);
6625 }
6626
6627 tg3_disable_ints(tp);
6628
6629 free_irq(tp->pdev->irq, dev);
6630
fcfa0a32 6631 err = tg3_request_irq(tp);
7938109f
MC
6632
6633 if (err)
6634 return err;
6635
6636 if (int_mbox != 0)
6637 return 0;
6638
6639 return -EIO;
6640}
6641
6642/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6643 * successfully restored
6644 */
6645static int tg3_test_msi(struct tg3 *tp)
6646{
6647 struct net_device *dev = tp->dev;
6648 int err;
6649 u16 pci_cmd;
6650
6651 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6652 return 0;
6653
6654 /* Turn off SERR reporting in case MSI terminates with Master
6655 * Abort.
6656 */
6657 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6658 pci_write_config_word(tp->pdev, PCI_COMMAND,
6659 pci_cmd & ~PCI_COMMAND_SERR);
6660
6661 err = tg3_test_interrupt(tp);
6662
6663 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6664
6665 if (!err)
6666 return 0;
6667
6668 /* other failures */
6669 if (err != -EIO)
6670 return err;
6671
6672 /* MSI test failed, go back to INTx mode */
6673 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6674 "switching to INTx mode. Please report this failure to "
6675 "the PCI maintainer and include system chipset information.\n",
6676 tp->dev->name);
6677
6678 free_irq(tp->pdev->irq, dev);
6679 pci_disable_msi(tp->pdev);
6680
6681 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6682
fcfa0a32 6683 err = tg3_request_irq(tp);
7938109f
MC
6684 if (err)
6685 return err;
6686
6687 /* Need to reset the chip because the MSI cycle may have terminated
6688 * with Master Abort.
6689 */
f47c11ee 6690 tg3_full_lock(tp, 1);
7938109f 6691
944d980e 6692 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f
MC
6693 err = tg3_init_hw(tp);
6694
f47c11ee 6695 tg3_full_unlock(tp);
7938109f
MC
6696
6697 if (err)
6698 free_irq(tp->pdev->irq, dev);
6699
6700 return err;
6701}
6702
1da177e4
LT
6703static int tg3_open(struct net_device *dev)
6704{
6705 struct tg3 *tp = netdev_priv(dev);
6706 int err;
6707
f47c11ee 6708 tg3_full_lock(tp, 0);
1da177e4 6709
bc1c7567
MC
6710 err = tg3_set_power_state(tp, PCI_D0);
6711 if (err)
6712 return err;
6713
1da177e4
LT
6714 tg3_disable_ints(tp);
6715 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6716
f47c11ee 6717 tg3_full_unlock(tp);
1da177e4
LT
6718
6719 /* The placement of this call is tied
6720 * to the setup and use of Host TX descriptors.
6721 */
6722 err = tg3_alloc_consistent(tp);
6723 if (err)
6724 return err;
6725
88b06bc2
MC
6726 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6727 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
d4d2c558
MC
6728 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6729 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6730 (tp->pdev_peer == tp->pdev))) {
fac9b83e
DM
6731 /* All MSI supporting chips should support tagged
6732 * status. Assert that this is the case.
6733 */
6734 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6735 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6736 "Not using MSI.\n", tp->dev->name);
6737 } else if (pci_enable_msi(tp->pdev) == 0) {
88b06bc2
MC
6738 u32 msi_mode;
6739
6740 msi_mode = tr32(MSGINT_MODE);
6741 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6742 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6743 }
6744 }
fcfa0a32 6745 err = tg3_request_irq(tp);
1da177e4
LT
6746
6747 if (err) {
88b06bc2
MC
6748 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6749 pci_disable_msi(tp->pdev);
6750 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6751 }
1da177e4
LT
6752 tg3_free_consistent(tp);
6753 return err;
6754 }
6755
f47c11ee 6756 tg3_full_lock(tp, 0);
1da177e4
LT
6757
6758 err = tg3_init_hw(tp);
6759 if (err) {
944d980e 6760 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
6761 tg3_free_rings(tp);
6762 } else {
fac9b83e
DM
6763 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6764 tp->timer_offset = HZ;
6765 else
6766 tp->timer_offset = HZ / 10;
6767
6768 BUG_ON(tp->timer_offset > HZ);
6769 tp->timer_counter = tp->timer_multiplier =
6770 (HZ / tp->timer_offset);
6771 tp->asf_counter = tp->asf_multiplier =
28fbef78 6772 ((HZ / tp->timer_offset) * 2);
1da177e4
LT
6773
6774 init_timer(&tp->timer);
6775 tp->timer.expires = jiffies + tp->timer_offset;
6776 tp->timer.data = (unsigned long) tp;
6777 tp->timer.function = tg3_timer;
1da177e4
LT
6778 }
6779
f47c11ee 6780 tg3_full_unlock(tp);
1da177e4
LT
6781
6782 if (err) {
88b06bc2
MC
6783 free_irq(tp->pdev->irq, dev);
6784 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6785 pci_disable_msi(tp->pdev);
6786 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6787 }
1da177e4
LT
6788 tg3_free_consistent(tp);
6789 return err;
6790 }
6791
7938109f
MC
6792 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6793 err = tg3_test_msi(tp);
fac9b83e 6794
7938109f 6795 if (err) {
f47c11ee 6796 tg3_full_lock(tp, 0);
7938109f
MC
6797
6798 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6799 pci_disable_msi(tp->pdev);
6800 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6801 }
944d980e 6802 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f
MC
6803 tg3_free_rings(tp);
6804 tg3_free_consistent(tp);
6805
f47c11ee 6806 tg3_full_unlock(tp);
7938109f
MC
6807
6808 return err;
6809 }
fcfa0a32
MC
6810
6811 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6812 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6813 u32 val = tr32(0x7c04);
6814
6815 tw32(0x7c04, val | (1 << 29));
6816 }
6817 }
7938109f
MC
6818 }
6819
f47c11ee 6820 tg3_full_lock(tp, 0);
1da177e4 6821
7938109f
MC
6822 add_timer(&tp->timer);
6823 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
1da177e4
LT
6824 tg3_enable_ints(tp);
6825
f47c11ee 6826 tg3_full_unlock(tp);
1da177e4
LT
6827
6828 netif_start_queue(dev);
6829
6830 return 0;
6831}
6832
#if 0
/* Debug-only register/state dump; compiled out.  Prints every major
 * chip block's MODE/STATUS registers, the BD ring control blocks, the
 * host status/statistics blocks and a few NIC-side descriptors.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7060
7061static struct net_device_stats *tg3_get_stats(struct net_device *);
7062static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7063
7064static int tg3_close(struct net_device *dev)
7065{
7066 struct tg3 *tp = netdev_priv(dev);
7067
7faa006f
MC
7068 /* Calling flush_scheduled_work() may deadlock because
7069 * linkwatch_event() may be on the workqueue and it will try to get
7070 * the rtnl_lock which we are holding.
7071 */
7072 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7073 msleep(1);
7074
1da177e4
LT
7075 netif_stop_queue(dev);
7076
7077 del_timer_sync(&tp->timer);
7078
f47c11ee 7079 tg3_full_lock(tp, 1);
1da177e4
LT
7080#if 0
7081 tg3_dump_state(tp);
7082#endif
7083
7084 tg3_disable_ints(tp);
7085
944d980e 7086 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7087 tg3_free_rings(tp);
7088 tp->tg3_flags &=
7089 ~(TG3_FLAG_INIT_COMPLETE |
7090 TG3_FLAG_GOT_SERDES_FLOWCTL);
1da177e4 7091
f47c11ee 7092 tg3_full_unlock(tp);
1da177e4 7093
88b06bc2
MC
7094 free_irq(tp->pdev->irq, dev);
7095 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7096 pci_disable_msi(tp->pdev);
7097 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7098 }
1da177e4
LT
7099
7100 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7101 sizeof(tp->net_stats_prev));
7102 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7103 sizeof(tp->estats_prev));
7104
7105 tg3_free_consistent(tp);
7106
bc1c7567
MC
7107 tg3_set_power_state(tp, PCI_D3hot);
7108
7109 netif_carrier_off(tp->dev);
7110
1da177e4
LT
7111 return 0;
7112}
7113
7114static inline unsigned long get_stat64(tg3_stat64_t *val)
7115{
7116 unsigned long ret;
7117
7118#if (BITS_PER_LONG == 32)
7119 ret = val->low;
7120#else
7121 ret = ((u64)val->high << 32) | ((u64)val->low);
7122#endif
7123 return ret;
7124}
7125
7126static unsigned long calc_crc_errors(struct tg3 *tp)
7127{
7128 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7129
7130 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7131 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1da177e4
LT
7133 u32 val;
7134
f47c11ee 7135 spin_lock_bh(&tp->lock);
1da177e4
LT
7136 if (!tg3_readphy(tp, 0x1e, &val)) {
7137 tg3_writephy(tp, 0x1e, val | 0x8000);
7138 tg3_readphy(tp, 0x14, &val);
7139 } else
7140 val = 0;
f47c11ee 7141 spin_unlock_bh(&tp->lock);
1da177e4
LT
7142
7143 tp->phy_crc_errors += val;
7144
7145 return tp->phy_crc_errors;
7146 }
7147
7148 return get_stat64(&hw_stats->rx_fcs_errors);
7149}
7150
7151#define ESTAT_ADD(member) \
7152 estats->member = old_estats->member + \
7153 get_stat64(&hw_stats->member)
7154
7155static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7156{
7157 struct tg3_ethtool_stats *estats = &tp->estats;
7158 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7159 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7160
7161 if (!hw_stats)
7162 return old_estats;
7163
7164 ESTAT_ADD(rx_octets);
7165 ESTAT_ADD(rx_fragments);
7166 ESTAT_ADD(rx_ucast_packets);
7167 ESTAT_ADD(rx_mcast_packets);
7168 ESTAT_ADD(rx_bcast_packets);
7169 ESTAT_ADD(rx_fcs_errors);
7170 ESTAT_ADD(rx_align_errors);
7171 ESTAT_ADD(rx_xon_pause_rcvd);
7172 ESTAT_ADD(rx_xoff_pause_rcvd);
7173 ESTAT_ADD(rx_mac_ctrl_rcvd);
7174 ESTAT_ADD(rx_xoff_entered);
7175 ESTAT_ADD(rx_frame_too_long_errors);
7176 ESTAT_ADD(rx_jabbers);
7177 ESTAT_ADD(rx_undersize_packets);
7178 ESTAT_ADD(rx_in_length_errors);
7179 ESTAT_ADD(rx_out_length_errors);
7180 ESTAT_ADD(rx_64_or_less_octet_packets);
7181 ESTAT_ADD(rx_65_to_127_octet_packets);
7182 ESTAT_ADD(rx_128_to_255_octet_packets);
7183 ESTAT_ADD(rx_256_to_511_octet_packets);
7184 ESTAT_ADD(rx_512_to_1023_octet_packets);
7185 ESTAT_ADD(rx_1024_to_1522_octet_packets);
7186 ESTAT_ADD(rx_1523_to_2047_octet_packets);
7187 ESTAT_ADD(rx_2048_to_4095_octet_packets);
7188 ESTAT_ADD(rx_4096_to_8191_octet_packets);
7189 ESTAT_ADD(rx_8192_to_9022_octet_packets);
7190
7191 ESTAT_ADD(tx_octets);
7192 ESTAT_ADD(tx_collisions);
7193 ESTAT_ADD(tx_xon_sent);
7194 ESTAT_ADD(tx_xoff_sent);
7195 ESTAT_ADD(tx_flow_control);
7196 ESTAT_ADD(tx_mac_errors);
7197 ESTAT_ADD(tx_single_collisions);
7198 ESTAT_ADD(tx_mult_collisions);
7199 ESTAT_ADD(tx_deferred);
7200 ESTAT_ADD(tx_excessive_collisions);
7201 ESTAT_ADD(tx_late_collisions);
7202 ESTAT_ADD(tx_collide_2times);
7203 ESTAT_ADD(tx_collide_3times);
7204 ESTAT_ADD(tx_collide_4times);
7205 ESTAT_ADD(tx_collide_5times);
7206 ESTAT_ADD(tx_collide_6times);
7207 ESTAT_ADD(tx_collide_7times);
7208 ESTAT_ADD(tx_collide_8times);
7209 ESTAT_ADD(tx_collide_9times);
7210 ESTAT_ADD(tx_collide_10times);
7211 ESTAT_ADD(tx_collide_11times);
7212 ESTAT_ADD(tx_collide_12times);
7213 ESTAT_ADD(tx_collide_13times);
7214 ESTAT_ADD(tx_collide_14times);
7215 ESTAT_ADD(tx_collide_15times);
7216 ESTAT_ADD(tx_ucast_packets);
7217 ESTAT_ADD(tx_mcast_packets);
7218 ESTAT_ADD(tx_bcast_packets);
7219 ESTAT_ADD(tx_carrier_sense_errors);
7220 ESTAT_ADD(tx_discards);
7221 ESTAT_ADD(tx_errors);
7222
7223 ESTAT_ADD(dma_writeq_full);
7224 ESTAT_ADD(dma_write_prioq_full);
7225 ESTAT_ADD(rxbds_empty);
7226 ESTAT_ADD(rx_discards);
7227 ESTAT_ADD(rx_errors);
7228 ESTAT_ADD(rx_threshold_hit);
7229
7230 ESTAT_ADD(dma_readq_full);
7231 ESTAT_ADD(dma_read_prioq_full);
7232 ESTAT_ADD(tx_comp_queue_full);
7233
7234 ESTAT_ADD(ring_set_send_prod_index);
7235 ESTAT_ADD(ring_status_update);
7236 ESTAT_ADD(nic_irqs);
7237 ESTAT_ADD(nic_avoided_irqs);
7238 ESTAT_ADD(nic_tx_threshold_hit);
7239
7240 return estats;
7241}
7242
7243static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7244{
7245 struct tg3 *tp = netdev_priv(dev);
7246 struct net_device_stats *stats = &tp->net_stats;
7247 struct net_device_stats *old_stats = &tp->net_stats_prev;
7248 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7249
7250 if (!hw_stats)
7251 return old_stats;
7252
7253 stats->rx_packets = old_stats->rx_packets +
7254 get_stat64(&hw_stats->rx_ucast_packets) +
7255 get_stat64(&hw_stats->rx_mcast_packets) +
7256 get_stat64(&hw_stats->rx_bcast_packets);
7257
7258 stats->tx_packets = old_stats->tx_packets +
7259 get_stat64(&hw_stats->tx_ucast_packets) +
7260 get_stat64(&hw_stats->tx_mcast_packets) +
7261 get_stat64(&hw_stats->tx_bcast_packets);
7262
7263 stats->rx_bytes = old_stats->rx_bytes +
7264 get_stat64(&hw_stats->rx_octets);
7265 stats->tx_bytes = old_stats->tx_bytes +
7266 get_stat64(&hw_stats->tx_octets);
7267
7268 stats->rx_errors = old_stats->rx_errors +
4f63b877 7269 get_stat64(&hw_stats->rx_errors);
1da177e4
LT
7270 stats->tx_errors = old_stats->tx_errors +
7271 get_stat64(&hw_stats->tx_errors) +
7272 get_stat64(&hw_stats->tx_mac_errors) +
7273 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7274 get_stat64(&hw_stats->tx_discards);
7275
7276 stats->multicast = old_stats->multicast +
7277 get_stat64(&hw_stats->rx_mcast_packets);
7278 stats->collisions = old_stats->collisions +
7279 get_stat64(&hw_stats->tx_collisions);
7280
7281 stats->rx_length_errors = old_stats->rx_length_errors +
7282 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7283 get_stat64(&hw_stats->rx_undersize_packets);
7284
7285 stats->rx_over_errors = old_stats->rx_over_errors +
7286 get_stat64(&hw_stats->rxbds_empty);
7287 stats->rx_frame_errors = old_stats->rx_frame_errors +
7288 get_stat64(&hw_stats->rx_align_errors);
7289 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7290 get_stat64(&hw_stats->tx_discards);
7291 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7292 get_stat64(&hw_stats->tx_carrier_sense_errors);
7293
7294 stats->rx_crc_errors = old_stats->rx_crc_errors +
7295 calc_crc_errors(tp);
7296
4f63b877
JL
7297 stats->rx_missed_errors = old_stats->rx_missed_errors +
7298 get_stat64(&hw_stats->rx_discards);
7299
1da177e4
LT
7300 return stats;
7301}
7302
/* Bitwise reflected CRC-32 (polynomial 0xedb88320, init 0xffffffff,
 * final complement) over buf[0..len-1].  Used to pick multicast
 * hash-filter bits.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		reg ^= buf[i];

		for (bit = 0; bit < 8; bit++) {
			u32 lsb = reg & 0x01;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
7327
7328static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7329{
7330 /* accept or reject all multicast frames */
7331 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7332 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7333 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7334 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7335}
7336
7337static void __tg3_set_rx_mode(struct net_device *dev)
7338{
7339 struct tg3 *tp = netdev_priv(dev);
7340 u32 rx_mode;
7341
7342 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7343 RX_MODE_KEEP_VLAN_TAG);
7344
7345 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7346 * flag clear.
7347 */
7348#if TG3_VLAN_TAG_USED
7349 if (!tp->vlgrp &&
7350 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7351 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7352#else
7353 /* By definition, VLAN is disabled always in this
7354 * case.
7355 */
7356 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7357 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7358#endif
7359
7360 if (dev->flags & IFF_PROMISC) {
7361 /* Promiscuous mode. */
7362 rx_mode |= RX_MODE_PROMISC;
7363 } else if (dev->flags & IFF_ALLMULTI) {
7364 /* Accept all multicast. */
7365 tg3_set_multi (tp, 1);
7366 } else if (dev->mc_count < 1) {
7367 /* Reject all multicast. */
7368 tg3_set_multi (tp, 0);
7369 } else {
7370 /* Accept one or more multicast(s). */
7371 struct dev_mc_list *mclist;
7372 unsigned int i;
7373 u32 mc_filter[4] = { 0, };
7374 u32 regidx;
7375 u32 bit;
7376 u32 crc;
7377
7378 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7379 i++, mclist = mclist->next) {
7380
7381 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7382 bit = ~crc & 0x7f;
7383 regidx = (bit & 0x60) >> 5;
7384 bit &= 0x1f;
7385 mc_filter[regidx] |= (1 << bit);
7386 }
7387
7388 tw32(MAC_HASH_REG_0, mc_filter[0]);
7389 tw32(MAC_HASH_REG_1, mc_filter[1]);
7390 tw32(MAC_HASH_REG_2, mc_filter[2]);
7391 tw32(MAC_HASH_REG_3, mc_filter[3]);
7392 }
7393
7394 if (rx_mode != tp->rx_mode) {
7395 tp->rx_mode = rx_mode;
7396 tw32_f(MAC_RX_MODE, rx_mode);
7397 udelay(10);
7398 }
7399}
7400
/* net_device set_rx_mode() handler: take the device locks and apply
 * the RX filtering state.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
7412
/* Size in bytes of the buffer filled by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len() handler. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7419
/* ethtool get_regs hook: snapshot the commonly useful chip registers
 * into the caller-supplied TG3_REGDUMP_LEN buffer.  Registers are laid
 * out at their native offsets within the dump; gaps stay zeroed.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	/* Zero the whole buffer first; unread ranges stay 0. */
	memset(p, 0, TG3_REGDUMP_LEN);

	/* Chip registers are not accessible while in low power state;
	 * return an all-zero dump rather than touching the hardware.
	 */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one 32-bit register into the dump at the current cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Read a register range [base, base+len) at its native dump offset. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Read a single register at its native dump offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM control registers only exist when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7492
7493static int tg3_get_eeprom_len(struct net_device *dev)
7494{
7495 struct tg3 *tp = netdev_priv(dev);
7496
7497 return tp->nvram_size;
7498}
7499
7500static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
1820180b 7501static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
7502
/* ethtool get_eeprom hook: copy an arbitrary byte range out of NVRAM.
 * NVRAM is only addressable in aligned 32-bit words, so the unaligned
 * head and tail of the request are handled by reading a whole word and
 * copying out the relevant bytes.  eeprom->len is updated as we go so
 * a partial result is reported correctly on read failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM cannot be read while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NVRAM words are big-endian; present bytes in LE order. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report how much was successfully read. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7564
7565static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7566
7567static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7568{
7569 struct tg3 *tp = netdev_priv(dev);
7570 int ret;
7571 u32 offset, len, b_offset, odd_len, start, end;
7572 u8 *buf;
7573
bc1c7567
MC
7574 if (tp->link_config.phy_is_low_power)
7575 return -EAGAIN;
7576
1da177e4
LT
7577 if (eeprom->magic != TG3_EEPROM_MAGIC)
7578 return -EINVAL;
7579
7580 offset = eeprom->offset;
7581 len = eeprom->len;
7582
7583 if ((b_offset = (offset & 3))) {
7584 /* adjustments to start on required 4 byte boundary */
7585 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7586 if (ret)
7587 return ret;
7588 start = cpu_to_le32(start);
7589 len += b_offset;
7590 offset &= ~3;
1c8594b4
MC
7591 if (len < 4)
7592 len = 4;
1da177e4
LT
7593 }
7594
7595 odd_len = 0;
1c8594b4 7596 if (len & 3) {
1da177e4
LT
7597 /* adjustments to end on required 4 byte boundary */
7598 odd_len = 1;
7599 len = (len + 3) & ~3;
7600 ret = tg3_nvram_read(tp, offset+len-4, &end);
7601 if (ret)
7602 return ret;
7603 end = cpu_to_le32(end);
7604 }
7605
7606 buf = data;
7607 if (b_offset || odd_len) {
7608 buf = kmalloc(len, GFP_KERNEL);
7609 if (buf == 0)
7610 return -ENOMEM;
7611 if (b_offset)
7612 memcpy(buf, &start, 4);
7613 if (odd_len)
7614 memcpy(buf+len-4, &end, 4);
7615 memcpy(buf + b_offset, data, eeprom->len);
7616 }
7617
7618 ret = tg3_nvram_write_block(tp, offset, len, buf);
7619
7620 if (buf != data)
7621 kfree(buf);
7622
7623 return ret;
7624}
7625
7626static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7627{
7628 struct tg3 *tp = netdev_priv(dev);
7629
7630 cmd->supported = (SUPPORTED_Autoneg);
7631
7632 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7633 cmd->supported |= (SUPPORTED_1000baseT_Half |
7634 SUPPORTED_1000baseT_Full);
7635
a4e2b347 7636 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
7637 cmd->supported |= (SUPPORTED_100baseT_Half |
7638 SUPPORTED_100baseT_Full |
7639 SUPPORTED_10baseT_Half |
7640 SUPPORTED_10baseT_Full |
7641 SUPPORTED_MII);
7642 else
7643 cmd->supported |= SUPPORTED_FIBRE;
7644
7645 cmd->advertising = tp->link_config.advertising;
7646 if (netif_running(dev)) {
7647 cmd->speed = tp->link_config.active_speed;
7648 cmd->duplex = tp->link_config.active_duplex;
7649 }
7650 cmd->port = 0;
7651 cmd->phy_address = PHY_ADDR;
7652 cmd->transceiver = 0;
7653 cmd->autoneg = tp->link_config.autoneg;
7654 cmd->maxtxpkt = 0;
7655 cmd->maxrxpkt = 0;
7656 return 0;
7657}
7658
7659static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7660{
7661 struct tg3 *tp = netdev_priv(dev);
7662
37ff238d 7663 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
7664 /* These are the only valid advertisement bits allowed. */
7665 if (cmd->autoneg == AUTONEG_ENABLE &&
7666 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7667 ADVERTISED_1000baseT_Full |
7668 ADVERTISED_Autoneg |
7669 ADVERTISED_FIBRE)))
7670 return -EINVAL;
37ff238d
MC
7671 /* Fiber can only do SPEED_1000. */
7672 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7673 (cmd->speed != SPEED_1000))
7674 return -EINVAL;
7675 /* Copper cannot force SPEED_1000. */
7676 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7677 (cmd->speed == SPEED_1000))
7678 return -EINVAL;
7679 else if ((cmd->speed == SPEED_1000) &&
7680 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7681 return -EINVAL;
1da177e4 7682
f47c11ee 7683 tg3_full_lock(tp, 0);
1da177e4
LT
7684
7685 tp->link_config.autoneg = cmd->autoneg;
7686 if (cmd->autoneg == AUTONEG_ENABLE) {
7687 tp->link_config.advertising = cmd->advertising;
7688 tp->link_config.speed = SPEED_INVALID;
7689 tp->link_config.duplex = DUPLEX_INVALID;
7690 } else {
7691 tp->link_config.advertising = 0;
7692 tp->link_config.speed = cmd->speed;
7693 tp->link_config.duplex = cmd->duplex;
7694 }
7695
7696 if (netif_running(dev))
7697 tg3_setup_phy(tp, 1);
7698
f47c11ee 7699 tg3_full_unlock(tp);
1da177e4
LT
7700
7701 return 0;
7702}
7703
7704static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7705{
7706 struct tg3 *tp = netdev_priv(dev);
7707
7708 strcpy(info->driver, DRV_MODULE_NAME);
7709 strcpy(info->version, DRV_MODULE_VERSION);
c4e6575c 7710 strcpy(info->fw_version, tp->fw_ver);
1da177e4
LT
7711 strcpy(info->bus_info, pci_name(tp->pdev));
7712}
7713
7714static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7715{
7716 struct tg3 *tp = netdev_priv(dev);
7717
7718 wol->supported = WAKE_MAGIC;
7719 wol->wolopts = 0;
7720 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7721 wol->wolopts = WAKE_MAGIC;
7722 memset(&wol->sopass, 0, sizeof(wol->sopass));
7723}
7724
7725static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7726{
7727 struct tg3 *tp = netdev_priv(dev);
7728
7729 if (wol->wolopts & ~WAKE_MAGIC)
7730 return -EINVAL;
7731 if ((wol->wolopts & WAKE_MAGIC) &&
7732 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7733 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7734 return -EINVAL;
7735
f47c11ee 7736 spin_lock_bh(&tp->lock);
1da177e4
LT
7737 if (wol->wolopts & WAKE_MAGIC)
7738 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7739 else
7740 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 7741 spin_unlock_bh(&tp->lock);
1da177e4
LT
7742
7743 return 0;
7744}
7745
7746static u32 tg3_get_msglevel(struct net_device *dev)
7747{
7748 struct tg3 *tp = netdev_priv(dev);
7749 return tp->msg_enable;
7750}
7751
7752static void tg3_set_msglevel(struct net_device *dev, u32 value)
7753{
7754 struct tg3 *tp = netdev_priv(dev);
7755 tp->msg_enable = value;
7756}
7757
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso hook: reject enabling TSO on chips lacking it. */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	return ethtool_op_set_tso(dev, value);
}
#endif
7771
/* ethtool nway_reset hook: restart copper autonegotiation.
 * Returns -EAGAIN when the interface is down, -EINVAL on serdes
 * (no MII PHY) or when autoneg is not active.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Serdes devices have no MII PHY to poke. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice back to back; the first read
	 * appears to be a deliberate dummy read (discarding a possibly
	 * stale latched value) — confirm against PHY errata before
	 * "simplifying" this away.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Restart (and force-enable) autonegotiation. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7798
7799static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7800{
7801 struct tg3 *tp = netdev_priv(dev);
7802
7803 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7804 ering->rx_mini_max_pending = 0;
4f81c32b
MC
7805 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7806 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7807 else
7808 ering->rx_jumbo_max_pending = 0;
7809
7810 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
7811
7812 ering->rx_pending = tp->rx_pending;
7813 ering->rx_mini_pending = 0;
4f81c32b
MC
7814 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7815 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7816 else
7817 ering->rx_jumbo_pending = 0;
7818
1da177e4
LT
7819 ering->tx_pending = tp->tx_pending;
7820}
7821
/* ethtool set_ringparam hook: resize the rx/tx rings.  If the device
 * is up, traffic is stopped, the chip is halted, and it is fully
 * re-initialized with the new sizes.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	/* Quiesce NAPI/tx before taking the full lock; irq_sync tells
	 * tg3_full_lock() to also synchronize the irq handler.
	 */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard rx ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	/* Restart the chip so the new ring sizes take effect. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
}
7857
7858static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7859{
7860 struct tg3 *tp = netdev_priv(dev);
7861
7862 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7863 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7864 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7865}
7866
/* ethtool set_pauseparam hook: update flow-control flags and, if the
 * interface is up, restart the chip so they take effect.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;

	/* Quiesce traffic first; irq_sync makes tg3_full_lock() also
	 * synchronize the irq handler.
	 */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	/* Restart to apply the new pause configuration. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
}
7902
7903static u32 tg3_get_rx_csum(struct net_device *dev)
7904{
7905 struct tg3 *tp = netdev_priv(dev);
7906 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7907}
7908
7909static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7910{
7911 struct tg3 *tp = netdev_priv(dev);
7912
7913 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7914 if (data != 0)
7915 return -EINVAL;
7916 return 0;
7917 }
7918
f47c11ee 7919 spin_lock_bh(&tp->lock);
1da177e4
LT
7920 if (data)
7921 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7922 else
7923 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 7924 spin_unlock_bh(&tp->lock);
1da177e4
LT
7925
7926 return 0;
7927}
7928
7929static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7930{
7931 struct tg3 *tp = netdev_priv(dev);
7932
7933 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7934 if (data != 0)
7935 return -EINVAL;
7936 return 0;
7937 }
7938
af36e6b6
MC
7939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7940 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9c27dbdf 7941 ethtool_op_set_tx_hw_csum(dev, data);
1da177e4 7942 else
9c27dbdf 7943 ethtool_op_set_tx_csum(dev, data);
1da177e4
LT
7944
7945 return 0;
7946}
7947
7948static int tg3_get_stats_count (struct net_device *dev)
7949{
7950 return TG3_NUM_STATS;
7951}
7952
4cafd3f5
MC
7953static int tg3_get_test_count (struct net_device *dev)
7954{
7955 return TG3_NUM_TEST;
7956}
7957
1da177e4
LT
7958static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7959{
7960 switch (stringset) {
7961 case ETH_SS_STATS:
7962 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7963 break;
4cafd3f5
MC
7964 case ETH_SS_TEST:
7965 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7966 break;
1da177e4
LT
7967 default:
7968 WARN_ON(1); /* we need a WARN() */
7969 break;
7970 }
7971}
7972
4009a93d
MC
7973static int tg3_phys_id(struct net_device *dev, u32 data)
7974{
7975 struct tg3 *tp = netdev_priv(dev);
7976 int i;
7977
7978 if (!netif_running(tp->dev))
7979 return -EAGAIN;
7980
7981 if (data == 0)
7982 data = 2;
7983
7984 for (i = 0; i < (data * 2); i++) {
7985 if ((i % 2) == 0)
7986 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7987 LED_CTRL_1000MBPS_ON |
7988 LED_CTRL_100MBPS_ON |
7989 LED_CTRL_10MBPS_ON |
7990 LED_CTRL_TRAFFIC_OVERRIDE |
7991 LED_CTRL_TRAFFIC_BLINK |
7992 LED_CTRL_TRAFFIC_LED);
7993
7994 else
7995 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7996 LED_CTRL_TRAFFIC_OVERRIDE);
7997
7998 if (msleep_interruptible(500))
7999 break;
8000 }
8001 tw32(MAC_LED_CTRL, tp->led_ctrl);
8002 return 0;
8003}
8004
1da177e4
LT
8005static void tg3_get_ethtool_stats (struct net_device *dev,
8006 struct ethtool_stats *estats, u64 *tmp_stats)
8007{
8008 struct tg3 *tp = netdev_priv(dev);
8009 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8010}
8011
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14

/* Self-test: verify NVRAM checksums.  Regular images are checked with
 * CRC over the bootstrap and manufacturing blocks; selfboot format-1
 * images are checked with a simple byte checksum.  Returns 0 on pass,
 * -EIO/-ENOMEM on failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Decide how much to read based on the image signature. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		/* Selfboot image; only format 1 is testable here. */
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	/* Loop broke early => a read failed. */
	if (i < size)
		goto out;

	/* Selfboot format */
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
		/* Byte-sum of the whole block must be zero. */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8080
ca43007a
MC
8081#define TG3_SERDES_TIMEOUT_SEC 2
8082#define TG3_COPPER_TIMEOUT_SEC 6
8083
8084static int tg3_test_link(struct tg3 *tp)
8085{
8086 int i, max;
8087
8088 if (!netif_running(tp->dev))
8089 return -ENODEV;
8090
4c987487 8091 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
8092 max = TG3_SERDES_TIMEOUT_SEC;
8093 else
8094 max = TG3_COPPER_TIMEOUT_SEC;
8095
8096 for (i = 0; i < max; i++) {
8097 if (netif_carrier_ok(tp->dev))
8098 return 0;
8099
8100 if (msleep_interruptible(1000))
8101 break;
8102 }
8103
8104 return -EIO;
8105}
8106
/* Only test the commonly used registers */
/* Self-test: for each table entry, write all-zeros and then all-ones
 * through the register's read/write mask and verify that read-only
 * bits are unaffected and read/write bits take the written value.
 * The original register contents are restored afterwards.
 * Caller must have the chip halted and hold the full lock.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Entry applicability flags: restrict to / exclude chip families. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
		       	0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel terminates the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8319
7942e1db
MC
8320static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8321{
f71e1309 8322 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
8323 int i;
8324 u32 j;
8325
8326 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8327 for (j = 0; j < len; j += 4) {
8328 u32 val;
8329
8330 tg3_write_mem(tp, offset + j, test_pattern[i]);
8331 tg3_read_mem(tp, offset + j, &val);
8332 if (val != test_pattern[i])
8333 return -EIO;
8334 }
8335 }
8336 return 0;
8337}
8338
8339static int tg3_test_memory(struct tg3 *tp)
8340{
8341 static struct mem_entry {
8342 u32 offset;
8343 u32 len;
8344 } mem_tbl_570x[] = {
38690194 8345 { 0x00000000, 0x00b50},
7942e1db
MC
8346 { 0x00002000, 0x1c000},
8347 { 0xffffffff, 0x00000}
8348 }, mem_tbl_5705[] = {
8349 { 0x00000100, 0x0000c},
8350 { 0x00000200, 0x00008},
7942e1db
MC
8351 { 0x00004000, 0x00800},
8352 { 0x00006000, 0x01000},
8353 { 0x00008000, 0x02000},
8354 { 0x00010000, 0x0e000},
8355 { 0xffffffff, 0x00000}
79f4d13a
MC
8356 }, mem_tbl_5755[] = {
8357 { 0x00000200, 0x00008},
8358 { 0x00004000, 0x00800},
8359 { 0x00006000, 0x00800},
8360 { 0x00008000, 0x02000},
8361 { 0x00010000, 0x0c000},
8362 { 0xffffffff, 0x00000}
7942e1db
MC
8363 };
8364 struct mem_entry *mem_tbl;
8365 int err = 0;
8366 int i;
8367
79f4d13a 8368 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
af36e6b6
MC
8369 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8370 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
79f4d13a
MC
8371 mem_tbl = mem_tbl_5755;
8372 else
8373 mem_tbl = mem_tbl_5705;
8374 } else
7942e1db
MC
8375 mem_tbl = mem_tbl_570x;
8376
8377 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8378 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8379 mem_tbl[i].len)) != 0)
8380 break;
8381 }
8382
8383 return err;
8384}
8385
9f40dead
MC
8386#define TG3_MAC_LOOPBACK 0
8387#define TG3_PHY_LOOPBACK 1
8388
8389static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
c76949a6 8390{
9f40dead 8391 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
c76949a6
MC
8392 u32 desc_idx;
8393 struct sk_buff *skb, *rx_skb;
8394 u8 *tx_data;
8395 dma_addr_t map;
8396 int num_pkts, tx_len, rx_len, i, err;
8397 struct tg3_rx_buffer_desc *desc;
8398
9f40dead 8399 if (loopback_mode == TG3_MAC_LOOPBACK) {
c94e3941
MC
8400 /* HW errata - mac loopback fails in some cases on 5780.
8401 * Normal traffic and PHY loopback are not affected by
8402 * errata.
8403 */
8404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8405 return 0;
8406
9f40dead
MC
8407 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8408 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8409 MAC_MODE_PORT_MODE_GMII;
8410 tw32(MAC_MODE, mac_mode);
8411 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
c94e3941
MC
8412 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8413 BMCR_SPEED1000);
8414 udelay(40);
8415 /* reset to prevent losing 1st rx packet intermittently */
8416 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8417 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8418 udelay(10);
8419 tw32_f(MAC_RX_MODE, tp->rx_mode);
8420 }
9f40dead
MC
8421 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8422 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
ff18ff02 8423 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9f40dead 8424 mac_mode &= ~MAC_MODE_LINK_POLARITY;
ff18ff02
MC
8425 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8426 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8427 }
9f40dead 8428 tw32(MAC_MODE, mac_mode);
9f40dead
MC
8429 }
8430 else
8431 return -EINVAL;
c76949a6
MC
8432
8433 err = -EIO;
8434
c76949a6
MC
8435 tx_len = 1514;
8436 skb = dev_alloc_skb(tx_len);
8437 tx_data = skb_put(skb, tx_len);
8438 memcpy(tx_data, tp->dev->dev_addr, 6);
8439 memset(tx_data + 6, 0x0, 8);
8440
8441 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8442
8443 for (i = 14; i < tx_len; i++)
8444 tx_data[i] = (u8) (i & 0xff);
8445
8446 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8447
8448 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8449 HOSTCC_MODE_NOW);
8450
8451 udelay(10);
8452
8453 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8454
c76949a6
MC
8455 num_pkts = 0;
8456
9f40dead 8457 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
c76949a6 8458
9f40dead 8459 tp->tx_prod++;
c76949a6
MC
8460 num_pkts++;
8461
9f40dead
MC
8462 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8463 tp->tx_prod);
09ee929c 8464 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
c76949a6
MC
8465
8466 udelay(10);
8467
8468 for (i = 0; i < 10; i++) {
8469 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8470 HOSTCC_MODE_NOW);
8471
8472 udelay(10);
8473
8474 tx_idx = tp->hw_status->idx[0].tx_consumer;
8475 rx_idx = tp->hw_status->idx[0].rx_producer;
9f40dead 8476 if ((tx_idx == tp->tx_prod) &&
c76949a6
MC
8477 (rx_idx == (rx_start_idx + num_pkts)))
8478 break;
8479 }
8480
8481 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8482 dev_kfree_skb(skb);
8483
9f40dead 8484 if (tx_idx != tp->tx_prod)
c76949a6
MC
8485 goto out;
8486
8487 if (rx_idx != rx_start_idx + num_pkts)
8488 goto out;
8489
8490 desc = &tp->rx_rcb[rx_start_idx];
8491 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8492 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8493 if (opaque_key != RXD_OPAQUE_RING_STD)
8494 goto out;
8495
8496 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8497 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8498 goto out;
8499
8500 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8501 if (rx_len != tx_len)
8502 goto out;
8503
8504 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8505
8506 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8507 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8508
8509 for (i = 14; i < tx_len; i++) {
8510 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8511 goto out;
8512 }
8513 err = 0;
8514
8515 /* tg3_free_rings will unmap and free the rx_skb */
8516out:
8517 return err;
8518}
8519
9f40dead
MC
8520#define TG3_MAC_LOOPBACK_FAILED 1
8521#define TG3_PHY_LOOPBACK_FAILED 2
8522#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8523 TG3_PHY_LOOPBACK_FAILED)
8524
8525static int tg3_test_loopback(struct tg3 *tp)
8526{
8527 int err = 0;
8528
8529 if (!netif_running(tp->dev))
8530 return TG3_LOOPBACK_FAILED;
8531
8532 tg3_reset_hw(tp);
8533
8534 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8535 err |= TG3_MAC_LOOPBACK_FAILED;
8536 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8537 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8538 err |= TG3_PHY_LOOPBACK_FAILED;
8539 }
8540
8541 return err;
8542}
8543
4cafd3f5
MC
/* ethtool self_test hook.  Online tests (nvram, link) always run; the
 * offline tests (registers, memory, loopback, interrupt) additionally
 * halt and re-initialize the chip.  Each data[] slot is set nonzero on
 * failure of the corresponding test.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Power the chip up for the duration of the tests. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce traffic; irq_sync makes tg3_full_lock() also
		 * synchronize the irq handler.
		 */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its internal CPUs for register and
		 * memory testing; hold the NVRAM lock across the halt.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test must run without the full lock held. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation after the offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp);
			tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Drop back to low power if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8616
1da177e4
LT
8617static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8618{
8619 struct mii_ioctl_data *data = if_mii(ifr);
8620 struct tg3 *tp = netdev_priv(dev);
8621 int err;
8622
8623 switch(cmd) {
8624 case SIOCGMIIPHY:
8625 data->phy_id = PHY_ADDR;
8626
8627 /* fallthru */
8628 case SIOCGMIIREG: {
8629 u32 mii_regval;
8630
8631 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8632 break; /* We have no PHY */
8633
bc1c7567
MC
8634 if (tp->link_config.phy_is_low_power)
8635 return -EAGAIN;
8636
f47c11ee 8637 spin_lock_bh(&tp->lock);
1da177e4 8638 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 8639 spin_unlock_bh(&tp->lock);
1da177e4
LT
8640
8641 data->val_out = mii_regval;
8642
8643 return err;
8644 }
8645
8646 case SIOCSMIIREG:
8647 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8648 break; /* We have no PHY */
8649
8650 if (!capable(CAP_NET_ADMIN))
8651 return -EPERM;
8652
bc1c7567
MC
8653 if (tp->link_config.phy_is_low_power)
8654 return -EAGAIN;
8655
f47c11ee 8656 spin_lock_bh(&tp->lock);
1da177e4 8657 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 8658 spin_unlock_bh(&tp->lock);
1da177e4
LT
8659
8660 return err;
8661
8662 default:
8663 /* do nothing */
8664 break;
8665 }
8666 return -EOPNOTSUPP;
8667}
8668
8669#if TG3_VLAN_TAG_USED
8670static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8671{
8672 struct tg3 *tp = netdev_priv(dev);
8673
f47c11ee 8674 tg3_full_lock(tp, 0);
1da177e4
LT
8675
8676 tp->vlgrp = grp;
8677
8678 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8679 __tg3_set_rx_mode(dev);
8680
f47c11ee 8681 tg3_full_unlock(tp);
1da177e4
LT
8682}
8683
8684static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8685{
8686 struct tg3 *tp = netdev_priv(dev);
8687
f47c11ee 8688 tg3_full_lock(tp, 0);
1da177e4
LT
8689 if (tp->vlgrp)
8690 tp->vlgrp->vlan_devices[vid] = NULL;
f47c11ee 8691 tg3_full_unlock(tp);
1da177e4
LT
8692}
8693#endif
8694
15f9850d
DM
8695static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8696{
8697 struct tg3 *tp = netdev_priv(dev);
8698
8699 memcpy(ec, &tp->coal, sizeof(*ec));
8700 return 0;
8701}
8702
d244c892
MC
8703static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8704{
8705 struct tg3 *tp = netdev_priv(dev);
8706 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8707 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8708
8709 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8710 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8711 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8712 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8713 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8714 }
8715
8716 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8717 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8718 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8719 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8720 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8721 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8722 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8723 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8724 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8725 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8726 return -EINVAL;
8727
8728 /* No rx interrupts will be generated if both are zero */
8729 if ((ec->rx_coalesce_usecs == 0) &&
8730 (ec->rx_max_coalesced_frames == 0))
8731 return -EINVAL;
8732
8733 /* No tx interrupts will be generated if both are zero */
8734 if ((ec->tx_coalesce_usecs == 0) &&
8735 (ec->tx_max_coalesced_frames == 0))
8736 return -EINVAL;
8737
8738 /* Only copy relevant parameters, ignore all others. */
8739 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8740 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8741 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8742 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8743 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8744 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8745 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8746 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8747 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8748
8749 if (netif_running(dev)) {
8750 tg3_full_lock(tp, 0);
8751 __tg3_set_coalesce(tp, &tp->coal);
8752 tg3_full_unlock(tp);
8753 }
8754 return 0;
8755}
8756
1da177e4
LT
8757static struct ethtool_ops tg3_ethtool_ops = {
8758 .get_settings = tg3_get_settings,
8759 .set_settings = tg3_set_settings,
8760 .get_drvinfo = tg3_get_drvinfo,
8761 .get_regs_len = tg3_get_regs_len,
8762 .get_regs = tg3_get_regs,
8763 .get_wol = tg3_get_wol,
8764 .set_wol = tg3_set_wol,
8765 .get_msglevel = tg3_get_msglevel,
8766 .set_msglevel = tg3_set_msglevel,
8767 .nway_reset = tg3_nway_reset,
8768 .get_link = ethtool_op_get_link,
8769 .get_eeprom_len = tg3_get_eeprom_len,
8770 .get_eeprom = tg3_get_eeprom,
8771 .set_eeprom = tg3_set_eeprom,
8772 .get_ringparam = tg3_get_ringparam,
8773 .set_ringparam = tg3_set_ringparam,
8774 .get_pauseparam = tg3_get_pauseparam,
8775 .set_pauseparam = tg3_set_pauseparam,
8776 .get_rx_csum = tg3_get_rx_csum,
8777 .set_rx_csum = tg3_set_rx_csum,
8778 .get_tx_csum = ethtool_op_get_tx_csum,
8779 .set_tx_csum = tg3_set_tx_csum,
8780 .get_sg = ethtool_op_get_sg,
8781 .set_sg = ethtool_op_set_sg,
8782#if TG3_TSO_SUPPORT != 0
8783 .get_tso = ethtool_op_get_tso,
8784 .set_tso = tg3_set_tso,
8785#endif
4cafd3f5
MC
8786 .self_test_count = tg3_get_test_count,
8787 .self_test = tg3_self_test,
1da177e4 8788 .get_strings = tg3_get_strings,
4009a93d 8789 .phys_id = tg3_phys_id,
1da177e4
LT
8790 .get_stats_count = tg3_get_stats_count,
8791 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 8792 .get_coalesce = tg3_get_coalesce,
d244c892 8793 .set_coalesce = tg3_set_coalesce,
2ff43697 8794 .get_perm_addr = ethtool_op_get_perm_addr,
1da177e4
LT
8795};
8796
8797static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8798{
1b27777a 8799 u32 cursize, val, magic;
1da177e4
LT
8800
8801 tp->nvram_size = EEPROM_CHIP_SIZE;
8802
1820180b 8803 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
1da177e4
LT
8804 return;
8805
1b27777a 8806 if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
1da177e4
LT
8807 return;
8808
8809 /*
8810 * Size the chip by reading offsets at increasing powers of two.
8811 * When we encounter our validation signature, we know the addressing
8812 * has wrapped around, and thus have our chip size.
8813 */
1b27777a 8814 cursize = 0x10;
1da177e4
LT
8815
8816 while (cursize < tp->nvram_size) {
1820180b 8817 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
1da177e4
LT
8818 return;
8819
1820180b 8820 if (val == magic)
1da177e4
LT
8821 break;
8822
8823 cursize <<= 1;
8824 }
8825
8826 tp->nvram_size = cursize;
8827}
8828
8829static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8830{
8831 u32 val;
8832
1820180b 8833 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
1b27777a
MC
8834 return;
8835
8836 /* Selfboot format */
1820180b 8837 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
8838 tg3_get_eeprom_size(tp);
8839 return;
8840 }
8841
1da177e4
LT
8842 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8843 if (val != 0) {
8844 tp->nvram_size = (val >> 16) * 1024;
8845 return;
8846 }
8847 }
8848 tp->nvram_size = 0x20000;
8849}
8850
8851static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8852{
8853 u32 nvcfg1;
8854
8855 nvcfg1 = tr32(NVRAM_CFG1);
8856 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8857 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8858 }
8859 else {
8860 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8861 tw32(NVRAM_CFG1, nvcfg1);
8862 }
8863
4c987487 8864 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
a4e2b347 8865 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
8866 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8867 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8868 tp->nvram_jedecnum = JEDEC_ATMEL;
8869 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8870 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8871 break;
8872 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8873 tp->nvram_jedecnum = JEDEC_ATMEL;
8874 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8875 break;
8876 case FLASH_VENDOR_ATMEL_EEPROM:
8877 tp->nvram_jedecnum = JEDEC_ATMEL;
8878 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8879 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8880 break;
8881 case FLASH_VENDOR_ST:
8882 tp->nvram_jedecnum = JEDEC_ST;
8883 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8884 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8885 break;
8886 case FLASH_VENDOR_SAIFUN:
8887 tp->nvram_jedecnum = JEDEC_SAIFUN;
8888 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8889 break;
8890 case FLASH_VENDOR_SST_SMALL:
8891 case FLASH_VENDOR_SST_LARGE:
8892 tp->nvram_jedecnum = JEDEC_SST;
8893 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8894 break;
8895 }
8896 }
8897 else {
8898 tp->nvram_jedecnum = JEDEC_ATMEL;
8899 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8900 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8901 }
8902}
8903
361b4ac2
MC
8904static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8905{
8906 u32 nvcfg1;
8907
8908 nvcfg1 = tr32(NVRAM_CFG1);
8909
e6af301b
MC
8910 /* NVRAM protection for TPM */
8911 if (nvcfg1 & (1 << 27))
8912 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8913
361b4ac2
MC
8914 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8915 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8916 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8917 tp->nvram_jedecnum = JEDEC_ATMEL;
8918 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8919 break;
8920 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8921 tp->nvram_jedecnum = JEDEC_ATMEL;
8922 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8923 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8924 break;
8925 case FLASH_5752VENDOR_ST_M45PE10:
8926 case FLASH_5752VENDOR_ST_M45PE20:
8927 case FLASH_5752VENDOR_ST_M45PE40:
8928 tp->nvram_jedecnum = JEDEC_ST;
8929 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8930 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8931 break;
8932 }
8933
8934 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8935 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8936 case FLASH_5752PAGE_SIZE_256:
8937 tp->nvram_pagesize = 256;
8938 break;
8939 case FLASH_5752PAGE_SIZE_512:
8940 tp->nvram_pagesize = 512;
8941 break;
8942 case FLASH_5752PAGE_SIZE_1K:
8943 tp->nvram_pagesize = 1024;
8944 break;
8945 case FLASH_5752PAGE_SIZE_2K:
8946 tp->nvram_pagesize = 2048;
8947 break;
8948 case FLASH_5752PAGE_SIZE_4K:
8949 tp->nvram_pagesize = 4096;
8950 break;
8951 case FLASH_5752PAGE_SIZE_264:
8952 tp->nvram_pagesize = 264;
8953 break;
8954 }
8955 }
8956 else {
8957 /* For eeprom, set pagesize to maximum eeprom size */
8958 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8959
8960 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8961 tw32(NVRAM_CFG1, nvcfg1);
8962 }
8963}
8964
d3c7b886
MC
8965static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
8966{
8967 u32 nvcfg1;
8968
8969 nvcfg1 = tr32(NVRAM_CFG1);
8970
8971 /* NVRAM protection for TPM */
8972 if (nvcfg1 & (1 << 27))
8973 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8974
8975 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8976 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
8977 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
8978 tp->nvram_jedecnum = JEDEC_ATMEL;
8979 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8980 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8981
8982 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8983 tw32(NVRAM_CFG1, nvcfg1);
8984 break;
8985 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8986 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8987 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8988 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8989 case FLASH_5755VENDOR_ATMEL_FLASH_4:
8990 tp->nvram_jedecnum = JEDEC_ATMEL;
8991 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8992 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8993 tp->nvram_pagesize = 264;
8994 break;
8995 case FLASH_5752VENDOR_ST_M45PE10:
8996 case FLASH_5752VENDOR_ST_M45PE20:
8997 case FLASH_5752VENDOR_ST_M45PE40:
8998 tp->nvram_jedecnum = JEDEC_ST;
8999 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9000 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9001 tp->nvram_pagesize = 256;
9002 break;
9003 }
9004}
9005
1b27777a
MC
9006static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9007{
9008 u32 nvcfg1;
9009
9010 nvcfg1 = tr32(NVRAM_CFG1);
9011
9012 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9013 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9014 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9015 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9016 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9017 tp->nvram_jedecnum = JEDEC_ATMEL;
9018 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9019 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9020
9021 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9022 tw32(NVRAM_CFG1, nvcfg1);
9023 break;
9024 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9025 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9026 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9027 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9028 tp->nvram_jedecnum = JEDEC_ATMEL;
9029 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9030 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9031 tp->nvram_pagesize = 264;
9032 break;
9033 case FLASH_5752VENDOR_ST_M45PE10:
9034 case FLASH_5752VENDOR_ST_M45PE20:
9035 case FLASH_5752VENDOR_ST_M45PE40:
9036 tp->nvram_jedecnum = JEDEC_ST;
9037 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9038 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9039 tp->nvram_pagesize = 256;
9040 break;
9041 }
9042}
9043
1da177e4
LT
9044/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9045static void __devinit tg3_nvram_init(struct tg3 *tp)
9046{
9047 int j;
9048
9049 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9050 return;
9051
9052 tw32_f(GRC_EEPROM_ADDR,
9053 (EEPROM_ADDR_FSM_RESET |
9054 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9055 EEPROM_ADDR_CLKPERD_SHIFT)));
9056
9057 /* XXX schedule_timeout() ... */
9058 for (j = 0; j < 100; j++)
9059 udelay(10);
9060
9061 /* Enable seeprom accesses. */
9062 tw32_f(GRC_LOCAL_CTRL,
9063 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9064 udelay(100);
9065
9066 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9067 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9068 tp->tg3_flags |= TG3_FLAG_NVRAM;
9069
ec41c7df
MC
9070 if (tg3_nvram_lock(tp)) {
9071 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9072 "tg3_nvram_init failed.\n", tp->dev->name);
9073 return;
9074 }
e6af301b 9075 tg3_enable_nvram_access(tp);
1da177e4 9076
361b4ac2
MC
9077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9078 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
9079 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9080 tg3_get_5755_nvram_info(tp);
1b27777a
MC
9081 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9082 tg3_get_5787_nvram_info(tp);
361b4ac2
MC
9083 else
9084 tg3_get_nvram_info(tp);
9085
1da177e4
LT
9086 tg3_get_nvram_size(tp);
9087
e6af301b 9088 tg3_disable_nvram_access(tp);
381291b7 9089 tg3_nvram_unlock(tp);
1da177e4
LT
9090
9091 } else {
9092 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9093
9094 tg3_get_eeprom_size(tp);
9095 }
9096}
9097
9098static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9099 u32 offset, u32 *val)
9100{
9101 u32 tmp;
9102 int i;
9103
9104 if (offset > EEPROM_ADDR_ADDR_MASK ||
9105 (offset % 4) != 0)
9106 return -EINVAL;
9107
9108 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9109 EEPROM_ADDR_DEVID_MASK |
9110 EEPROM_ADDR_READ);
9111 tw32(GRC_EEPROM_ADDR,
9112 tmp |
9113 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9114 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9115 EEPROM_ADDR_ADDR_MASK) |
9116 EEPROM_ADDR_READ | EEPROM_ADDR_START);
9117
9118 for (i = 0; i < 10000; i++) {
9119 tmp = tr32(GRC_EEPROM_ADDR);
9120
9121 if (tmp & EEPROM_ADDR_COMPLETE)
9122 break;
9123 udelay(100);
9124 }
9125 if (!(tmp & EEPROM_ADDR_COMPLETE))
9126 return -EBUSY;
9127
9128 *val = tr32(GRC_EEPROM_DATA);
9129 return 0;
9130}
9131
9132#define NVRAM_CMD_TIMEOUT 10000
9133
9134static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9135{
9136 int i;
9137
9138 tw32(NVRAM_CMD, nvram_cmd);
9139 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9140 udelay(10);
9141 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9142 udelay(10);
9143 break;
9144 }
9145 }
9146 if (i == NVRAM_CMD_TIMEOUT) {
9147 return -EBUSY;
9148 }
9149 return 0;
9150}
9151
1820180b
MC
9152static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9153{
9154 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9155 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9156 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9157 (tp->nvram_jedecnum == JEDEC_ATMEL))
9158
9159 addr = ((addr / tp->nvram_pagesize) <<
9160 ATMEL_AT45DB0X1B_PAGE_POS) +
9161 (addr % tp->nvram_pagesize);
9162
9163 return addr;
9164}
9165
c4e6575c
MC
9166static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9167{
9168 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9169 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9170 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9171 (tp->nvram_jedecnum == JEDEC_ATMEL))
9172
9173 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9174 tp->nvram_pagesize) +
9175 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9176
9177 return addr;
9178}
9179
1da177e4
LT
9180static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9181{
9182 int ret;
9183
9184 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9185 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9186 return -EINVAL;
9187 }
9188
9189 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9190 return tg3_nvram_read_using_eeprom(tp, offset, val);
9191
1820180b 9192 offset = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
9193
9194 if (offset > NVRAM_ADDR_MSK)
9195 return -EINVAL;
9196
ec41c7df
MC
9197 ret = tg3_nvram_lock(tp);
9198 if (ret)
9199 return ret;
1da177e4 9200
e6af301b 9201 tg3_enable_nvram_access(tp);
1da177e4
LT
9202
9203 tw32(NVRAM_ADDR, offset);
9204 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9205 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9206
9207 if (ret == 0)
9208 *val = swab32(tr32(NVRAM_RDDATA));
9209
e6af301b 9210 tg3_disable_nvram_access(tp);
1da177e4 9211
381291b7
MC
9212 tg3_nvram_unlock(tp);
9213
1da177e4
LT
9214 return ret;
9215}
9216
1820180b
MC
9217static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9218{
9219 int err;
9220 u32 tmp;
9221
9222 err = tg3_nvram_read(tp, offset, &tmp);
9223 *val = swab32(tmp);
9224 return err;
9225}
9226
1da177e4
LT
9227static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9228 u32 offset, u32 len, u8 *buf)
9229{
9230 int i, j, rc = 0;
9231 u32 val;
9232
9233 for (i = 0; i < len; i += 4) {
9234 u32 addr, data;
9235
9236 addr = offset + i;
9237
9238 memcpy(&data, buf + i, 4);
9239
9240 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9241
9242 val = tr32(GRC_EEPROM_ADDR);
9243 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9244
9245 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9246 EEPROM_ADDR_READ);
9247 tw32(GRC_EEPROM_ADDR, val |
9248 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9249 (addr & EEPROM_ADDR_ADDR_MASK) |
9250 EEPROM_ADDR_START |
9251 EEPROM_ADDR_WRITE);
9252
9253 for (j = 0; j < 10000; j++) {
9254 val = tr32(GRC_EEPROM_ADDR);
9255
9256 if (val & EEPROM_ADDR_COMPLETE)
9257 break;
9258 udelay(100);
9259 }
9260 if (!(val & EEPROM_ADDR_COMPLETE)) {
9261 rc = -EBUSY;
9262 break;
9263 }
9264 }
9265
9266 return rc;
9267}
9268
9269/* offset and length are dword aligned */
9270static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9271 u8 *buf)
9272{
9273 int ret = 0;
9274 u32 pagesize = tp->nvram_pagesize;
9275 u32 pagemask = pagesize - 1;
9276 u32 nvram_cmd;
9277 u8 *tmp;
9278
9279 tmp = kmalloc(pagesize, GFP_KERNEL);
9280 if (tmp == NULL)
9281 return -ENOMEM;
9282
9283 while (len) {
9284 int j;
e6af301b 9285 u32 phy_addr, page_off, size;
1da177e4
LT
9286
9287 phy_addr = offset & ~pagemask;
9288
9289 for (j = 0; j < pagesize; j += 4) {
9290 if ((ret = tg3_nvram_read(tp, phy_addr + j,
9291 (u32 *) (tmp + j))))
9292 break;
9293 }
9294 if (ret)
9295 break;
9296
9297 page_off = offset & pagemask;
9298 size = pagesize;
9299 if (len < size)
9300 size = len;
9301
9302 len -= size;
9303
9304 memcpy(tmp + page_off, buf, size);
9305
9306 offset = offset + (pagesize - page_off);
9307
e6af301b 9308 tg3_enable_nvram_access(tp);
1da177e4
LT
9309
9310 /*
9311 * Before we can erase the flash page, we need
9312 * to issue a special "write enable" command.
9313 */
9314 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9315
9316 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9317 break;
9318
9319 /* Erase the target page */
9320 tw32(NVRAM_ADDR, phy_addr);
9321
9322 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9323 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9324
9325 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9326 break;
9327
9328 /* Issue another write enable to start the write. */
9329 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9330
9331 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9332 break;
9333
9334 for (j = 0; j < pagesize; j += 4) {
9335 u32 data;
9336
9337 data = *((u32 *) (tmp + j));
9338 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9339
9340 tw32(NVRAM_ADDR, phy_addr + j);
9341
9342 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9343 NVRAM_CMD_WR;
9344
9345 if (j == 0)
9346 nvram_cmd |= NVRAM_CMD_FIRST;
9347 else if (j == (pagesize - 4))
9348 nvram_cmd |= NVRAM_CMD_LAST;
9349
9350 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9351 break;
9352 }
9353 if (ret)
9354 break;
9355 }
9356
9357 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9358 tg3_nvram_exec_cmd(tp, nvram_cmd);
9359
9360 kfree(tmp);
9361
9362 return ret;
9363}
9364
9365/* offset and length are dword aligned */
9366static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9367 u8 *buf)
9368{
9369 int i, ret = 0;
9370
9371 for (i = 0; i < len; i += 4, offset += 4) {
9372 u32 data, page_off, phy_addr, nvram_cmd;
9373
9374 memcpy(&data, buf + i, 4);
9375 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9376
9377 page_off = offset % tp->nvram_pagesize;
9378
1820180b 9379 phy_addr = tg3_nvram_phys_addr(tp, offset);
1da177e4
LT
9380
9381 tw32(NVRAM_ADDR, phy_addr);
9382
9383 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9384
9385 if ((page_off == 0) || (i == 0))
9386 nvram_cmd |= NVRAM_CMD_FIRST;
9387 else if (page_off == (tp->nvram_pagesize - 4))
9388 nvram_cmd |= NVRAM_CMD_LAST;
9389
9390 if (i == (len - 4))
9391 nvram_cmd |= NVRAM_CMD_LAST;
9392
4c987487 9393 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
af36e6b6 9394 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
1b27777a 9395 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
4c987487
MC
9396 (tp->nvram_jedecnum == JEDEC_ST) &&
9397 (nvram_cmd & NVRAM_CMD_FIRST)) {
1da177e4
LT
9398
9399 if ((ret = tg3_nvram_exec_cmd(tp,
9400 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9401 NVRAM_CMD_DONE)))
9402
9403 break;
9404 }
9405 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9406 /* We always do complete word writes to eeprom. */
9407 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9408 }
9409
9410 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9411 break;
9412 }
9413 return ret;
9414}
9415
9416/* offset and length are dword aligned */
9417static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9418{
9419 int ret;
9420
9421 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9422 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9423 return -EINVAL;
9424 }
9425
9426 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
9427 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9428 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
9429 udelay(40);
9430 }
9431
9432 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9433 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9434 }
9435 else {
9436 u32 grc_mode;
9437
ec41c7df
MC
9438 ret = tg3_nvram_lock(tp);
9439 if (ret)
9440 return ret;
1da177e4 9441
e6af301b
MC
9442 tg3_enable_nvram_access(tp);
9443 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9444 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 9445 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
9446
9447 grc_mode = tr32(GRC_MODE);
9448 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9449
9450 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9451 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9452
9453 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9454 buf);
9455 }
9456 else {
9457 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9458 buf);
9459 }
9460
9461 grc_mode = tr32(GRC_MODE);
9462 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9463
e6af301b 9464 tg3_disable_nvram_access(tp);
1da177e4
LT
9465 tg3_nvram_unlock(tp);
9466 }
9467
9468 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 9469 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
9470 udelay(40);
9471 }
9472
9473 return ret;
9474}
9475
9476struct subsys_tbl_ent {
9477 u16 subsys_vendor, subsys_devid;
9478 u32 phy_id;
9479};
9480
9481static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9482 /* Broadcom boards. */
9483 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9484 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9485 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9486 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9487 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9488 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9489 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9490 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9491 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9492 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9493 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9494
9495 /* 3com boards. */
9496 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9497 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9498 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9499 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9500 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9501
9502 /* DELL boards. */
9503 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9504 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9505 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9506 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9507
9508 /* Compaq boards. */
9509 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9510 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9511 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9512 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9513 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9514
9515 /* IBM boards. */
9516 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9517};
9518
9519static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9520{
9521 int i;
9522
9523 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9524 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9525 tp->pdev->subsystem_vendor) &&
9526 (subsys_id_to_phy_id[i].subsys_devid ==
9527 tp->pdev->subsystem_device))
9528 return &subsys_id_to_phy_id[i];
9529 }
9530 return NULL;
9531}
9532
7d0c41ef 9533static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 9534{
1da177e4 9535 u32 val;
caf636c7
MC
9536 u16 pmcsr;
9537
9538 /* On some early chips the SRAM cannot be accessed in D3hot state,
9539 * so need make sure we're in D0.
9540 */
9541 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9542 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9543 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9544 msleep(1);
7d0c41ef
MC
9545
9546 /* Make sure register accesses (indirect or otherwise)
9547 * will function correctly.
9548 */
9549 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9550 tp->misc_host_ctrl);
1da177e4
LT
9551
9552 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
9553 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9554
72b845e0 9555 /* Do not even try poking around in here on Sun parts. */
bbadf503
MC
9556 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9557 /* All SUN chips are built-in LOMs. */
9558 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
72b845e0 9559 return;
bbadf503 9560 }
72b845e0 9561
1da177e4
LT
9562 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9563 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9564 u32 nic_cfg, led_cfg;
7d0c41ef
MC
9565 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9566 int eeprom_phy_serdes = 0;
1da177e4
LT
9567
9568 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9569 tp->nic_sram_data_cfg = nic_cfg;
9570
9571 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9572 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9573 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9574 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9575 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9576 (ver > 0) && (ver < 0x100))
9577 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9578
1da177e4
LT
9579 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9580 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9581 eeprom_phy_serdes = 1;
9582
9583 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9584 if (nic_phy_id != 0) {
9585 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9586 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9587
9588 eeprom_phy_id = (id1 >> 16) << 10;
9589 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9590 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9591 } else
9592 eeprom_phy_id = 0;
9593
7d0c41ef 9594 tp->phy_id = eeprom_phy_id;
747e8f8b 9595 if (eeprom_phy_serdes) {
a4e2b347 9596 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
747e8f8b
MC
9597 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9598 else
9599 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9600 }
7d0c41ef 9601
cbf46853 9602 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9603 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9604 SHASTA_EXT_LED_MODE_MASK);
cbf46853 9605 else
1da177e4
LT
9606 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9607
9608 switch (led_cfg) {
9609 default:
9610 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9611 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9612 break;
9613
9614 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9615 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9616 break;
9617
9618 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9619 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
9620
9621 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9622 * read on some older 5700/5701 bootcode.
9623 */
9624 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9625 ASIC_REV_5700 ||
9626 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9627 ASIC_REV_5701)
9628 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9629
1da177e4
LT
9630 break;
9631
9632 case SHASTA_EXT_LED_SHARED:
9633 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9634 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9635 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9636 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9637 LED_CTRL_MODE_PHY_2);
9638 break;
9639
9640 case SHASTA_EXT_LED_MAC:
9641 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9642 break;
9643
9644 case SHASTA_EXT_LED_COMBO:
9645 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9646 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9647 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9648 LED_CTRL_MODE_PHY_2);
9649 break;
9650
9651 };
9652
9653 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9655 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9656 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9657
bbadf503 9658 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
1da177e4
LT
9659 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9660
9661 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9662 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 9663 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9664 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9665 }
9666 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9667 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9668
9669 if (cfg2 & (1 << 17))
9670 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9671
9672 /* serdes signal pre-emphasis in register 0x590 set by */
9673 /* bootcode if bit 18 is set */
9674 if (cfg2 & (1 << 18))
9675 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9676 }
7d0c41ef
MC
9677}
9678
9679static int __devinit tg3_phy_probe(struct tg3 *tp)
9680{
9681 u32 hw_phy_id_1, hw_phy_id_2;
9682 u32 hw_phy_id, hw_phy_id_masked;
9683 int err;
1da177e4
LT
9684
9685 /* Reading the PHY ID register can conflict with ASF
9686 * firwmare access to the PHY hardware.
9687 */
9688 err = 0;
9689 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9690 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9691 } else {
9692 /* Now read the physical PHY_ID from the chip and verify
9693 * that it is sane. If it doesn't look good, we fall back
9694 * to either the hard-coded table based PHY_ID and failing
9695 * that the value found in the eeprom area.
9696 */
9697 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9698 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9699
9700 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9701 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9702 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9703
9704 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9705 }
9706
9707 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9708 tp->phy_id = hw_phy_id;
9709 if (hw_phy_id_masked == PHY_ID_BCM8002)
9710 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
da6b2d01
MC
9711 else
9712 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
1da177e4 9713 } else {
7d0c41ef
MC
9714 if (tp->phy_id != PHY_ID_INVALID) {
9715 /* Do nothing, phy ID already set up in
9716 * tg3_get_eeprom_hw_cfg().
9717 */
1da177e4
LT
9718 } else {
9719 struct subsys_tbl_ent *p;
9720
9721 /* No eeprom signature? Try the hardcoded
9722 * subsys device table.
9723 */
9724 p = lookup_by_subsys(tp);
9725 if (!p)
9726 return -ENODEV;
9727
9728 tp->phy_id = p->phy_id;
9729 if (!tp->phy_id ||
9730 tp->phy_id == PHY_ID_BCM8002)
9731 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9732 }
9733 }
9734
747e8f8b 9735 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
1da177e4
LT
9736 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9737 u32 bmsr, adv_reg, tg3_ctrl;
9738
9739 tg3_readphy(tp, MII_BMSR, &bmsr);
9740 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9741 (bmsr & BMSR_LSTATUS))
9742 goto skip_phy_reset;
9743
9744 err = tg3_phy_reset(tp);
9745 if (err)
9746 return err;
9747
9748 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9749 ADVERTISE_100HALF | ADVERTISE_100FULL |
9750 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9751 tg3_ctrl = 0;
9752 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9753 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9754 MII_TG3_CTRL_ADV_1000_FULL);
9755 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9756 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9757 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9758 MII_TG3_CTRL_ENABLE_AS_MASTER);
9759 }
9760
9761 if (!tg3_copper_is_advertising_all(tp)) {
9762 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9763
9764 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9765 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9766
9767 tg3_writephy(tp, MII_BMCR,
9768 BMCR_ANENABLE | BMCR_ANRESTART);
9769 }
9770 tg3_phy_set_wirespeed(tp);
9771
9772 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9773 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9774 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9775 }
9776
9777skip_phy_reset:
9778 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9779 err = tg3_init_5401phy_dsp(tp);
9780 if (err)
9781 return err;
9782 }
9783
9784 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9785 err = tg3_init_5401phy_dsp(tp);
9786 }
9787
747e8f8b 9788 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1da177e4
LT
9789 tp->link_config.advertising =
9790 (ADVERTISED_1000baseT_Half |
9791 ADVERTISED_1000baseT_Full |
9792 ADVERTISED_Autoneg |
9793 ADVERTISED_FIBRE);
9794 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9795 tp->link_config.advertising &=
9796 ~(ADVERTISED_1000baseT_Half |
9797 ADVERTISED_1000baseT_Full);
9798
9799 return err;
9800}
9801
9802static void __devinit tg3_read_partno(struct tg3 *tp)
9803{
9804 unsigned char vpd_data[256];
9805 int i;
1b27777a 9806 u32 magic;
1da177e4
LT
9807
9808 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9809 /* Sun decided not to put the necessary bits in the
9810 * NVRAM of their onboard tg3 parts :(
9811 */
9812 strcpy(tp->board_part_number, "Sun 570X");
9813 return;
9814 }
9815
1820180b 9816 if (tg3_nvram_read_swab(tp, 0x0, &magic))
1b27777a 9817 return;
1da177e4 9818
1820180b 9819 if (magic == TG3_EEPROM_MAGIC) {
1b27777a
MC
9820 for (i = 0; i < 256; i += 4) {
9821 u32 tmp;
1da177e4 9822
1b27777a
MC
9823 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9824 goto out_not_found;
9825
9826 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9827 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9828 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9829 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9830 }
9831 } else {
9832 int vpd_cap;
9833
9834 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9835 for (i = 0; i < 256; i += 4) {
9836 u32 tmp, j = 0;
9837 u16 tmp16;
9838
9839 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9840 i);
9841 while (j++ < 100) {
9842 pci_read_config_word(tp->pdev, vpd_cap +
9843 PCI_VPD_ADDR, &tmp16);
9844 if (tmp16 & 0x8000)
9845 break;
9846 msleep(1);
9847 }
9848 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9849 &tmp);
9850 tmp = cpu_to_le32(tmp);
9851 memcpy(&vpd_data[i], &tmp, 4);
9852 }
1da177e4
LT
9853 }
9854
9855 /* Now parse and find the part number. */
9856 for (i = 0; i < 256; ) {
9857 unsigned char val = vpd_data[i];
9858 int block_end;
9859
9860 if (val == 0x82 || val == 0x91) {
9861 i = (i + 3 +
9862 (vpd_data[i + 1] +
9863 (vpd_data[i + 2] << 8)));
9864 continue;
9865 }
9866
9867 if (val != 0x90)
9868 goto out_not_found;
9869
9870 block_end = (i + 3 +
9871 (vpd_data[i + 1] +
9872 (vpd_data[i + 2] << 8)));
9873 i += 3;
9874 while (i < block_end) {
9875 if (vpd_data[i + 0] == 'P' &&
9876 vpd_data[i + 1] == 'N') {
9877 int partno_len = vpd_data[i + 2];
9878
9879 if (partno_len > 24)
9880 goto out_not_found;
9881
9882 memcpy(tp->board_part_number,
9883 &vpd_data[i + 3],
9884 partno_len);
9885
9886 /* Success. */
9887 return;
9888 }
9889 }
9890
9891 /* Part number not found. */
9892 goto out_not_found;
9893 }
9894
9895out_not_found:
9896 strcpy(tp->board_part_number, "none");
9897}
9898
c4e6575c
MC
9899static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9900{
9901 u32 val, offset, start;
9902
9903 if (tg3_nvram_read_swab(tp, 0, &val))
9904 return;
9905
9906 if (val != TG3_EEPROM_MAGIC)
9907 return;
9908
9909 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9910 tg3_nvram_read_swab(tp, 0x4, &start))
9911 return;
9912
9913 offset = tg3_nvram_logical_addr(tp, offset);
9914 if (tg3_nvram_read_swab(tp, offset, &val))
9915 return;
9916
9917 if ((val & 0xfc000000) == 0x0c000000) {
9918 u32 ver_offset, addr;
9919 int i;
9920
9921 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9922 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9923 return;
9924
9925 if (val != 0)
9926 return;
9927
9928 addr = offset + ver_offset - start;
9929 for (i = 0; i < 16; i += 4) {
9930 if (tg3_nvram_read(tp, addr + i, &val))
9931 return;
9932
9933 val = cpu_to_le32(val);
9934 memcpy(tp->fw_ver + i, &val, 4);
9935 }
9936 }
9937}
9938
1da177e4
LT
#ifdef CONFIG_SPARC64
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
{
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	/* Return 1 when this device is a Sun onboard 570X part,
	 * identified via OpenBoot PROM properties, else 0.
	 */
	if (pcp != NULL) {
		int node = pcp->prom_node;
		u32 venid;
		int err;

		err = prom_getproperty(node, "subsystem-vendor-id",
				       (char *) &venid, sizeof(venid));
		if (err == 0 || err == -1)
			return 0;
		if (venid == PCI_VENDOR_ID_SUN)
			return 1;

		/* TG3 chips onboard the SunBlade-2500 don't have the
		 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
		 * are distinguishable from non-Sun variants by being
		 * named "network" by the firmware.  Non-Sun cards show
		 * up as "ethernet".
		 */
		if (!strcmp(pcp->prom_name, "network"))
			return 1;
	}
	return 0;
}
#endif
9969
9970static int __devinit tg3_get_invariants(struct tg3 *tp)
9971{
9972 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
9973 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9974 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
399de50b
MC
9975 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9976 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
9977 { },
9978 };
9979 u32 misc_ctrl_reg;
9980 u32 cacheline_sz_reg;
9981 u32 pci_state_reg, grc_misc_cfg;
9982 u32 val;
9983 u16 pci_cmd;
9984 int err;
9985
9986#ifdef CONFIG_SPARC64
9987 if (tg3_is_sun_570X(tp))
9988 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9989#endif
9990
1da177e4
LT
9991 /* Force memory write invalidate off. If we leave it on,
9992 * then on 5700_BX chips we have to enable a workaround.
9993 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9994 * to match the cacheline size. The Broadcom driver have this
9995 * workaround but turns MWI off all the times so never uses
9996 * it. This seems to suggest that the workaround is insufficient.
9997 */
9998 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9999 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10000 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10001
10002 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10003 * has the register indirect write enable bit set before
10004 * we try to access any of the MMIO registers. It is also
10005 * critical that the PCI-X hw workaround situation is decided
10006 * before that as well.
10007 */
10008 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10009 &misc_ctrl_reg);
10010
10011 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10012 MISC_HOST_CTRL_CHIPREV_SHIFT);
10013
ff645bec
MC
10014 /* Wrong chip ID in 5752 A0. This code can be removed later
10015 * as A0 is not in production.
10016 */
10017 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10018 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10019
6892914f
MC
10020 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10021 * we need to disable memory and use config. cycles
10022 * only to access all registers. The 5702/03 chips
10023 * can mistakenly decode the special cycles from the
10024 * ICH chipsets as memory write cycles, causing corruption
10025 * of register and memory space. Only certain ICH bridges
10026 * will drive special cycles with non-zero data during the
10027 * address phase which can fall within the 5703's address
10028 * range. This is not an ICH bug as the PCI spec allows
10029 * non-zero address during special cycles. However, only
10030 * these ICH bridges are known to drive non-zero addresses
10031 * during special cycles.
10032 *
10033 * Since special cycles do not cross PCI bridges, we only
10034 * enable this workaround if the 5703 is on the secondary
10035 * bus of these ICH bridges.
10036 */
10037 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10038 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10039 static struct tg3_dev_id {
10040 u32 vendor;
10041 u32 device;
10042 u32 rev;
10043 } ich_chipsets[] = {
10044 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10045 PCI_ANY_ID },
10046 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10047 PCI_ANY_ID },
10048 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10049 0xa },
10050 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10051 PCI_ANY_ID },
10052 { },
10053 };
10054 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10055 struct pci_dev *bridge = NULL;
10056
10057 while (pci_id->vendor != 0) {
10058 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10059 bridge);
10060 if (!bridge) {
10061 pci_id++;
10062 continue;
10063 }
10064 if (pci_id->rev != PCI_ANY_ID) {
10065 u8 rev;
10066
10067 pci_read_config_byte(bridge, PCI_REVISION_ID,
10068 &rev);
10069 if (rev > pci_id->rev)
10070 continue;
10071 }
10072 if (bridge->subordinate &&
10073 (bridge->subordinate->number ==
10074 tp->pdev->bus->number)) {
10075
10076 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10077 pci_dev_put(bridge);
10078 break;
10079 }
10080 }
10081 }
10082
4a29cc2e
MC
10083 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10084 * DMA addresses > 40-bit. This bridge may have other additional
10085 * 57xx devices behind it in some 4-port NIC designs for example.
10086 * Any tg3 device found behind the bridge will also need the 40-bit
10087 * DMA workaround.
10088 */
a4e2b347
MC
10089 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10091 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 10092 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 10093 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 10094 }
4a29cc2e
MC
10095 else {
10096 struct pci_dev *bridge = NULL;
10097
10098 do {
10099 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10100 PCI_DEVICE_ID_SERVERWORKS_EPB,
10101 bridge);
10102 if (bridge && bridge->subordinate &&
10103 (bridge->subordinate->number <=
10104 tp->pdev->bus->number) &&
10105 (bridge->subordinate->subordinate >=
10106 tp->pdev->bus->number)) {
10107 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10108 pci_dev_put(bridge);
10109 break;
10110 }
10111 } while (bridge);
10112 }
4cf78e4f 10113
1da177e4
LT
10114 /* Initialize misc host control in PCI block. */
10115 tp->misc_host_ctrl |= (misc_ctrl_reg &
10116 MISC_HOST_CTRL_CHIPREV);
10117 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10118 tp->misc_host_ctrl);
10119
10120 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10121 &cacheline_sz_reg);
10122
10123 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10124 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10125 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10126 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10127
6708e5cc 10128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 10129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 10130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 10131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
a4e2b347 10132 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
10133 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10134
1b440c56
JL
10135 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10136 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10137 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10138
5a6f3074 10139 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
af36e6b6
MC
10140 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10141 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
5a6f3074 10142 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
fcfa0a32
MC
10143 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10144 } else
5a6f3074
MC
10145 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10146 }
1da177e4 10147
0f893dc6
MC
10148 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10149 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
d9ab5ad1 10150 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
af36e6b6 10151 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
d9ab5ad1 10152 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
0f893dc6
MC
10153 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10154
1da177e4
LT
10155 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10156 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10157
399de50b
MC
10158 /* If we have an AMD 762 or VIA K8T800 chipset, write
10159 * reordering to the mailbox registers done by the host
10160 * controller can cause major troubles. We read back from
10161 * every mailbox register write to force the writes to be
10162 * posted to the chip in order.
10163 */
10164 if (pci_dev_present(write_reorder_chipsets) &&
10165 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10166 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10167
1da177e4
LT
10168 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10169 tp->pci_lat_timer < 64) {
10170 tp->pci_lat_timer = 64;
10171
10172 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10173 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10174 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10175 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10176
10177 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10178 cacheline_sz_reg);
10179 }
10180
10181 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10182 &pci_state_reg);
10183
10184 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10185 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10186
10187 /* If this is a 5700 BX chipset, and we are in PCI-X
10188 * mode, enable register write workaround.
10189 *
10190 * The workaround is to use indirect register accesses
10191 * for all chip writes not to mailbox registers.
10192 */
10193 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10194 u32 pm_reg;
10195 u16 pci_cmd;
10196
10197 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10198
10199 /* The chip can have it's power management PCI config
10200 * space registers clobbered due to this bug.
10201 * So explicitly force the chip into D0 here.
10202 */
10203 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10204 &pm_reg);
10205 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10206 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10207 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10208 pm_reg);
10209
10210 /* Also, force SERR#/PERR# in PCI command. */
10211 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10212 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10213 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10214 }
10215 }
10216
087fe256
MC
10217 /* 5700 BX chips need to have their TX producer index mailboxes
10218 * written twice to workaround a bug.
10219 */
10220 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10221 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10222
1da177e4
LT
10223 /* Back to back register writes can cause problems on this chip,
10224 * the workaround is to read back all reg writes except those to
10225 * mailbox regs. See tg3_write_indirect_reg32().
10226 *
10227 * PCI Express 5750_A0 rev chips need this workaround too.
10228 */
10229 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10230 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10231 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10232 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10233
10234 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10235 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10236 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10237 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10238
10239 /* Chip-specific fixup from Broadcom driver */
10240 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10241 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10242 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10243 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10244 }
10245
1ee582d8 10246 /* Default fast path register access methods */
20094930 10247 tp->read32 = tg3_read32;
1ee582d8 10248 tp->write32 = tg3_write32;
09ee929c 10249 tp->read32_mbox = tg3_read32;
20094930 10250 tp->write32_mbox = tg3_write32;
1ee582d8
MC
10251 tp->write32_tx_mbox = tg3_write32;
10252 tp->write32_rx_mbox = tg3_write32;
10253
10254 /* Various workaround register access methods */
10255 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10256 tp->write32 = tg3_write_indirect_reg32;
10257 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10258 tp->write32 = tg3_write_flush_reg32;
10259
10260 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10261 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10262 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10263 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10264 tp->write32_rx_mbox = tg3_write_flush_reg32;
10265 }
20094930 10266
6892914f
MC
10267 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10268 tp->read32 = tg3_read_indirect_reg32;
10269 tp->write32 = tg3_write_indirect_reg32;
10270 tp->read32_mbox = tg3_read_indirect_mbox;
10271 tp->write32_mbox = tg3_write_indirect_mbox;
10272 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10273 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10274
10275 iounmap(tp->regs);
22abe310 10276 tp->regs = NULL;
6892914f
MC
10277
10278 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10279 pci_cmd &= ~PCI_COMMAND_MEMORY;
10280 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10281 }
10282
bbadf503
MC
10283 if (tp->write32 == tg3_write_indirect_reg32 ||
10284 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10285 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) ||
10287 (tp->tg3_flags2 & TG3_FLG2_SUN_570X))
10288 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10289
7d0c41ef
MC
10290 /* Get eeprom hw config before calling tg3_set_power_state().
10291 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10292 * determined before calling tg3_set_power_state() so that
10293 * we know whether or not to switch out of Vaux power.
10294 * When the flag is set, it means that GPIO1 is used for eeprom
10295 * write protect and also implies that it is a LOM where GPIOs
10296 * are not used to switch power.
10297 */
10298 tg3_get_eeprom_hw_cfg(tp);
10299
314fba34
MC
10300 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10301 * GPIO1 driven high will bring 5700's external PHY out of reset.
10302 * It is also used as eeprom write protect on LOMs.
10303 */
10304 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10305 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10306 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10307 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10308 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
10309 /* Unused GPIO3 must be driven as output on 5752 because there
10310 * are no pull-up resistors on unused GPIO pins.
10311 */
10312 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10313 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 10314
af36e6b6
MC
10315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10316 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10317
1da177e4 10318 /* Force the chip into D0. */
bc1c7567 10319 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
10320 if (err) {
10321 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10322 pci_name(tp->pdev));
10323 return err;
10324 }
10325
10326 /* 5700 B0 chips do not support checksumming correctly due
10327 * to hardware bugs.
10328 */
10329 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10330 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10331
1da177e4
LT
10332 /* Derive initial jumbo mode from MTU assigned in
10333 * ether_setup() via the alloc_etherdev() call
10334 */
0f893dc6 10335 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 10336 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 10337 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
10338
10339 /* Determine WakeOnLan speed to use. */
10340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10341 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10342 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10343 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10344 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10345 } else {
10346 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10347 }
10348
10349 /* A few boards don't want Ethernet@WireSpeed phy feature */
10350 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10351 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10352 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b
MC
10353 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10354 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
10355 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10356
10357 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10358 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10359 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10360 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10361 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10362
d9ab5ad1 10363 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
af36e6b6 10364 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
d9ab5ad1 10365 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
1da177e4
LT
10366 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10367
1da177e4 10368 tp->coalesce_mode = 0;
1da177e4
LT
10369 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10370 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10371 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10372
10373 /* Initialize MAC MI mode, polling disabled. */
10374 tw32_f(MAC_MI_MODE, tp->mi_mode);
10375 udelay(80);
10376
10377 /* Initialize data/descriptor byte/word swapping. */
10378 val = tr32(GRC_MODE);
10379 val &= GRC_MODE_HOST_STACKUP;
10380 tw32(GRC_MODE, val | tp->grc_mode);
10381
10382 tg3_switch_clocks(tp);
10383
10384 /* Clear this out for sanity. */
10385 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10386
10387 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10388 &pci_state_reg);
10389 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10390 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10391 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10392
10393 if (chiprevid == CHIPREV_ID_5701_A0 ||
10394 chiprevid == CHIPREV_ID_5701_B0 ||
10395 chiprevid == CHIPREV_ID_5701_B2 ||
10396 chiprevid == CHIPREV_ID_5701_B5) {
10397 void __iomem *sram_base;
10398
10399 /* Write some dummy words into the SRAM status block
10400 * area, see if it reads back correctly. If the return
10401 * value is bad, force enable the PCIX workaround.
10402 */
10403 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10404
10405 writel(0x00000000, sram_base);
10406 writel(0x00000000, sram_base + 4);
10407 writel(0xffffffff, sram_base + 4);
10408 if (readl(sram_base) != 0x00000000)
10409 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10410 }
10411 }
10412
10413 udelay(50);
10414 tg3_nvram_init(tp);
10415
10416 grc_misc_cfg = tr32(GRC_MISC_CFG);
10417 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10418
10419 /* Broadcom's driver says that CIOBE multisplit has a bug */
10420#if 0
10421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10422 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10423 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10424 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10425 }
10426#endif
10427 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10428 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10429 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10430 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10431
fac9b83e
DM
10432 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10433 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10434 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10435 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10436 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10437 HOSTCC_MODE_CLRTICK_TXBD);
10438
10439 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10440 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10441 tp->misc_host_ctrl);
10442 }
10443
1da177e4
LT
10444 /* these are limited to 10/100 only */
10445 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10446 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10447 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10448 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10449 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10450 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10451 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10452 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10453 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10454 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10455 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10456
10457 err = tg3_phy_probe(tp);
10458 if (err) {
10459 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10460 pci_name(tp->pdev), err);
10461 /* ... but do not return immediately ... */
10462 }
10463
10464 tg3_read_partno(tp);
c4e6575c 10465 tg3_read_fw_ver(tp);
1da177e4
LT
10466
10467 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10468 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10469 } else {
10470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10471 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10472 else
10473 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10474 }
10475
10476 /* 5700 {AX,BX} chips have a broken status block link
10477 * change bit implementation, so we must use the
10478 * status register in those cases.
10479 */
10480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10481 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10482 else
10483 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10484
10485 /* The led_ctrl is set during tg3_phy_probe, here we might
10486 * have to force the link status polling mechanism based
10487 * upon subsystem IDs.
10488 */
10489 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10490 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10491 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10492 TG3_FLAG_USE_LINKCHG_REG);
10493 }
10494
10495 /* For all SERDES we poll the MAC status register. */
10496 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10497 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10498 else
10499 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10500
5a6f3074 10501 /* All chips before 5787 can get confused if TX buffers
1da177e4
LT
10502 * straddle the 4GB address boundary in some cases.
10503 */
af36e6b6
MC
10504 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10505 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
5a6f3074
MC
10506 tp->dev->hard_start_xmit = tg3_start_xmit;
10507 else
10508 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
1da177e4
LT
10509
10510 tp->rx_offset = 2;
10511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10512 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10513 tp->rx_offset = 0;
10514
10515 /* By default, disable wake-on-lan. User can change this
10516 * using ETHTOOL_SWOL.
10517 */
10518 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10519
10520 return err;
10521}
10522
#ifdef CONFIG_SPARC64
/* Fetch the MAC address from the OpenBoot PROM "local-mac-address"
 * property.  Returns 0 on success, -ENODEV if the property is absent.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;

		if (prom_getproplen(node, "local-mac-address") == 6) {
			prom_getproperty(node, "local-mac-address",
					 dev->dev_addr, 6);
			memcpy(dev->perm_addr, dev->dev_addr, 6);
			return 0;
		}
	}
	return -ENODEV;
}

/* Fall back to the system-wide IDPROM ethernet address.  Always
 * succeeds (returns 0).
 */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
10552
10553static int __devinit tg3_get_device_address(struct tg3 *tp)
10554{
10555 struct net_device *dev = tp->dev;
10556 u32 hi, lo, mac_offset;
008652b3 10557 int addr_ok = 0;
1da177e4
LT
10558
10559#ifdef CONFIG_SPARC64
10560 if (!tg3_get_macaddr_sparc(tp))
10561 return 0;
10562#endif
10563
10564 mac_offset = 0x7c;
4cf78e4f
MC
10565 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10566 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
a4e2b347 10567 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
10568 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10569 mac_offset = 0xcc;
10570 if (tg3_nvram_lock(tp))
10571 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10572 else
10573 tg3_nvram_unlock(tp);
10574 }
10575
10576 /* First try to get it from MAC address mailbox. */
10577 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10578 if ((hi >> 16) == 0x484b) {
10579 dev->dev_addr[0] = (hi >> 8) & 0xff;
10580 dev->dev_addr[1] = (hi >> 0) & 0xff;
10581
10582 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10583 dev->dev_addr[2] = (lo >> 24) & 0xff;
10584 dev->dev_addr[3] = (lo >> 16) & 0xff;
10585 dev->dev_addr[4] = (lo >> 8) & 0xff;
10586 dev->dev_addr[5] = (lo >> 0) & 0xff;
1da177e4 10587
008652b3
MC
10588 /* Some old bootcode may report a 0 MAC address in SRAM */
10589 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10590 }
10591 if (!addr_ok) {
10592 /* Next, try NVRAM. */
10593 if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
10594 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10595 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10596 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10597 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10598 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10599 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10600 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10601 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10602 }
10603 /* Finally just fetch it out of the MAC control regs. */
10604 else {
10605 hi = tr32(MAC_ADDR_0_HIGH);
10606 lo = tr32(MAC_ADDR_0_LOW);
10607
10608 dev->dev_addr[5] = lo & 0xff;
10609 dev->dev_addr[4] = (lo >> 8) & 0xff;
10610 dev->dev_addr[3] = (lo >> 16) & 0xff;
10611 dev->dev_addr[2] = (lo >> 24) & 0xff;
10612 dev->dev_addr[1] = hi & 0xff;
10613 dev->dev_addr[0] = (hi >> 8) & 0xff;
10614 }
1da177e4
LT
10615 }
10616
10617 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10618#ifdef CONFIG_SPARC64
10619 if (!tg3_get_default_macaddr_sparc(tp))
10620 return 0;
10621#endif
10622 return -EINVAL;
10623 }
2ff43697 10624 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
10625 return 0;
10626}
10627
59e6b434
DM
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* Compute the DMA read/write boundary bits to merge into the
 * DMA_RW_CTRL value 'val', based on the PCI cache line size and the
 * host architecture.  Returns 'val' with the appropriate boundary
 * bits ORed in (or unchanged when boundary bits are a no-op for this
 * chip, or when the architecture needs no boundary restriction).
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;	/* register unset: treat as the maximum */
	else
		cacheline_size = (int) byte * 4;	/* register counts dwords */

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Pick a per-architecture policy at compile time. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: boundary choices are 128 or 384 bytes. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		};
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI-E: only write-side boundary control exists. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		};
	} else {
		/* Plain PCI: full range of boundary settings; for the
		 * single-cacheline goal, stop at the cache-line size.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		};
	}

out:
	return val;
}
10767
1da177e4
LT
/* Run one DMA transfer of 'size' bytes between the host buffer at
 * buf/buf_dma and NIC SRAM, driven by a single internal buffer
 * descriptor written into the SRAM descriptor pool.  to_device != 0
 * tests the read-DMA engine (host to NIC); otherwise the write-DMA
 * engine (NIC to host).  Returns 0 if the completion shows up on the
 * FIFO within the polling window, -ENODEV otherwise.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs, DMA status, and buffer manager state
	 * before starting.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one dword at a time via
	 * the PCI memory window, then park the window back at 0.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor to start the transfer. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll for up to 40 * 100us for the descriptor to come back on
	 * the matching completion FIFO.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
10848
ded7340d 10849#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
10850
10851static int __devinit tg3_test_dma(struct tg3 *tp)
10852{
10853 dma_addr_t buf_dma;
59e6b434 10854 u32 *buf, saved_dma_rwctrl;
1da177e4
LT
10855 int ret;
10856
10857 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10858 if (!buf) {
10859 ret = -ENOMEM;
10860 goto out_nofree;
10861 }
10862
10863 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10864 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10865
59e6b434 10866 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
1da177e4
LT
10867
10868 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10869 /* DMA read watermark not used on PCIE */
10870 tp->dma_rwctrl |= 0x00180000;
10871 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
85e94ced
MC
10872 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10873 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
1da177e4
LT
10874 tp->dma_rwctrl |= 0x003f0000;
10875 else
10876 tp->dma_rwctrl |= 0x003f000f;
10877 } else {
10878 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10880 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10881
4a29cc2e
MC
10882 /* If the 5704 is behind the EPB bridge, we can
10883 * do the less restrictive ONE_DMA workaround for
10884 * better performance.
10885 */
10886 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10887 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10888 tp->dma_rwctrl |= 0x8000;
10889 else if (ccval == 0x6 || ccval == 0x7)
1da177e4
LT
10890 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10891
59e6b434 10892 /* Set bit 23 to enable PCIX hw bug fix */
1da177e4 10893 tp->dma_rwctrl |= 0x009f0000;
4cf78e4f
MC
10894 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10895 /* 5780 always in PCIX mode */
10896 tp->dma_rwctrl |= 0x00144000;
a4e2b347
MC
10897 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10898 /* 5714 always in PCIX mode */
10899 tp->dma_rwctrl |= 0x00148000;
1da177e4
LT
10900 } else {
10901 tp->dma_rwctrl |= 0x001b000f;
10902 }
10903 }
10904
10905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10907 tp->dma_rwctrl &= 0xfffffff0;
10908
10909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10911 /* Remove this if it causes problems for some boards. */
10912 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10913
10914 /* On 5700/5701 chips, we need to set this bit.
10915 * Otherwise the chip will issue cacheline transactions
10916 * to streamable DMA memory with not all the byte
10917 * enables turned on. This is an error on several
10918 * RISC PCI controllers, in particular sparc64.
10919 *
10920 * On 5703/5704 chips, this bit has been reassigned
10921 * a different meaning. In particular, it is used
10922 * on those chips to enable a PCI-X workaround.
10923 */
10924 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10925 }
10926
10927 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10928
10929#if 0
10930 /* Unneeded, already done by tg3_get_invariants. */
10931 tg3_switch_clocks(tp);
10932#endif
10933
10934 ret = 0;
10935 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10936 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10937 goto out;
10938
59e6b434
DM
10939 /* It is best to perform DMA test with maximum write burst size
10940 * to expose the 5700/5701 write DMA bug.
10941 */
10942 saved_dma_rwctrl = tp->dma_rwctrl;
10943 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10944 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10945
1da177e4
LT
10946 while (1) {
10947 u32 *p = buf, i;
10948
10949 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10950 p[i] = i;
10951
10952 /* Send the buffer to the chip. */
10953 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10954 if (ret) {
10955 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10956 break;
10957 }
10958
10959#if 0
10960 /* validate data reached card RAM correctly. */
10961 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10962 u32 val;
10963 tg3_read_mem(tp, 0x2100 + (i*4), &val);
10964 if (le32_to_cpu(val) != p[i]) {
10965 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
10966 /* ret = -ENODEV here? */
10967 }
10968 p[i] = 0;
10969 }
10970#endif
10971 /* Now read it back. */
10972 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10973 if (ret) {
10974 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10975
10976 break;
10977 }
10978
10979 /* Verify it. */
10980 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10981 if (p[i] == i)
10982 continue;
10983
59e6b434
DM
10984 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10985 DMA_RWCTRL_WRITE_BNDRY_16) {
10986 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
1da177e4
LT
10987 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10988 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10989 break;
10990 } else {
10991 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10992 ret = -ENODEV;
10993 goto out;
10994 }
10995 }
10996
10997 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10998 /* Success. */
10999 ret = 0;
11000 break;
11001 }
11002 }
59e6b434
DM
11003 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11004 DMA_RWCTRL_WRITE_BNDRY_16) {
6d1cfbab
MC
11005 static struct pci_device_id dma_wait_state_chipsets[] = {
11006 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11007 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11008 { },
11009 };
11010
59e6b434 11011 /* DMA test passed without adjusting DMA boundary,
6d1cfbab
MC
11012 * now look for chipsets that are known to expose the
11013 * DMA bug without failing the test.
59e6b434 11014 */
6d1cfbab
MC
11015 if (pci_dev_present(dma_wait_state_chipsets)) {
11016 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11017 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11018 }
11019 else
11020 /* Safe to use the calculated DMA boundary. */
11021 tp->dma_rwctrl = saved_dma_rwctrl;
11022
59e6b434
DM
11023 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11024 }
1da177e4
LT
11025
11026out:
11027 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11028out_nofree:
11029 return ret;
11030}
11031
11032static void __devinit tg3_init_link_config(struct tg3 *tp)
11033{
11034 tp->link_config.advertising =
11035 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11036 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11037 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11038 ADVERTISED_Autoneg | ADVERTISED_MII);
11039 tp->link_config.speed = SPEED_INVALID;
11040 tp->link_config.duplex = DUPLEX_INVALID;
11041 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
11042 tp->link_config.active_speed = SPEED_INVALID;
11043 tp->link_config.active_duplex = DUPLEX_INVALID;
11044 tp->link_config.phy_is_low_power = 0;
11045 tp->link_config.orig_speed = SPEED_INVALID;
11046 tp->link_config.orig_duplex = DUPLEX_INVALID;
11047 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11048}
11049
11050static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11051{
fdfec172
MC
11052 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11053 tp->bufmgr_config.mbuf_read_dma_low_water =
11054 DEFAULT_MB_RDMA_LOW_WATER_5705;
11055 tp->bufmgr_config.mbuf_mac_rx_low_water =
11056 DEFAULT_MB_MACRX_LOW_WATER_5705;
11057 tp->bufmgr_config.mbuf_high_water =
11058 DEFAULT_MB_HIGH_WATER_5705;
11059
11060 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11061 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11062 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11063 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11064 tp->bufmgr_config.mbuf_high_water_jumbo =
11065 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11066 } else {
11067 tp->bufmgr_config.mbuf_read_dma_low_water =
11068 DEFAULT_MB_RDMA_LOW_WATER;
11069 tp->bufmgr_config.mbuf_mac_rx_low_water =
11070 DEFAULT_MB_MACRX_LOW_WATER;
11071 tp->bufmgr_config.mbuf_high_water =
11072 DEFAULT_MB_HIGH_WATER;
11073
11074 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11075 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11076 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11077 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11078 tp->bufmgr_config.mbuf_high_water_jumbo =
11079 DEFAULT_MB_HIGH_WATER_JUMBO;
11080 }
1da177e4
LT
11081
11082 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11083 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11084}
11085
11086static char * __devinit tg3_phy_string(struct tg3 *tp)
11087{
11088 switch (tp->phy_id & PHY_ID_MASK) {
11089 case PHY_ID_BCM5400: return "5400";
11090 case PHY_ID_BCM5401: return "5401";
11091 case PHY_ID_BCM5411: return "5411";
11092 case PHY_ID_BCM5701: return "5701";
11093 case PHY_ID_BCM5703: return "5703";
11094 case PHY_ID_BCM5704: return "5704";
11095 case PHY_ID_BCM5705: return "5705";
11096 case PHY_ID_BCM5750: return "5750";
85e94ced 11097 case PHY_ID_BCM5752: return "5752";
a4e2b347 11098 case PHY_ID_BCM5714: return "5714";
4cf78e4f 11099 case PHY_ID_BCM5780: return "5780";
af36e6b6 11100 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 11101 case PHY_ID_BCM5787: return "5787";
1da177e4
LT
11102 case PHY_ID_BCM8002: return "8002/serdes";
11103 case 0: return "serdes";
11104 default: return "unknown";
11105 };
11106}
11107
f9804ddb
MC
11108static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11109{
11110 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11111 strcpy(str, "PCI Express");
11112 return str;
11113 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11114 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11115
11116 strcpy(str, "PCIX:");
11117
11118 if ((clock_ctrl == 7) ||
11119 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11120 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11121 strcat(str, "133MHz");
11122 else if (clock_ctrl == 0)
11123 strcat(str, "33MHz");
11124 else if (clock_ctrl == 2)
11125 strcat(str, "50MHz");
11126 else if (clock_ctrl == 4)
11127 strcat(str, "66MHz");
11128 else if (clock_ctrl == 6)
11129 strcat(str, "100MHz");
f9804ddb
MC
11130 } else {
11131 strcpy(str, "PCI:");
11132 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11133 strcat(str, "66MHz");
11134 else
11135 strcat(str, "33MHz");
11136 }
11137 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11138 strcat(str, ":32-bit");
11139 else
11140 strcat(str, ":64-bit");
11141 return str;
11142}
11143
8c2dc7e1 11144static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
1da177e4
LT
11145{
11146 struct pci_dev *peer;
11147 unsigned int func, devnr = tp->pdev->devfn & ~7;
11148
11149 for (func = 0; func < 8; func++) {
11150 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11151 if (peer && peer != tp->pdev)
11152 break;
11153 pci_dev_put(peer);
11154 }
16fe9d74
MC
11155 /* 5704 can be configured in single-port mode, set peer to
11156 * tp->pdev in that case.
11157 */
11158 if (!peer) {
11159 peer = tp->pdev;
11160 return peer;
11161 }
1da177e4
LT
11162
11163 /*
11164 * We don't need to keep the refcount elevated; there's no way
11165 * to remove one half of this device without removing the other
11166 */
11167 pci_dev_put(peer);
11168
11169 return peer;
11170}
11171
15f9850d
DM
11172static void __devinit tg3_init_coal(struct tg3 *tp)
11173{
11174 struct ethtool_coalesce *ec = &tp->coal;
11175
11176 memset(ec, 0, sizeof(*ec));
11177 ec->cmd = ETHTOOL_GCOALESCE;
11178 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11179 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11180 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11181 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11182 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11183 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11184 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11185 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11186 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11187
11188 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11189 HOSTCC_MODE_CLRTICK_TXBD)) {
11190 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11191 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11192 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11193 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11194 }
d244c892
MC
11195
11196 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11197 ec->rx_coalesce_usecs_irq = 0;
11198 ec->tx_coalesce_usecs_irq = 0;
11199 ec->stats_block_coalesce_usecs = 0;
11200 }
15f9850d
DM
11201}
11202
1da177e4
LT
/* PCI probe entry point.  Enables and maps the device, allocates and
 * initializes the net_device/tg3 private state, sizes the DMA masks,
 * determines the MAC address, runs the DMA self-test, and finally
 * registers the netdev.  Returns 0 on success or a negative errno;
 * on failure everything acquired so far is unwound via the goto
 * ladder at the bottom.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory resource (the register window). */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Initialize the private state with driver-wide defaults. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the netdev operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Identify the chip and populate tp->tg3_flags*. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		dev->features |= NETIF_F_TSO;

#endif

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port 5704/5714: find the sibling PCI function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Probe banner: chip, bus, MAC address, and capability flags. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	/* No link yet; report carrier off until the PHY negotiates. */
	netif_carrier_off(tp->dev);

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11528
11529static void __devexit tg3_remove_one(struct pci_dev *pdev)
11530{
11531 struct net_device *dev = pci_get_drvdata(pdev);
11532
11533 if (dev) {
11534 struct tg3 *tp = netdev_priv(dev);
11535
7faa006f 11536 flush_scheduled_work();
1da177e4 11537 unregister_netdev(dev);
6892914f
MC
11538 if (tp->regs) {
11539 iounmap(tp->regs);
22abe310 11540 tp->regs = NULL;
6892914f 11541 }
1da177e4
LT
11542 free_netdev(dev);
11543 pci_release_regions(pdev);
11544 pci_disable_device(pdev);
11545 pci_set_drvdata(pdev, NULL);
11546 }
11547}
11548
/* Power-management suspend hook: quiesce the interface, halt the chip,
 * and enter the PCI power state requested by @state.  On failure the
 * device is brought fully back up so the system keeps a working NIC.
 * Returns 0 on success or the error from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface was never brought up. */
	if (!netif_running(dev))
		return 0;

	/* Let any queued reset/link work finish before tearing down. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* irq_sync=1: also wait out any in-flight interrupt handler. */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Could not reach the low-power state: undo the teardown
		 * and restart the device so it remains usable.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		tg3_init_hw(tp);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}
11592
/* Power-management resume hook: restore PCI config space, return the
 * chip to full power (D0), reinitialize the hardware and restart the
 * interface.  Returns 0 on success or the tg3_set_power_state() error.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* The interface was down at suspend time; nothing to restore. */
	if (!netif_running(dev))
		return 0;

	/* Config space was saved by tg3_suspend()/probe; put it back
	 * before touching the device.
	 */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_init_hw(tp);

	/* Re-arm the periodic driver timer stopped during suspend. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
11624
11625static struct pci_driver tg3_driver = {
11626 .name = DRV_MODULE_NAME,
11627 .id_table = tg3_pci_tbl,
11628 .probe = tg3_init_one,
11629 .remove = __devexit_p(tg3_remove_one),
11630 .suspend = tg3_suspend,
11631 .resume = tg3_resume
11632};
11633
11634static int __init tg3_init(void)
11635{
11636 return pci_module_init(&tg3_driver);
11637}
11638
/* Module exit point: detach the driver from the PCI core, which in
 * turn invokes tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
11643
/* Wire the entry/exit functions into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);