]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Add rx BD workaround
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
14c85021 27#include <linux/in.h>
1da177e4
LT
28#include <linux/init.h>
29#include <linux/ioport.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/if_vlan.h>
37#include <linux/ip.h>
38#include <linux/tcp.h>
39#include <linux/workqueue.h>
61487480 40#include <linux/prefetch.h>
f9a5f7d3 41#include <linux/dma-mapping.h>
1da177e4
LT
42
43#include <net/checksum.h>
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
50#ifdef CONFIG_SPARC64
51#include <asm/idprom.h>
52#include <asm/oplib.h>
53#include <asm/pbm.h>
54#endif
55
56#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57#define TG3_VLAN_TAG_USED 1
58#else
59#define TG3_VLAN_TAG_USED 0
60#endif
61
62#ifdef NETIF_F_TSO
63#define TG3_TSO_SUPPORT 1
64#else
65#define TG3_TSO_SUPPORT 0
66#endif
67
68#include "tg3.h"
69
70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": "
9cb3528c
MC
72#define DRV_MODULE_VERSION "3.60"
73#define DRV_MODULE_RELDATE "June 17, 2006"
1da177e4
LT
74
75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0
77#define TG3_DEF_TX_MODE 0
78#define TG3_DEF_MSG_ENABLE \
79 (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | \
83 NETIF_MSG_IFDOWN | \
84 NETIF_MSG_IFUP | \
85 NETIF_MSG_RX_ERR | \
86 NETIF_MSG_TX_ERR)
87
88/* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
90 */
91#define TG3_TX_TIMEOUT (5 * HZ)
92
93/* hardware minimum and maximum for a single frame's data payload */
94#define TG3_MIN_MTU 60
95#define TG3_MAX_MTU(tp) \
0f893dc6 96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
97
98/* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
101 */
102#define TG3_RX_RING_SIZE 512
103#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JUMBO_RING_SIZE 256
105#define TG3_DEF_RX_JUMBO_RING_PENDING 100
106
107/* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
112 */
113#define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115
116#define TG3_TX_RING_SIZE 512
117#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118
119#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RING_SIZE)
121#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE)
1da177e4 127#define TX_BUFFS_AVAIL(TP) \
51b91468
MC
128 ((TP)->tx_pending - \
129 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
1da177e4
LT
130#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134
135/* minimum number of free TX descriptors required to wake up TX process */
136#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137
138/* number of ETHTOOL_GSTATS u64's */
139#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
4cafd3f5
MC
141#define TG3_NUM_TEST 6
142
1da177e4
LT
143static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148MODULE_LICENSE("GPL");
149MODULE_VERSION(DRV_MODULE_VERSION);
150
151static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152module_param(tg3_debug, int, 0);
153MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155static struct pci_device_id tg3_pci_tbl[] = {
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
6e9017a7 214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
af2bcd97 215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d8659255
XVP
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1da177e4
LT
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d9ab5ad1
MC
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
af36e6b6
MC
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
30b6c28d
MC
232 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d9ab5ad1
MC
234 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
a4e2b347
MC
238 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d4d2c558
MC
240 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
a4e2b347
MC
242 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d4d2c558
MC
244 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
245 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
4cf78e4f
MC
246 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
247 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
249 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1da177e4
LT
250 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
251 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
253 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
255 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
257 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
261 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
263 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
265 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
266 { 0, }
267};
268
269MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
270
271static struct {
272 const char string[ETH_GSTRING_LEN];
273} ethtool_stats_keys[TG3_NUM_STATS] = {
274 { "rx_octets" },
275 { "rx_fragments" },
276 { "rx_ucast_packets" },
277 { "rx_mcast_packets" },
278 { "rx_bcast_packets" },
279 { "rx_fcs_errors" },
280 { "rx_align_errors" },
281 { "rx_xon_pause_rcvd" },
282 { "rx_xoff_pause_rcvd" },
283 { "rx_mac_ctrl_rcvd" },
284 { "rx_xoff_entered" },
285 { "rx_frame_too_long_errors" },
286 { "rx_jabbers" },
287 { "rx_undersize_packets" },
288 { "rx_in_length_errors" },
289 { "rx_out_length_errors" },
290 { "rx_64_or_less_octet_packets" },
291 { "rx_65_to_127_octet_packets" },
292 { "rx_128_to_255_octet_packets" },
293 { "rx_256_to_511_octet_packets" },
294 { "rx_512_to_1023_octet_packets" },
295 { "rx_1024_to_1522_octet_packets" },
296 { "rx_1523_to_2047_octet_packets" },
297 { "rx_2048_to_4095_octet_packets" },
298 { "rx_4096_to_8191_octet_packets" },
299 { "rx_8192_to_9022_octet_packets" },
300
301 { "tx_octets" },
302 { "tx_collisions" },
303
304 { "tx_xon_sent" },
305 { "tx_xoff_sent" },
306 { "tx_flow_control" },
307 { "tx_mac_errors" },
308 { "tx_single_collisions" },
309 { "tx_mult_collisions" },
310 { "tx_deferred" },
311 { "tx_excessive_collisions" },
312 { "tx_late_collisions" },
313 { "tx_collide_2times" },
314 { "tx_collide_3times" },
315 { "tx_collide_4times" },
316 { "tx_collide_5times" },
317 { "tx_collide_6times" },
318 { "tx_collide_7times" },
319 { "tx_collide_8times" },
320 { "tx_collide_9times" },
321 { "tx_collide_10times" },
322 { "tx_collide_11times" },
323 { "tx_collide_12times" },
324 { "tx_collide_13times" },
325 { "tx_collide_14times" },
326 { "tx_collide_15times" },
327 { "tx_ucast_packets" },
328 { "tx_mcast_packets" },
329 { "tx_bcast_packets" },
330 { "tx_carrier_sense_errors" },
331 { "tx_discards" },
332 { "tx_errors" },
333
334 { "dma_writeq_full" },
335 { "dma_write_prioq_full" },
336 { "rxbds_empty" },
337 { "rx_discards" },
338 { "rx_errors" },
339 { "rx_threshold_hit" },
340
341 { "dma_readq_full" },
342 { "dma_read_prioq_full" },
343 { "tx_comp_queue_full" },
344
345 { "ring_set_send_prod_index" },
346 { "ring_status_update" },
347 { "nic_irqs" },
348 { "nic_avoided_irqs" },
349 { "nic_tx_threshold_hit" }
350};
351
4cafd3f5
MC
352static struct {
353 const char string[ETH_GSTRING_LEN];
354} ethtool_test_keys[TG3_NUM_TEST] = {
355 { "nvram test (online) " },
356 { "link test (online) " },
357 { "register test (offline)" },
358 { "memory test (offline)" },
359 { "loopback test (offline)" },
360 { "interrupt test (offline)" },
361};
362
b401e9e2
MC
363static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
364{
365 writel(val, tp->regs + off);
366}
367
368static u32 tg3_read32(struct tg3 *tp, u32 off)
369{
370 return (readl(tp->regs + off));
371}
372
1da177e4
LT
373static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
374{
6892914f
MC
375 unsigned long flags;
376
377 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
378 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
379 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 380 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
381}
382
383static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
384{
385 writel(val, tp->regs + off);
386 readl(tp->regs + off);
1da177e4
LT
387}
388
6892914f 389static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 390{
6892914f
MC
391 unsigned long flags;
392 u32 val;
393
394 spin_lock_irqsave(&tp->indirect_lock, flags);
395 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
396 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
397 spin_unlock_irqrestore(&tp->indirect_lock, flags);
398 return val;
399}
400
401static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
402{
403 unsigned long flags;
404
405 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
406 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
407 TG3_64BIT_REG_LOW, val);
408 return;
409 }
410 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
411 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
412 TG3_64BIT_REG_LOW, val);
413 return;
1da177e4 414 }
6892914f
MC
415
416 spin_lock_irqsave(&tp->indirect_lock, flags);
417 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
418 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
419 spin_unlock_irqrestore(&tp->indirect_lock, flags);
420
421 /* In indirect mode when disabling interrupts, we also need
422 * to clear the interrupt bit in the GRC local ctrl register.
423 */
424 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
425 (val == 0x1)) {
426 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
427 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
428 }
429}
430
431static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
432{
433 unsigned long flags;
434 u32 val;
435
436 spin_lock_irqsave(&tp->indirect_lock, flags);
437 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
438 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
439 spin_unlock_irqrestore(&tp->indirect_lock, flags);
440 return val;
441}
442
b401e9e2
MC
443/* usec_wait specifies the wait time in usec when writing to certain registers
444 * where it is unsafe to read back the register without some delay.
445 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
446 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
447 */
448static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
6892914f 449{
b401e9e2
MC
450 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
451 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
452 /* Non-posted methods */
453 tp->write32(tp, off, val);
454 else {
455 /* Posted method */
456 tg3_write32(tp, off, val);
457 if (usec_wait)
458 udelay(usec_wait);
459 tp->read32(tp, off);
460 }
461 /* Wait again after the read for the posted method to guarantee that
462 * the wait time is met.
463 */
464 if (usec_wait)
465 udelay(usec_wait);
1da177e4
LT
466}
467
09ee929c
MC
468static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
469{
470 tp->write32_mbox(tp, off, val);
6892914f
MC
471 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
472 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
473 tp->read32_mbox(tp, off);
09ee929c
MC
474}
475
20094930 476static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
477{
478 void __iomem *mbox = tp->regs + off;
479 writel(val, mbox);
480 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
481 writel(val, mbox);
482 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
483 readl(mbox);
484}
485
20094930 486#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 487#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
488#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
489#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 490#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
491
492#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
493#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
494#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 495#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
496
497static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
498{
6892914f
MC
499 unsigned long flags;
500
501 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
502 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
503 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
504 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 505
bbadf503
MC
506 /* Always leave this as zero. */
507 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
508 } else {
509 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
510 tw32_f(TG3PCI_MEM_WIN_DATA, val);
28fbef78 511
bbadf503
MC
512 /* Always leave this as zero. */
513 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
514 }
515 spin_unlock_irqrestore(&tp->indirect_lock, flags);
758a6139
DM
516}
517
1da177e4
LT
518static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
519{
6892914f
MC
520 unsigned long flags;
521
522 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
523 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
524 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
525 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 526
bbadf503
MC
527 /* Always leave this as zero. */
528 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
529 } else {
530 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
531 *val = tr32(TG3PCI_MEM_WIN_DATA);
532
533 /* Always leave this as zero. */
534 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
535 }
6892914f 536 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
537}
538
539static void tg3_disable_ints(struct tg3 *tp)
540{
541 tw32(TG3PCI_MISC_HOST_CTRL,
542 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c 543 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1da177e4
LT
544}
545
546static inline void tg3_cond_int(struct tg3 *tp)
547{
38f3843e
MC
548 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
549 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4
LT
550 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
551}
552
553static void tg3_enable_ints(struct tg3 *tp)
554{
bbe832c0
MC
555 tp->irq_sync = 0;
556 wmb();
557
1da177e4
LT
558 tw32(TG3PCI_MISC_HOST_CTRL,
559 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c
MC
560 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
561 (tp->last_tag << 24));
fcfa0a32
MC
562 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
563 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
564 (tp->last_tag << 24));
1da177e4
LT
565 tg3_cond_int(tp);
566}
567
04237ddd
MC
568static inline unsigned int tg3_has_work(struct tg3 *tp)
569{
570 struct tg3_hw_status *sblk = tp->hw_status;
571 unsigned int work_exists = 0;
572
573 /* check for phy events */
574 if (!(tp->tg3_flags &
575 (TG3_FLAG_USE_LINKCHG_REG |
576 TG3_FLAG_POLL_SERDES))) {
577 if (sblk->status & SD_STATUS_LINK_CHG)
578 work_exists = 1;
579 }
580 /* check for RX/TX work to do */
581 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
582 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
583 work_exists = 1;
584
585 return work_exists;
586}
587
1da177e4 588/* tg3_restart_ints
04237ddd
MC
589 * similar to tg3_enable_ints, but it accurately determines whether there
590 * is new work pending and can return without flushing the PIO write
591 * which reenables interrupts
1da177e4
LT
592 */
593static void tg3_restart_ints(struct tg3 *tp)
594{
fac9b83e
DM
595 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
596 tp->last_tag << 24);
1da177e4
LT
597 mmiowb();
598
fac9b83e
DM
599 /* When doing tagged status, this work check is unnecessary.
600 * The last_tag we write above tells the chip which piece of
601 * work we've completed.
602 */
603 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
604 tg3_has_work(tp))
04237ddd
MC
605 tw32(HOSTCC_MODE, tp->coalesce_mode |
606 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
607}
608
609static inline void tg3_netif_stop(struct tg3 *tp)
610{
bbe832c0 611 tp->dev->trans_start = jiffies; /* prevent tx timeout */
1da177e4
LT
612 netif_poll_disable(tp->dev);
613 netif_tx_disable(tp->dev);
614}
615
616static inline void tg3_netif_start(struct tg3 *tp)
617{
618 netif_wake_queue(tp->dev);
619 /* NOTE: unconditional netif_wake_queue is only appropriate
620 * so long as all callers are assured to have free tx slots
621 * (such as after tg3_init_hw)
622 */
623 netif_poll_enable(tp->dev);
f47c11ee
DM
624 tp->hw_status->status |= SD_STATUS_UPDATED;
625 tg3_enable_ints(tp);
1da177e4
LT
626}
627
628static void tg3_switch_clocks(struct tg3 *tp)
629{
630 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
631 u32 orig_clock_ctrl;
632
a4e2b347 633 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4cf78e4f
MC
634 return;
635
1da177e4
LT
636 orig_clock_ctrl = clock_ctrl;
637 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
638 CLOCK_CTRL_CLKRUN_OENABLE |
639 0x1f);
640 tp->pci_clock_ctrl = clock_ctrl;
641
642 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
643 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
b401e9e2
MC
644 tw32_wait_f(TG3PCI_CLOCK_CTRL,
645 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1da177e4
LT
646 }
647 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
b401e9e2
MC
648 tw32_wait_f(TG3PCI_CLOCK_CTRL,
649 clock_ctrl |
650 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
651 40);
652 tw32_wait_f(TG3PCI_CLOCK_CTRL,
653 clock_ctrl | (CLOCK_CTRL_ALTCLK),
654 40);
1da177e4 655 }
b401e9e2 656 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1da177e4
LT
657}
658
659#define PHY_BUSY_LOOPS 5000
660
661static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
662{
663 u32 frame_val;
664 unsigned int loops;
665 int ret;
666
667 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
668 tw32_f(MAC_MI_MODE,
669 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
670 udelay(80);
671 }
672
673 *val = 0x0;
674
675 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
676 MI_COM_PHY_ADDR_MASK);
677 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
678 MI_COM_REG_ADDR_MASK);
679 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
680
681 tw32_f(MAC_MI_COM, frame_val);
682
683 loops = PHY_BUSY_LOOPS;
684 while (loops != 0) {
685 udelay(10);
686 frame_val = tr32(MAC_MI_COM);
687
688 if ((frame_val & MI_COM_BUSY) == 0) {
689 udelay(5);
690 frame_val = tr32(MAC_MI_COM);
691 break;
692 }
693 loops -= 1;
694 }
695
696 ret = -EBUSY;
697 if (loops != 0) {
698 *val = frame_val & MI_COM_DATA_MASK;
699 ret = 0;
700 }
701
702 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
703 tw32_f(MAC_MI_MODE, tp->mi_mode);
704 udelay(80);
705 }
706
707 return ret;
708}
709
710static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
711{
712 u32 frame_val;
713 unsigned int loops;
714 int ret;
715
716 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717 tw32_f(MAC_MI_MODE,
718 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
719 udelay(80);
720 }
721
722 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
723 MI_COM_PHY_ADDR_MASK);
724 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
725 MI_COM_REG_ADDR_MASK);
726 frame_val |= (val & MI_COM_DATA_MASK);
727 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
728
729 tw32_f(MAC_MI_COM, frame_val);
730
731 loops = PHY_BUSY_LOOPS;
732 while (loops != 0) {
733 udelay(10);
734 frame_val = tr32(MAC_MI_COM);
735 if ((frame_val & MI_COM_BUSY) == 0) {
736 udelay(5);
737 frame_val = tr32(MAC_MI_COM);
738 break;
739 }
740 loops -= 1;
741 }
742
743 ret = -EBUSY;
744 if (loops != 0)
745 ret = 0;
746
747 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
748 tw32_f(MAC_MI_MODE, tp->mi_mode);
749 udelay(80);
750 }
751
752 return ret;
753}
754
755static void tg3_phy_set_wirespeed(struct tg3 *tp)
756{
757 u32 val;
758
759 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
760 return;
761
762 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
763 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
764 tg3_writephy(tp, MII_TG3_AUX_CTRL,
765 (val | (1 << 15) | (1 << 4)));
766}
767
768static int tg3_bmcr_reset(struct tg3 *tp)
769{
770 u32 phy_control;
771 int limit, err;
772
773 /* OK, reset it, and poll the BMCR_RESET bit until it
774 * clears or we time out.
775 */
776 phy_control = BMCR_RESET;
777 err = tg3_writephy(tp, MII_BMCR, phy_control);
778 if (err != 0)
779 return -EBUSY;
780
781 limit = 5000;
782 while (limit--) {
783 err = tg3_readphy(tp, MII_BMCR, &phy_control);
784 if (err != 0)
785 return -EBUSY;
786
787 if ((phy_control & BMCR_RESET) == 0) {
788 udelay(40);
789 break;
790 }
791 udelay(10);
792 }
793 if (limit <= 0)
794 return -EBUSY;
795
796 return 0;
797}
798
799static int tg3_wait_macro_done(struct tg3 *tp)
800{
801 int limit = 100;
802
803 while (limit--) {
804 u32 tmp32;
805
806 if (!tg3_readphy(tp, 0x16, &tmp32)) {
807 if ((tmp32 & 0x1000) == 0)
808 break;
809 }
810 }
811 if (limit <= 0)
812 return -EBUSY;
813
814 return 0;
815}
816
817static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
818{
819 static const u32 test_pat[4][6] = {
820 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
821 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
822 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
823 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
824 };
825 int chan;
826
827 for (chan = 0; chan < 4; chan++) {
828 int i;
829
830 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
831 (chan * 0x2000) | 0x0200);
832 tg3_writephy(tp, 0x16, 0x0002);
833
834 for (i = 0; i < 6; i++)
835 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
836 test_pat[chan][i]);
837
838 tg3_writephy(tp, 0x16, 0x0202);
839 if (tg3_wait_macro_done(tp)) {
840 *resetp = 1;
841 return -EBUSY;
842 }
843
844 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
845 (chan * 0x2000) | 0x0200);
846 tg3_writephy(tp, 0x16, 0x0082);
847 if (tg3_wait_macro_done(tp)) {
848 *resetp = 1;
849 return -EBUSY;
850 }
851
852 tg3_writephy(tp, 0x16, 0x0802);
853 if (tg3_wait_macro_done(tp)) {
854 *resetp = 1;
855 return -EBUSY;
856 }
857
858 for (i = 0; i < 6; i += 2) {
859 u32 low, high;
860
861 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
862 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
863 tg3_wait_macro_done(tp)) {
864 *resetp = 1;
865 return -EBUSY;
866 }
867 low &= 0x7fff;
868 high &= 0x000f;
869 if (low != test_pat[chan][i] ||
870 high != test_pat[chan][i+1]) {
871 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
872 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
873 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
874
875 return -EBUSY;
876 }
877 }
878 }
879
880 return 0;
881}
882
883static int tg3_phy_reset_chanpat(struct tg3 *tp)
884{
885 int chan;
886
887 for (chan = 0; chan < 4; chan++) {
888 int i;
889
890 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
891 (chan * 0x2000) | 0x0200);
892 tg3_writephy(tp, 0x16, 0x0002);
893 for (i = 0; i < 6; i++)
894 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
895 tg3_writephy(tp, 0x16, 0x0202);
896 if (tg3_wait_macro_done(tp))
897 return -EBUSY;
898 }
899
900 return 0;
901}
902
903static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
904{
905 u32 reg32, phy9_orig;
906 int retries, do_phy_reset, err;
907
908 retries = 10;
909 do_phy_reset = 1;
910 do {
911 if (do_phy_reset) {
912 err = tg3_bmcr_reset(tp);
913 if (err)
914 return err;
915 do_phy_reset = 0;
916 }
917
918 /* Disable transmitter and interrupt. */
919 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
920 continue;
921
922 reg32 |= 0x3000;
923 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
924
925 /* Set full-duplex, 1000 mbps. */
926 tg3_writephy(tp, MII_BMCR,
927 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
928
929 /* Set to master mode. */
930 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
931 continue;
932
933 tg3_writephy(tp, MII_TG3_CTRL,
934 (MII_TG3_CTRL_AS_MASTER |
935 MII_TG3_CTRL_ENABLE_AS_MASTER));
936
937 /* Enable SM_DSP_CLOCK and 6dB. */
938 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
939
940 /* Block the PHY control access. */
941 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
942 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
943
944 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
945 if (!err)
946 break;
947 } while (--retries);
948
949 err = tg3_phy_reset_chanpat(tp);
950 if (err)
951 return err;
952
953 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
954 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
955
956 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
957 tg3_writephy(tp, 0x16, 0x0000);
958
959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
961 /* Set Extended packet length bit for jumbo frames */
962 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
963 }
964 else {
965 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
966 }
967
968 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
969
970 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
971 reg32 &= ~0x3000;
972 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
973 } else if (!err)
974 err = -EBUSY;
975
976 return err;
977}
978
c8e1e82b
MC
979static void tg3_link_report(struct tg3 *);
980
1da177e4
LT
981/* This will reset the tigon3 PHY if there is no valid
982 * link unless the FORCE argument is non-zero.
983 */
984static int tg3_phy_reset(struct tg3 *tp)
985{
986 u32 phy_status;
987 int err;
988
989 err = tg3_readphy(tp, MII_BMSR, &phy_status);
990 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
991 if (err != 0)
992 return -EBUSY;
993
c8e1e82b
MC
994 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
995 netif_carrier_off(tp->dev);
996 tg3_link_report(tp);
997 }
998
1da177e4
LT
999 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1002 err = tg3_phy_reset_5703_4_5(tp);
1003 if (err)
1004 return err;
1005 goto out;
1006 }
1007
1008 err = tg3_bmcr_reset(tp);
1009 if (err)
1010 return err;
1011
1012out:
1013 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1014 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1015 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1016 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1017 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1018 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1019 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1020 }
1021 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1022 tg3_writephy(tp, 0x1c, 0x8d68);
1023 tg3_writephy(tp, 0x1c, 0x8d68);
1024 }
1025 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1026 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1027 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1028 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1029 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1030 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1031 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1032 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1033 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1034 }
c424cb24
MC
1035 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1036 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1037 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1038 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1039 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1040 }
1da177e4
LT
1041 /* Set Extended packet length bit (bit 14) on all chips that */
1042 /* support jumbo frames */
1043 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1044 /* Cannot do read-modify-write on 5401 */
1045 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
0f893dc6 1046 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1047 u32 phy_reg;
1048
1049 /* Set bit 14 with read-modify-write to preserve other bits */
1050 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1051 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1052 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1053 }
1054
1055 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1056 * jumbo frames transmission.
1057 */
0f893dc6 1058 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1059 u32 phy_reg;
1060
1061 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1062 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1063 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1064 }
1065
1066 tg3_phy_set_wirespeed(tp);
1067 return 0;
1068}
1069
1070static void tg3_frob_aux_power(struct tg3 *tp)
1071{
1072 struct tg3 *tp_peer = tp;
1073
1074 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1075 return;
1076
8c2dc7e1
MC
1077 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1078 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1079 struct net_device *dev_peer;
1080
1081 dev_peer = pci_get_drvdata(tp->pdev_peer);
bc1c7567 1082 /* remove_one() may have been run on the peer. */
8c2dc7e1 1083 if (!dev_peer)
bc1c7567
MC
1084 tp_peer = tp;
1085 else
1086 tp_peer = netdev_priv(dev_peer);
1da177e4
LT
1087 }
1088
1da177e4 1089 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
6921d201
MC
1090 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1091 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1092 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1da177e4
LT
1093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
b401e9e2
MC
1095 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1096 (GRC_LCLCTRL_GPIO_OE0 |
1097 GRC_LCLCTRL_GPIO_OE1 |
1098 GRC_LCLCTRL_GPIO_OE2 |
1099 GRC_LCLCTRL_GPIO_OUTPUT0 |
1100 GRC_LCLCTRL_GPIO_OUTPUT1),
1101 100);
1da177e4
LT
1102 } else {
1103 u32 no_gpio2;
dc56b7d4 1104 u32 grc_local_ctrl = 0;
1da177e4
LT
1105
1106 if (tp_peer != tp &&
1107 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1108 return;
1109
dc56b7d4
MC
1110 /* Workaround to prevent overdrawing Amps. */
1111 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1112 ASIC_REV_5714) {
1113 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
b401e9e2
MC
1114 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1115 grc_local_ctrl, 100);
dc56b7d4
MC
1116 }
1117
1da177e4
LT
1118 /* On 5753 and variants, GPIO2 cannot be used. */
1119 no_gpio2 = tp->nic_sram_data_cfg &
1120 NIC_SRAM_DATA_CFG_NO_GPIO2;
1121
dc56b7d4 1122 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1da177e4
LT
1123 GRC_LCLCTRL_GPIO_OE1 |
1124 GRC_LCLCTRL_GPIO_OE2 |
1125 GRC_LCLCTRL_GPIO_OUTPUT1 |
1126 GRC_LCLCTRL_GPIO_OUTPUT2;
1127 if (no_gpio2) {
1128 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1129 GRC_LCLCTRL_GPIO_OUTPUT2);
1130 }
b401e9e2
MC
1131 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1132 grc_local_ctrl, 100);
1da177e4
LT
1133
1134 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1135
b401e9e2
MC
1136 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1137 grc_local_ctrl, 100);
1da177e4
LT
1138
1139 if (!no_gpio2) {
1140 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
b401e9e2
MC
1141 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1142 grc_local_ctrl, 100);
1da177e4
LT
1143 }
1144 }
1145 } else {
1146 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1147 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1148 if (tp_peer != tp &&
1149 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1150 return;
1151
b401e9e2
MC
1152 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1153 (GRC_LCLCTRL_GPIO_OE1 |
1154 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4 1155
b401e9e2
MC
1156 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1157 GRC_LCLCTRL_GPIO_OE1, 100);
1da177e4 1158
b401e9e2
MC
1159 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1160 (GRC_LCLCTRL_GPIO_OE1 |
1161 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1da177e4
LT
1162 }
1163 }
1164}
1165
1166static int tg3_setup_phy(struct tg3 *, int);
1167
1168#define RESET_KIND_SHUTDOWN 0
1169#define RESET_KIND_INIT 1
1170#define RESET_KIND_SUSPEND 2
1171
1172static void tg3_write_sig_post_reset(struct tg3 *, int);
1173static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1174static int tg3_nvram_lock(struct tg3 *);
1175static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1176
15c3b696
MC
1177static void tg3_power_down_phy(struct tg3 *tp)
1178{
1179 /* The PHY should not be powered down on some chips because
1180 * of bugs.
1181 */
1182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1184 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1185 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1186 return;
1187 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1188}
1189
bc1c7567 1190static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1da177e4
LT
1191{
1192 u32 misc_host_ctrl;
1193 u16 power_control, power_caps;
1194 int pm = tp->pm_cap;
1195
1196 /* Make sure register accesses (indirect or otherwise)
1197 * will function correctly.
1198 */
1199 pci_write_config_dword(tp->pdev,
1200 TG3PCI_MISC_HOST_CTRL,
1201 tp->misc_host_ctrl);
1202
1203 pci_read_config_word(tp->pdev,
1204 pm + PCI_PM_CTRL,
1205 &power_control);
1206 power_control |= PCI_PM_CTRL_PME_STATUS;
1207 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1208 switch (state) {
bc1c7567 1209 case PCI_D0:
1da177e4
LT
1210 power_control |= 0;
1211 pci_write_config_word(tp->pdev,
1212 pm + PCI_PM_CTRL,
1213 power_control);
8c6bda1a
MC
1214 udelay(100); /* Delay after power state change */
1215
1216 /* Switch out of Vaux if it is not a LOM */
b401e9e2
MC
1217 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1218 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1da177e4
LT
1219
1220 return 0;
1221
bc1c7567 1222 case PCI_D1:
1da177e4
LT
1223 power_control |= 1;
1224 break;
1225
bc1c7567 1226 case PCI_D2:
1da177e4
LT
1227 power_control |= 2;
1228 break;
1229
bc1c7567 1230 case PCI_D3hot:
1da177e4
LT
1231 power_control |= 3;
1232 break;
1233
1234 default:
1235 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1236 "requested.\n",
1237 tp->dev->name, state);
1238 return -EINVAL;
1239 };
1240
1241 power_control |= PCI_PM_CTRL_PME_ENABLE;
1242
1243 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1244 tw32(TG3PCI_MISC_HOST_CTRL,
1245 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1246
1247 if (tp->link_config.phy_is_low_power == 0) {
1248 tp->link_config.phy_is_low_power = 1;
1249 tp->link_config.orig_speed = tp->link_config.speed;
1250 tp->link_config.orig_duplex = tp->link_config.duplex;
1251 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1252 }
1253
747e8f8b 1254 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1255 tp->link_config.speed = SPEED_10;
1256 tp->link_config.duplex = DUPLEX_HALF;
1257 tp->link_config.autoneg = AUTONEG_ENABLE;
1258 tg3_setup_phy(tp, 0);
1259 }
1260
6921d201
MC
1261 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1262 int i;
1263 u32 val;
1264
1265 for (i = 0; i < 200; i++) {
1266 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1267 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1268 break;
1269 msleep(1);
1270 }
1271 }
1272 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1273 WOL_DRV_STATE_SHUTDOWN |
1274 WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1275
1da177e4
LT
1276 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1277
1278 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1279 u32 mac_mode;
1280
1281 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1282 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1283 udelay(40);
1284
1285 mac_mode = MAC_MODE_PORT_MODE_MII;
1286
1287 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1288 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1289 mac_mode |= MAC_MODE_LINK_POLARITY;
1290 } else {
1291 mac_mode = MAC_MODE_PORT_MODE_TBI;
1292 }
1293
cbf46853 1294 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1295 tw32(MAC_LED_CTRL, tp->led_ctrl);
1296
1297 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1298 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1299 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1300
1301 tw32_f(MAC_MODE, mac_mode);
1302 udelay(100);
1303
1304 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1305 udelay(10);
1306 }
1307
1308 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1309 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1311 u32 base_val;
1312
1313 base_val = tp->pci_clock_ctrl;
1314 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1315 CLOCK_CTRL_TXCLK_DISABLE);
1316
b401e9e2
MC
1317 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1318 CLOCK_CTRL_PWRDOWN_PLL133, 40);
a4e2b347 1319 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4cf78e4f 1320 /* do nothing */
85e94ced 1321 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1322 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1323 u32 newbits1, newbits2;
1324
1325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1327 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1328 CLOCK_CTRL_TXCLK_DISABLE |
1329 CLOCK_CTRL_ALTCLK);
1330 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1331 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1332 newbits1 = CLOCK_CTRL_625_CORE;
1333 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1334 } else {
1335 newbits1 = CLOCK_CTRL_ALTCLK;
1336 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1337 }
1338
b401e9e2
MC
1339 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1340 40);
1da177e4 1341
b401e9e2
MC
1342 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1343 40);
1da177e4
LT
1344
1345 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1346 u32 newbits3;
1347
1348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1349 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1350 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1351 CLOCK_CTRL_TXCLK_DISABLE |
1352 CLOCK_CTRL_44MHZ_CORE);
1353 } else {
1354 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1355 }
1356
b401e9e2
MC
1357 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1358 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
1359 }
1360 }
1361
6921d201
MC
1362 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1363 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1364 /* Turn off the PHY */
1365 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1366 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1367 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1368 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
15c3b696 1369 tg3_power_down_phy(tp);
6921d201
MC
1370 }
1371 }
1372
1da177e4
LT
1373 tg3_frob_aux_power(tp);
1374
1375 /* Workaround for unstable PLL clock */
1376 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1377 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1378 u32 val = tr32(0x7d00);
1379
1380 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1381 tw32(0x7d00, val);
6921d201 1382 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
ec41c7df
MC
1383 int err;
1384
1385 err = tg3_nvram_lock(tp);
1da177e4 1386 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
1387 if (!err)
1388 tg3_nvram_unlock(tp);
6921d201 1389 }
1da177e4
LT
1390 }
1391
bbadf503
MC
1392 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1393
1da177e4
LT
1394 /* Finally, set the new power state. */
1395 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1396 udelay(100); /* Delay after power state change */
1da177e4 1397
1da177e4
LT
1398 return 0;
1399}
1400
1401static void tg3_link_report(struct tg3 *tp)
1402{
1403 if (!netif_carrier_ok(tp->dev)) {
1404 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1405 } else {
1406 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1407 tp->dev->name,
1408 (tp->link_config.active_speed == SPEED_1000 ?
1409 1000 :
1410 (tp->link_config.active_speed == SPEED_100 ?
1411 100 : 10)),
1412 (tp->link_config.active_duplex == DUPLEX_FULL ?
1413 "full" : "half"));
1414
1415 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1416 "%s for RX.\n",
1417 tp->dev->name,
1418 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1419 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1420 }
1421}
1422
1423static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1424{
1425 u32 new_tg3_flags = 0;
1426 u32 old_rx_mode = tp->rx_mode;
1427 u32 old_tx_mode = tp->tx_mode;
1428
1429 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1430
1431 /* Convert 1000BaseX flow control bits to 1000BaseT
1432 * bits before resolving flow control.
1433 */
1434 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1435 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1436 ADVERTISE_PAUSE_ASYM);
1437 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1438
1439 if (local_adv & ADVERTISE_1000XPAUSE)
1440 local_adv |= ADVERTISE_PAUSE_CAP;
1441 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1442 local_adv |= ADVERTISE_PAUSE_ASYM;
1443 if (remote_adv & LPA_1000XPAUSE)
1444 remote_adv |= LPA_PAUSE_CAP;
1445 if (remote_adv & LPA_1000XPAUSE_ASYM)
1446 remote_adv |= LPA_PAUSE_ASYM;
1447 }
1448
1da177e4
LT
1449 if (local_adv & ADVERTISE_PAUSE_CAP) {
1450 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1451 if (remote_adv & LPA_PAUSE_CAP)
1452 new_tg3_flags |=
1453 (TG3_FLAG_RX_PAUSE |
1454 TG3_FLAG_TX_PAUSE);
1455 else if (remote_adv & LPA_PAUSE_ASYM)
1456 new_tg3_flags |=
1457 (TG3_FLAG_RX_PAUSE);
1458 } else {
1459 if (remote_adv & LPA_PAUSE_CAP)
1460 new_tg3_flags |=
1461 (TG3_FLAG_RX_PAUSE |
1462 TG3_FLAG_TX_PAUSE);
1463 }
1464 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1465 if ((remote_adv & LPA_PAUSE_CAP) &&
1466 (remote_adv & LPA_PAUSE_ASYM))
1467 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1468 }
1469
1470 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1471 tp->tg3_flags |= new_tg3_flags;
1472 } else {
1473 new_tg3_flags = tp->tg3_flags;
1474 }
1475
1476 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1477 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1478 else
1479 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1480
1481 if (old_rx_mode != tp->rx_mode) {
1482 tw32_f(MAC_RX_MODE, tp->rx_mode);
1483 }
1484
1485 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1486 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1487 else
1488 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1489
1490 if (old_tx_mode != tp->tx_mode) {
1491 tw32_f(MAC_TX_MODE, tp->tx_mode);
1492 }
1493}
1494
1495static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1496{
1497 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1498 case MII_TG3_AUX_STAT_10HALF:
1499 *speed = SPEED_10;
1500 *duplex = DUPLEX_HALF;
1501 break;
1502
1503 case MII_TG3_AUX_STAT_10FULL:
1504 *speed = SPEED_10;
1505 *duplex = DUPLEX_FULL;
1506 break;
1507
1508 case MII_TG3_AUX_STAT_100HALF:
1509 *speed = SPEED_100;
1510 *duplex = DUPLEX_HALF;
1511 break;
1512
1513 case MII_TG3_AUX_STAT_100FULL:
1514 *speed = SPEED_100;
1515 *duplex = DUPLEX_FULL;
1516 break;
1517
1518 case MII_TG3_AUX_STAT_1000HALF:
1519 *speed = SPEED_1000;
1520 *duplex = DUPLEX_HALF;
1521 break;
1522
1523 case MII_TG3_AUX_STAT_1000FULL:
1524 *speed = SPEED_1000;
1525 *duplex = DUPLEX_FULL;
1526 break;
1527
1528 default:
1529 *speed = SPEED_INVALID;
1530 *duplex = DUPLEX_INVALID;
1531 break;
1532 };
1533}
1534
1535static void tg3_phy_copper_begin(struct tg3 *tp)
1536{
1537 u32 new_adv;
1538 int i;
1539
1540 if (tp->link_config.phy_is_low_power) {
1541 /* Entering low power mode. Disable gigabit and
1542 * 100baseT advertisements.
1543 */
1544 tg3_writephy(tp, MII_TG3_CTRL, 0);
1545
1546 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1547 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1548 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1549 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1550
1551 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1552 } else if (tp->link_config.speed == SPEED_INVALID) {
1553 tp->link_config.advertising =
1554 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1555 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1556 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1557 ADVERTISED_Autoneg | ADVERTISED_MII);
1558
1559 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1560 tp->link_config.advertising &=
1561 ~(ADVERTISED_1000baseT_Half |
1562 ADVERTISED_1000baseT_Full);
1563
1564 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1565 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1566 new_adv |= ADVERTISE_10HALF;
1567 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1568 new_adv |= ADVERTISE_10FULL;
1569 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1570 new_adv |= ADVERTISE_100HALF;
1571 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1572 new_adv |= ADVERTISE_100FULL;
1573 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1574
1575 if (tp->link_config.advertising &
1576 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1577 new_adv = 0;
1578 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1579 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1580 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1581 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1582 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1583 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1584 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1585 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1586 MII_TG3_CTRL_ENABLE_AS_MASTER);
1587 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1588 } else {
1589 tg3_writephy(tp, MII_TG3_CTRL, 0);
1590 }
1591 } else {
1592 /* Asking for a specific link mode. */
1593 if (tp->link_config.speed == SPEED_1000) {
1594 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1595 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1596
1597 if (tp->link_config.duplex == DUPLEX_FULL)
1598 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1599 else
1600 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1601 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1602 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1603 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1604 MII_TG3_CTRL_ENABLE_AS_MASTER);
1605 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1606 } else {
1607 tg3_writephy(tp, MII_TG3_CTRL, 0);
1608
1609 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1610 if (tp->link_config.speed == SPEED_100) {
1611 if (tp->link_config.duplex == DUPLEX_FULL)
1612 new_adv |= ADVERTISE_100FULL;
1613 else
1614 new_adv |= ADVERTISE_100HALF;
1615 } else {
1616 if (tp->link_config.duplex == DUPLEX_FULL)
1617 new_adv |= ADVERTISE_10FULL;
1618 else
1619 new_adv |= ADVERTISE_10HALF;
1620 }
1621 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1622 }
1623 }
1624
1625 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1626 tp->link_config.speed != SPEED_INVALID) {
1627 u32 bmcr, orig_bmcr;
1628
1629 tp->link_config.active_speed = tp->link_config.speed;
1630 tp->link_config.active_duplex = tp->link_config.duplex;
1631
1632 bmcr = 0;
1633 switch (tp->link_config.speed) {
1634 default:
1635 case SPEED_10:
1636 break;
1637
1638 case SPEED_100:
1639 bmcr |= BMCR_SPEED100;
1640 break;
1641
1642 case SPEED_1000:
1643 bmcr |= TG3_BMCR_SPEED1000;
1644 break;
1645 };
1646
1647 if (tp->link_config.duplex == DUPLEX_FULL)
1648 bmcr |= BMCR_FULLDPLX;
1649
1650 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1651 (bmcr != orig_bmcr)) {
1652 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1653 for (i = 0; i < 1500; i++) {
1654 u32 tmp;
1655
1656 udelay(10);
1657 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1658 tg3_readphy(tp, MII_BMSR, &tmp))
1659 continue;
1660 if (!(tmp & BMSR_LSTATUS)) {
1661 udelay(40);
1662 break;
1663 }
1664 }
1665 tg3_writephy(tp, MII_BMCR, bmcr);
1666 udelay(40);
1667 }
1668 } else {
1669 tg3_writephy(tp, MII_BMCR,
1670 BMCR_ANENABLE | BMCR_ANRESTART);
1671 }
1672}
1673
1674static int tg3_init_5401phy_dsp(struct tg3 *tp)
1675{
1676 int err;
1677
1678 /* Turn off tap power management. */
1679 /* Set Extended packet length bit */
1680 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1681
1682 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1683 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1684
1685 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1686 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1687
1688 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1689 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1690
1691 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1692 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1693
1694 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1695 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1696
1697 udelay(40);
1698
1699 return err;
1700}
1701
1702static int tg3_copper_is_advertising_all(struct tg3 *tp)
1703{
1704 u32 adv_reg, all_mask;
1705
1706 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1707 return 0;
1708
1709 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1710 ADVERTISE_100HALF | ADVERTISE_100FULL);
1711 if ((adv_reg & all_mask) != all_mask)
1712 return 0;
1713 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1714 u32 tg3_ctrl;
1715
1716 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1717 return 0;
1718
1719 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1720 MII_TG3_CTRL_ADV_1000_FULL);
1721 if ((tg3_ctrl & all_mask) != all_mask)
1722 return 0;
1723 }
1724 return 1;
1725}
1726
1727static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1728{
1729 int current_link_up;
1730 u32 bmsr, dummy;
1731 u16 current_speed;
1732 u8 current_duplex;
1733 int i, err;
1734
1735 tw32(MAC_EVENT, 0);
1736
1737 tw32_f(MAC_STATUS,
1738 (MAC_STATUS_SYNC_CHANGED |
1739 MAC_STATUS_CFG_CHANGED |
1740 MAC_STATUS_MI_COMPLETION |
1741 MAC_STATUS_LNKSTATE_CHANGED));
1742 udelay(40);
1743
1744 tp->mi_mode = MAC_MI_MODE_BASE;
1745 tw32_f(MAC_MI_MODE, tp->mi_mode);
1746 udelay(80);
1747
1748 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1749
1750 /* Some third-party PHYs need to be reset on link going
1751 * down.
1752 */
1753 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1756 netif_carrier_ok(tp->dev)) {
1757 tg3_readphy(tp, MII_BMSR, &bmsr);
1758 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1759 !(bmsr & BMSR_LSTATUS))
1760 force_reset = 1;
1761 }
1762 if (force_reset)
1763 tg3_phy_reset(tp);
1764
1765 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1766 tg3_readphy(tp, MII_BMSR, &bmsr);
1767 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1768 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1769 bmsr = 0;
1770
1771 if (!(bmsr & BMSR_LSTATUS)) {
1772 err = tg3_init_5401phy_dsp(tp);
1773 if (err)
1774 return err;
1775
1776 tg3_readphy(tp, MII_BMSR, &bmsr);
1777 for (i = 0; i < 1000; i++) {
1778 udelay(10);
1779 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1780 (bmsr & BMSR_LSTATUS)) {
1781 udelay(40);
1782 break;
1783 }
1784 }
1785
1786 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1787 !(bmsr & BMSR_LSTATUS) &&
1788 tp->link_config.active_speed == SPEED_1000) {
1789 err = tg3_phy_reset(tp);
1790 if (!err)
1791 err = tg3_init_5401phy_dsp(tp);
1792 if (err)
1793 return err;
1794 }
1795 }
1796 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1797 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1798 /* 5701 {A0,B0} CRC bug workaround */
1799 tg3_writephy(tp, 0x15, 0x0a75);
1800 tg3_writephy(tp, 0x1c, 0x8c68);
1801 tg3_writephy(tp, 0x1c, 0x8d68);
1802 tg3_writephy(tp, 0x1c, 0x8c68);
1803 }
1804
1805 /* Clear pending interrupts... */
1806 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1807 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1808
1809 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1810 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1811 else
1812 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1813
1814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1816 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1817 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1819 else
1820 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1821 }
1822
1823 current_link_up = 0;
1824 current_speed = SPEED_INVALID;
1825 current_duplex = DUPLEX_INVALID;
1826
1827 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1828 u32 val;
1829
1830 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1831 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1832 if (!(val & (1 << 10))) {
1833 val |= (1 << 10);
1834 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1835 goto relink;
1836 }
1837 }
1838
1839 bmsr = 0;
1840 for (i = 0; i < 100; i++) {
1841 tg3_readphy(tp, MII_BMSR, &bmsr);
1842 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1843 (bmsr & BMSR_LSTATUS))
1844 break;
1845 udelay(40);
1846 }
1847
1848 if (bmsr & BMSR_LSTATUS) {
1849 u32 aux_stat, bmcr;
1850
1851 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1852 for (i = 0; i < 2000; i++) {
1853 udelay(10);
1854 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1855 aux_stat)
1856 break;
1857 }
1858
1859 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1860 &current_speed,
1861 &current_duplex);
1862
1863 bmcr = 0;
1864 for (i = 0; i < 200; i++) {
1865 tg3_readphy(tp, MII_BMCR, &bmcr);
1866 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1867 continue;
1868 if (bmcr && bmcr != 0x7fff)
1869 break;
1870 udelay(10);
1871 }
1872
1873 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1874 if (bmcr & BMCR_ANENABLE) {
1875 current_link_up = 1;
1876
1877 /* Force autoneg restart if we are exiting
1878 * low power mode.
1879 */
1880 if (!tg3_copper_is_advertising_all(tp))
1881 current_link_up = 0;
1882 } else {
1883 current_link_up = 0;
1884 }
1885 } else {
1886 if (!(bmcr & BMCR_ANENABLE) &&
1887 tp->link_config.speed == current_speed &&
1888 tp->link_config.duplex == current_duplex) {
1889 current_link_up = 1;
1890 } else {
1891 current_link_up = 0;
1892 }
1893 }
1894
1895 tp->link_config.active_speed = current_speed;
1896 tp->link_config.active_duplex = current_duplex;
1897 }
1898
1899 if (current_link_up == 1 &&
1900 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1901 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1902 u32 local_adv, remote_adv;
1903
1904 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1905 local_adv = 0;
1906 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1907
1908 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1909 remote_adv = 0;
1910
1911 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1912
1913 /* If we are not advertising full pause capability,
1914 * something is wrong. Bring the link down and reconfigure.
1915 */
1916 if (local_adv != ADVERTISE_PAUSE_CAP) {
1917 current_link_up = 0;
1918 } else {
1919 tg3_setup_flow_control(tp, local_adv, remote_adv);
1920 }
1921 }
1922relink:
6921d201 1923 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1da177e4
LT
1924 u32 tmp;
1925
1926 tg3_phy_copper_begin(tp);
1927
1928 tg3_readphy(tp, MII_BMSR, &tmp);
1929 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1930 (tmp & BMSR_LSTATUS))
1931 current_link_up = 1;
1932 }
1933
1934 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1935 if (current_link_up == 1) {
1936 if (tp->link_config.active_speed == SPEED_100 ||
1937 tp->link_config.active_speed == SPEED_10)
1938 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1939 else
1940 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1941 } else
1942 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1943
1944 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1945 if (tp->link_config.active_duplex == DUPLEX_HALF)
1946 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1947
1948 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1950 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1951 (current_link_up == 1 &&
1952 tp->link_config.active_speed == SPEED_10))
1953 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1954 } else {
1955 if (current_link_up == 1)
1956 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1957 }
1958
1959 /* ??? Without this setting Netgear GA302T PHY does not
1960 * ??? send/receive packets...
1961 */
1962 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1963 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1964 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1965 tw32_f(MAC_MI_MODE, tp->mi_mode);
1966 udelay(80);
1967 }
1968
1969 tw32_f(MAC_MODE, tp->mac_mode);
1970 udelay(40);
1971
1972 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1973 /* Polled via timer. */
1974 tw32_f(MAC_EVENT, 0);
1975 } else {
1976 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1977 }
1978 udelay(40);
1979
1980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1981 current_link_up == 1 &&
1982 tp->link_config.active_speed == SPEED_1000 &&
1983 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1984 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1985 udelay(120);
1986 tw32_f(MAC_STATUS,
1987 (MAC_STATUS_SYNC_CHANGED |
1988 MAC_STATUS_CFG_CHANGED));
1989 udelay(40);
1990 tg3_write_mem(tp,
1991 NIC_SRAM_FIRMWARE_MBOX,
1992 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1993 }
1994
1995 if (current_link_up != netif_carrier_ok(tp->dev)) {
1996 if (current_link_up)
1997 netif_carrier_on(tp->dev);
1998 else
1999 netif_carrier_off(tp->dev);
2000 tg3_link_report(tp);
2001 }
2002
2003 return 0;
2004}
2005
/* Software state for the 1000BASE-X (IEEE 802.3z clause 37) fiber
 * autonegotiation state machine driven by tg3_fiber_aneg_smachine().
 * One instance lives on the stack for the duration of fiber_autoneg().
 * The MR_* flag bits mirror the management-register variables named in
 * the 802.3z arbitration state diagram; the ANEG_CFG_* bits decode the
 * 16-bit /C/ configuration code words exchanged on the wire.
 */
2006struct tg3_fiber_aneginfo {
2007	int state;
2008#define ANEG_STATE_UNKNOWN		0
2009#define ANEG_STATE_AN_ENABLE		1
2010#define ANEG_STATE_RESTART_INIT		2
2011#define ANEG_STATE_RESTART		3
2012#define ANEG_STATE_DISABLE_LINK_OK	4
2013#define ANEG_STATE_ABILITY_DETECT_INIT	5
2014#define ANEG_STATE_ABILITY_DETECT	6
2015#define ANEG_STATE_ACK_DETECT_INIT	7
2016#define ANEG_STATE_ACK_DETECT		8
2017#define ANEG_STATE_COMPLETE_ACK_INIT	9
2018#define ANEG_STATE_COMPLETE_ACK		10
2019#define ANEG_STATE_IDLE_DETECT_INIT	11
2020#define ANEG_STATE_IDLE_DETECT		12
2021#define ANEG_STATE_LINK_OK		13
2022#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
2023#define ANEG_STATE_NEXT_PAGE_WAIT	15
2024
	/* MR_* management-register style status/control bits. */
2025	u32 flags;
2026#define MR_AN_ENABLE		0x00000001
2027#define MR_RESTART_AN		0x00000002
2028#define MR_AN_COMPLETE		0x00000004
2029#define MR_PAGE_RX		0x00000008
2030#define MR_NP_LOADED		0x00000010
2031#define MR_TOGGLE_TX		0x00000020
2032#define MR_LP_ADV_FULL_DUPLEX	0x00000040
2033#define MR_LP_ADV_HALF_DUPLEX	0x00000080
2034#define MR_LP_ADV_SYM_PAUSE	0x00000100
2035#define MR_LP_ADV_ASYM_PAUSE	0x00000200
2036#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
2037#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
2038#define MR_LP_ADV_NEXT_PAGE	0x00001000
2039#define MR_TOGGLE_RX		0x00002000
2040#define MR_NP_RX		0x00004000
2041
2042#define MR_LINK_OK		0x80000000
2043
	/* Timestamps in state-machine ticks (one tick per invocation). */
2044	unsigned long link_time, cur_time;
2045
	/* Last config word seen and how many consecutive times it repeated. */
2046	u32 ability_match_cfg;
2047	int ability_match_count;
2048
	/* Boolean match outcomes derived each tick from the RX config word. */
2049	char ability_match, idle_match, ack_match;
2050
	/* Raw TX/RX autoneg configuration code words. */
2051	u32 txconfig, rxconfig;
2052#define ANEG_CFG_NP		0x00000080
2053#define ANEG_CFG_ACK		0x00000040
2054#define ANEG_CFG_RF2		0x00000020
2055#define ANEG_CFG_RF1		0x00000010
2056#define ANEG_CFG_PS2		0x00000001
2057#define ANEG_CFG_PS1		0x00008000
2058#define ANEG_CFG_HD		0x00004000
2059#define ANEG_CFG_FD		0x00002000
2060#define ANEG_CFG_INVAL		0x00001f06
2061
2062};
/* Return codes from tg3_fiber_aneg_smachine(). */
2063#define ANEG_OK		0
2064#define ANEG_DONE	1
2065#define ANEG_TIMER_ENAB	2
2066#define ANEG_FAILED	-1
2067
/* Ticks to let the link settle before advancing timed states. */
2068#define ANEG_STATE_SETTLE_TIME	10000
2069
/* Advance the software 802.3z autonegotiation state machine by one tick.
 *
 * @tp: device state (used only for MAC register access via tr32/tw32)
 * @ap: state-machine scratch state, zeroed by the caller before first use
 *
 * Called repeatedly (with ~1us spacing) by fiber_autoneg().  Each call
 * samples the received configuration code word from the MAC, updates the
 * ability/ack/idle match tracking, then runs one transition of the
 * arbitration state machine.
 *
 * Returns ANEG_OK to keep polling, ANEG_TIMER_ENAB while waiting out a
 * settle interval, ANEG_DONE on completion, or ANEG_FAILED on error.
 */
2070static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2071				   struct tg3_fiber_aneginfo *ap)
2072{
2073	unsigned long delta;
2074	u32 rx_cfg_reg;
2075	int ret;
2076
	/* First tick: reset all tracking state before doing anything. */
2077	if (ap->state == ANEG_STATE_UNKNOWN) {
2078		ap->rxconfig = 0;
2079		ap->link_time = 0;
2080		ap->cur_time = 0;
2081		ap->ability_match_cfg = 0;
2082		ap->ability_match_count = 0;
2083		ap->ability_match = 0;
2084		ap->idle_match = 0;
2085		ap->ack_match = 0;
2086	}
2087	ap->cur_time++;
2088
	/* Sample the incoming config word (if any) and derive the
	 * ability_match / ack_match / idle_match predicates used below.
	 * ability_match requires the same non-idle word twice in a row.
	 */
2089	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2090		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2091
2092		if (rx_cfg_reg != ap->ability_match_cfg) {
2093			ap->ability_match_cfg = rx_cfg_reg;
2094			ap->ability_match = 0;
2095			ap->ability_match_count = 0;
2096		} else {
2097			if (++ap->ability_match_count > 1) {
2098				ap->ability_match = 1;
2099				ap->ability_match_cfg = rx_cfg_reg;
2100			}
2101		}
2102		if (rx_cfg_reg & ANEG_CFG_ACK)
2103			ap->ack_match = 1;
2104		else
2105			ap->ack_match = 0;
2106
2107		ap->idle_match = 0;
2108	} else {
2109		ap->idle_match = 1;
2110		ap->ability_match_cfg = 0;
2111		ap->ability_match_count = 0;
2112		ap->ability_match = 0;
2113		ap->ack_match = 0;
2114
2115		rx_cfg_reg = 0;
2116	}
2117
2118	ap->rxconfig = rx_cfg_reg;
2119	ret = ANEG_OK;
2120
2121	switch(ap->state) {
2122	case ANEG_STATE_UNKNOWN:
2123		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2124			ap->state = ANEG_STATE_AN_ENABLE;
2125
2126		/* fallthru */
2127	case ANEG_STATE_AN_ENABLE:
2128		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2129		if (ap->flags & MR_AN_ENABLE) {
2130			ap->link_time = 0;
2131			ap->cur_time = 0;
2132			ap->ability_match_cfg = 0;
2133			ap->ability_match_count = 0;
2134			ap->ability_match = 0;
2135			ap->idle_match = 0;
2136			ap->ack_match = 0;
2137
2138			ap->state = ANEG_STATE_RESTART_INIT;
2139		} else {
2140			ap->state = ANEG_STATE_DISABLE_LINK_OK;
2141		}
2142		break;
2143
	/* Restart: transmit an all-zero config word, then wait out the
	 * settle time before starting ability detection.
	 */
2144	case ANEG_STATE_RESTART_INIT:
2145		ap->link_time = ap->cur_time;
2146		ap->flags &= ~(MR_NP_LOADED);
2147		ap->txconfig = 0;
2148		tw32(MAC_TX_AUTO_NEG, 0);
2149		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2150		tw32_f(MAC_MODE, tp->mac_mode);
2151		udelay(40);
2152
2153		ret = ANEG_TIMER_ENAB;
2154		ap->state = ANEG_STATE_RESTART;
2155
2156		/* fallthru */
2157	case ANEG_STATE_RESTART:
2158		delta = ap->cur_time - ap->link_time;
2159		if (delta > ANEG_STATE_SETTLE_TIME) {
2160			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2161		} else {
2162			ret = ANEG_TIMER_ENAB;
2163		}
2164		break;
2165
2166	case ANEG_STATE_DISABLE_LINK_OK:
2167		ret = ANEG_DONE;
2168		break;
2169
	/* Advertise our abilities: full duplex + symmetric pause. */
2170	case ANEG_STATE_ABILITY_DETECT_INIT:
2171		ap->flags &= ~(MR_TOGGLE_TX);
2172		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2173		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2174		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2175		tw32_f(MAC_MODE, tp->mac_mode);
2176		udelay(40);
2177
2178		ap->state = ANEG_STATE_ABILITY_DETECT;
2179		break;
2180
2181	case ANEG_STATE_ABILITY_DETECT:
2182		if (ap->ability_match != 0 && ap->rxconfig != 0) {
2183			ap->state = ANEG_STATE_ACK_DETECT_INIT;
2184		}
2185		break;
2186
	/* Link partner's word matched ours twice; start sending ACK. */
2187	case ANEG_STATE_ACK_DETECT_INIT:
2188		ap->txconfig |= ANEG_CFG_ACK;
2189		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2190		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2191		tw32_f(MAC_MODE, tp->mac_mode);
2192		udelay(40);
2193
2194		ap->state = ANEG_STATE_ACK_DETECT;
2195
2196		/* fallthru */
2197	case ANEG_STATE_ACK_DETECT:
2198		if (ap->ack_match != 0) {
2199			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2200			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2201				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2202			} else {
2203				ap->state = ANEG_STATE_AN_ENABLE;
2204			}
2205		} else if (ap->ability_match != 0 &&
2206			   ap->rxconfig == 0) {
2207			ap->state = ANEG_STATE_AN_ENABLE;
2208		}
2209		break;
2210
	/* Decode the link partner's advertised abilities into MR_* flags. */
2211	case ANEG_STATE_COMPLETE_ACK_INIT:
2212		if (ap->rxconfig & ANEG_CFG_INVAL) {
2213			ret = ANEG_FAILED;
2214			break;
2215		}
2216		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2217			       MR_LP_ADV_HALF_DUPLEX |
2218			       MR_LP_ADV_SYM_PAUSE |
2219			       MR_LP_ADV_ASYM_PAUSE |
2220			       MR_LP_ADV_REMOTE_FAULT1 |
2221			       MR_LP_ADV_REMOTE_FAULT2 |
2222			       MR_LP_ADV_NEXT_PAGE |
2223			       MR_TOGGLE_RX |
2224			       MR_NP_RX);
2225		if (ap->rxconfig & ANEG_CFG_FD)
2226			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2227		if (ap->rxconfig & ANEG_CFG_HD)
2228			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2229		if (ap->rxconfig & ANEG_CFG_PS1)
2230			ap->flags |= MR_LP_ADV_SYM_PAUSE;
2231		if (ap->rxconfig & ANEG_CFG_PS2)
2232			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2233		if (ap->rxconfig & ANEG_CFG_RF1)
2234			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2235		if (ap->rxconfig & ANEG_CFG_RF2)
2236			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2237		if (ap->rxconfig & ANEG_CFG_NP)
2238			ap->flags |= MR_LP_ADV_NEXT_PAGE;
2239
2240		ap->link_time = ap->cur_time;
2241
2242		ap->flags ^= (MR_TOGGLE_TX);
2243		if (ap->rxconfig & 0x0008)
2244			ap->flags |= MR_TOGGLE_RX;
2245		if (ap->rxconfig & ANEG_CFG_NP)
2246			ap->flags |= MR_NP_RX;
2247		ap->flags |= MR_PAGE_RX;
2248
2249		ap->state = ANEG_STATE_COMPLETE_ACK;
2250		ret = ANEG_TIMER_ENAB;
2251		break;
2252
2253	case ANEG_STATE_COMPLETE_ACK:
2254		if (ap->ability_match != 0 &&
2255		    ap->rxconfig == 0) {
2256			ap->state = ANEG_STATE_AN_ENABLE;
2257			break;
2258		}
2259		delta = ap->cur_time - ap->link_time;
2260		if (delta > ANEG_STATE_SETTLE_TIME) {
2261			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2262				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2263			} else {
				/* Next-page exchange is not implemented;
				 * fail unless neither side wants it.
				 */
2264				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2265				    !(ap->flags & MR_NP_RX)) {
2266					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2267				} else {
2268					ret = ANEG_FAILED;
2269				}
2270			}
2271		}
2272		break;
2273
	/* Stop sending config words and wait for the link to go idle. */
2274	case ANEG_STATE_IDLE_DETECT_INIT:
2275		ap->link_time = ap->cur_time;
2276		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2277		tw32_f(MAC_MODE, tp->mac_mode);
2278		udelay(40);
2279
2280		ap->state = ANEG_STATE_IDLE_DETECT;
2281		ret = ANEG_TIMER_ENAB;
2282		break;
2283
2284	case ANEG_STATE_IDLE_DETECT:
2285		if (ap->ability_match != 0 &&
2286		    ap->rxconfig == 0) {
2287			ap->state = ANEG_STATE_AN_ENABLE;
2288			break;
2289		}
2290		delta = ap->cur_time - ap->link_time;
2291		if (delta > ANEG_STATE_SETTLE_TIME) {
2292			/* XXX another gem from the Broadcom driver :( */
2293			ap->state = ANEG_STATE_LINK_OK;
2294		}
2295		break;
2296
2297	case ANEG_STATE_LINK_OK:
2298		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2299		ret = ANEG_DONE;
2300		break;
2301
2302	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2303		/* ??? unimplemented */
2304		break;
2305
2306	case ANEG_STATE_NEXT_PAGE_WAIT:
2307		/* ??? unimplemented */
2308		break;
2309
2310	default:
2311		ret = ANEG_FAILED;
2312		break;
2313	};
2314
2315	return ret;
2316}
2317
/* Run software fiber autonegotiation to completion (or timeout).
 *
 * @tp:    device state
 * @flags: out-parameter; receives the final MR_* flag word from the
 *         state machine regardless of success or failure
 *
 * Puts the MAC into GMII mode sending config words, then single-steps
 * tg3_fiber_aneg_smachine() with 1us spacing for up to ~195ms.
 *
 * Returns 1 if negotiation completed with link OK and the partner
 * advertised full duplex, 0 otherwise.
 */
2318static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2319{
2320	int res = 0;
2321	struct tg3_fiber_aneginfo aninfo;
2322	int status = ANEG_FAILED;
2323	unsigned int tick;
2324	u32 tmp;
2325
2326	tw32_f(MAC_TX_AUTO_NEG, 0);
2327
2328	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2329	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2330	udelay(40);
2331
2332	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2333	udelay(40);
2334
2335	memset(&aninfo, 0, sizeof(aninfo));
2336	aninfo.flags |= MR_AN_ENABLE;
2337	aninfo.state = ANEG_STATE_UNKNOWN;
2338	aninfo.cur_time = 0;
2339	tick = 0;
	/* ~195000 iterations * 1us = ~195ms worst-case budget. */
2340	while (++tick < 195000) {
2341		status = tg3_fiber_aneg_smachine(tp, &aninfo);
2342		if (status == ANEG_DONE || status == ANEG_FAILED)
2343			break;
2344
2345		udelay(1);
2346	}
2347
	/* Stop transmitting config words before reporting the result. */
2348	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2349	tw32_f(MAC_MODE, tp->mac_mode);
2350	udelay(40);
2351
2352	*flags = aninfo.flags;
2353
2354	if (status == ANEG_DONE &&
2355	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2356			     MR_LP_ADV_FULL_DUPLEX)))
2357		res = 1;
2358
2359	return res;
2360}
2361
/* One-time initialization / reset sequence for the BCM8002 SerDes PHY.
 *
 * The magic register numbers and values below are Broadcom-specified
 * vendor PHY settings; they are written via tg3_writephy() (MDIO).
 * Skipped when the device is already initialized and PCS sync is lost
 * (i.e. we only reset on first init or while we still have a link).
 */
2362static void tg3_init_bcm8002(struct tg3 *tp)
2363{
2364	u32 mac_status = tr32(MAC_STATUS);
2365	int i;
2366
2367	/* Reset when initting first time or we have a link. */
2368	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2369	    !(mac_status & MAC_STATUS_PCS_SYNCED))
2370		return;
2371
2372	/* Set PLL lock range. */
2373	tg3_writephy(tp, 0x16, 0x8007);
2374
2375	/* SW reset */
2376	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2377
2378	/* Wait for reset to complete. */
2379	/* XXX schedule_timeout() ... */
2380	for (i = 0; i < 500; i++)
2381		udelay(10);
2382
2383	/* Config mode; select PMA/Ch 1 regs. */
2384	tg3_writephy(tp, 0x10, 0x8411);
2385
2386	/* Enable auto-lock and comdet, select txclk for tx. */
2387	tg3_writephy(tp, 0x11, 0x0a10);
2388
2389	tg3_writephy(tp, 0x18, 0x00a0);
2390	tg3_writephy(tp, 0x16, 0x41ff);
2391
2392	/* Assert and deassert POR. */
2393	tg3_writephy(tp, 0x13, 0x0400);
2394	udelay(40);
2395	tg3_writephy(tp, 0x13, 0x0000);
2396
2397	tg3_writephy(tp, 0x11, 0x0a50);
2398	udelay(40);
2399	tg3_writephy(tp, 0x11, 0x0a10);
2400
2401	/* Wait for signal to stabilize */
2402	/* XXX schedule_timeout() ... */
2403	for (i = 0; i < 15000; i++)
2404		udelay(10);
2405
2406	/* Deselect the channel register so we can read the PHYID
2407	 * later.
2408	 */
2409	tg3_writephy(tp, 0x10, 0x8011);
2410}
2411
/* Bring up the fiber link using the chip's hardware SG_DIG autoneg block.
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Handles both forced mode (autoneg disabled: just turn SG_DIG off and
 * trust PCS sync) and hardware autonegotiation, including a parallel-
 * detection fallback when the partner does not negotiate.  On 5704
 * chips newer than rev A1 a SERDES_CFG workaround is applied
 * (preserving pre-emphasis and voltage-regulator bits).
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
2412static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2413{
2414	u32 sg_dig_ctrl, sg_dig_status;
2415	u32 serdes_cfg, expected_sg_dig_ctrl;
2416	int workaround, port_a;
2417	int current_link_up;
2418
2419	serdes_cfg = 0;
2420	expected_sg_dig_ctrl = 0;
2421	workaround = 0;
2422	port_a = 1;
2423	current_link_up = 0;
2424
2425	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2426	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2427		workaround = 1;
2428		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2429			port_a = 0;
2430
2431		/* preserve bits 0-11,13,14 for signal pre-emphasis */
2432		/* preserve bits 20-23 for voltage regulator */
2433		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2434	}
2435
2436	sg_dig_ctrl = tr32(SG_DIG_CTRL);
2437
	/* Forced-speed path: disable SG_DIG autoneg if it was on, then
	 * declare link up purely on PCS sync.
	 */
2438	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2439		if (sg_dig_ctrl & (1 << 31)) {
2440			if (workaround) {
2441				u32 val = serdes_cfg;
2442
2443				if (port_a)
2444					val |= 0xc010000;
2445				else
2446					val |= 0x4010000;
2447				tw32_f(MAC_SERDES_CFG, val);
2448			}
2449			tw32_f(SG_DIG_CTRL, 0x01388400);
2450		}
2451		if (mac_status & MAC_STATUS_PCS_SYNCED) {
2452			tg3_setup_flow_control(tp, 0, 0);
2453			current_link_up = 1;
2454		}
2455		goto out;
2456	}
2457
2458	/* Want auto-negotiation. */
2459	expected_sg_dig_ctrl = 0x81388400;
2460
2461	/* Pause capability */
2462	expected_sg_dig_ctrl |= (1 << 11);
2463
2464	/* Asymmetric pause */
2465	expected_sg_dig_ctrl |= (1 << 12);
2466
2467	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg: pulse the soft-reset bit
		 * (bit 30) around writing the desired control value.
		 */
2468		if (workaround)
2469			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2470		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2471		udelay(5);
2472		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2473
2474		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2475	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2476				 MAC_STATUS_SIGNAL_DET)) {
2477		int i;
2478
2479		/* Give time to negotiate (~200ms) */
2480		for (i = 0; i < 40000; i++) {
2481			sg_dig_status = tr32(SG_DIG_STATUS);
2482			if (sg_dig_status & (0x3))
2483				break;
2484			udelay(5);
2485		}
2486		mac_status = tr32(MAC_STATUS);
2487
		/* Bit 1 = autoneg complete; bits 19/20 = partner pause
		 * ability (symmetric/asymmetric).
		 */
2488		if ((sg_dig_status & (1 << 1)) &&
2489		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
2490			u32 local_adv, remote_adv;
2491
2492			local_adv = ADVERTISE_PAUSE_CAP;
2493			remote_adv = 0;
2494			if (sg_dig_status & (1 << 19))
2495				remote_adv |= LPA_PAUSE_CAP;
2496			if (sg_dig_status & (1 << 20))
2497				remote_adv |= LPA_PAUSE_ASYM;
2498
2499			tg3_setup_flow_control(tp, local_adv, remote_adv);
2500			current_link_up = 1;
2501			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2502		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  The first failure
			 * after init is forgiven; after that, fall back
			 * to parallel detection.
			 */
2503			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2504				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2505			else {
2506				if (workaround) {
2507					u32 val = serdes_cfg;
2508
2509					if (port_a)
2510						val |= 0xc010000;
2511					else
2512						val |= 0x4010000;
2513
2514					tw32_f(MAC_SERDES_CFG, val);
2515				}
2516
2517				tw32_f(SG_DIG_CTRL, 0x01388400);
2518				udelay(40);
2519
2520				/* Link parallel detection - link is up */
2521				/* only if we have PCS_SYNC and not */
2522				/* receiving config code words */
2523				mac_status = tr32(MAC_STATUS);
2524				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2525				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
2526					tg3_setup_flow_control(tp, 0, 0);
2527					current_link_up = 1;
2528				}
2529			}
2530		}
2531	}
2532
2533out:
2534	return current_link_up;
2535}
2536
/* Bring up the fiber link using the software autoneg state machine
 * (for chips without the SG_DIG hardware autoneg block).
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * With autoneg enabled, runs fiber_autoneg() and derives flow-control
 * settings from the partner's advertised pause bits; also accepts a
 * parallel-detect link (PCS synced, no config words) if negotiation
 * itself did not succeed.  With autoneg disabled, forces 1000FD.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
2537static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2538{
2539	int current_link_up = 0;
2540
2541	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2542		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2543		goto out;
2544	}
2545
2546	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2547		u32 flags;
2548		int i;
2549
2550		if (fiber_autoneg(tp, &flags)) {
2551			u32 local_adv, remote_adv;
2552
2553			local_adv = ADVERTISE_PAUSE_CAP;
2554			remote_adv = 0;
2555			if (flags & MR_LP_ADV_SYM_PAUSE)
2556				remote_adv |= LPA_PAUSE_CAP;
2557			if (flags & MR_LP_ADV_ASYM_PAUSE)
2558				remote_adv |= LPA_PAUSE_ASYM;
2559
2560			tg3_setup_flow_control(tp, local_adv, remote_adv);
2561
2562			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2563			current_link_up = 1;
2564		}
		/* Ack any pending sync/config change events, retrying
		 * until the status bits stay clear (max 30 tries).
		 */
2565		for (i = 0; i < 30; i++) {
2566			udelay(20);
2567			tw32_f(MAC_STATUS,
2568			       (MAC_STATUS_SYNC_CHANGED |
2569				MAC_STATUS_CFG_CHANGED));
2570			udelay(40);
2571			if ((tr32(MAC_STATUS) &
2572			     (MAC_STATUS_SYNC_CHANGED |
2573			      MAC_STATUS_CFG_CHANGED)) == 0)
2574				break;
2575		}
2576
		/* Parallel detection: synced but not receiving config
		 * code words counts as link up.
		 */
2577		mac_status = tr32(MAC_STATUS);
2578		if (current_link_up == 0 &&
2579		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
2580		    !(mac_status & MAC_STATUS_RCVD_CFG))
2581			current_link_up = 1;
2582	} else {
2583		/* Forcing 1000FD link up. */
2584		current_link_up = 1;
2585		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2586
2587		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2588		udelay(40);
2589	}
2590
2591out:
2592	return current_link_up;
2593}
2594
/* Top-level link setup for TBI/fiber ports.
 *
 * @tp:          device state
 * @force_reset: unused here (kept for signature parity with the copper
 *               and MII-serdes setup routines)
 *
 * Fast path: if software autoneg is in use and the link already looks
 * good (synced + signal, no pending changes), just ack status and
 * return.  Otherwise reprogram the MAC for TBI mode, run either the
 * hardware (SG_DIG) or software autoneg path, settle the status bits,
 * update LEDs and link_config, and report carrier changes.
 *
 * Always returns 0.
 */
2595static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2596{
2597	u32 orig_pause_cfg;
2598	u16 orig_active_speed;
2599	u8 orig_active_duplex;
2600	u32 mac_status;
2601	int current_link_up;
2602	int i;
2603
	/* Remember pre-setup state so we can report config-only changes
	 * even when carrier state does not flip.
	 */
2604	orig_pause_cfg =
2605		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2606				  TG3_FLAG_TX_PAUSE));
2607	orig_active_speed = tp->link_config.active_speed;
2608	orig_active_duplex = tp->link_config.active_duplex;
2609
2610	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2611	    netif_carrier_ok(tp->dev) &&
2612	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2613		mac_status = tr32(MAC_STATUS);
2614		mac_status &= (MAC_STATUS_PCS_SYNCED |
2615			       MAC_STATUS_SIGNAL_DET |
2616			       MAC_STATUS_CFG_CHANGED |
2617			       MAC_STATUS_RCVD_CFG);
2618		if (mac_status == (MAC_STATUS_PCS_SYNCED |
2619				   MAC_STATUS_SIGNAL_DET)) {
2620			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2621					    MAC_STATUS_CFG_CHANGED));
2622			return 0;
2623		}
2624	}
2625
2626	tw32_f(MAC_TX_AUTO_NEG, 0);
2627
2628	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2629	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2630	tw32_f(MAC_MODE, tp->mac_mode);
2631	udelay(40);
2632
2633	if (tp->phy_id == PHY_ID_BCM8002)
2634		tg3_init_bcm8002(tp);
2635
2636	/* Enable link change event even when serdes polling. */
2637	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2638	udelay(40);
2639
2640	current_link_up = 0;
2641	mac_status = tr32(MAC_STATUS);
2642
2643	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2644		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2645	else
2646		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2647
2648	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2649	tw32_f(MAC_MODE, tp->mac_mode);
2650	udelay(40);
2651
	/* Clear the stale link-change bit in the shared status block. */
2652	tp->hw_status->status =
2653		(SD_STATUS_UPDATED |
2654		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2655
	/* Ack sync/config change events until they stay clear. */
2656	for (i = 0; i < 100; i++) {
2657		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2658				    MAC_STATUS_CFG_CHANGED));
2659		udelay(5);
2660		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2661					 MAC_STATUS_CFG_CHANGED)) == 0)
2662			break;
2663	}
2664
2665	mac_status = tr32(MAC_STATUS);
2666	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2667		current_link_up = 0;
2668		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Pulse SEND_CONFIGS to nudge the partner. */
2669			tw32_f(MAC_MODE, (tp->mac_mode |
2670					  MAC_MODE_SEND_CONFIGS));
2671			udelay(1);
2672			tw32_f(MAC_MODE, tp->mac_mode);
2673		}
2674	}
2675
2676	if (current_link_up == 1) {
2677		tp->link_config.active_speed = SPEED_1000;
2678		tp->link_config.active_duplex = DUPLEX_FULL;
2679		tw32(MAC_LED_CTRL, (tp->led_ctrl |
2680				    LED_CTRL_LNKLED_OVERRIDE |
2681				    LED_CTRL_1000MBPS_ON));
2682	} else {
2683		tp->link_config.active_speed = SPEED_INVALID;
2684		tp->link_config.active_duplex = DUPLEX_INVALID;
2685		tw32(MAC_LED_CTRL, (tp->led_ctrl |
2686				    LED_CTRL_LNKLED_OVERRIDE |
2687				    LED_CTRL_TRAFFIC_OVERRIDE));
2688	}
2689
2690	if (current_link_up != netif_carrier_ok(tp->dev)) {
2691		if (current_link_up)
2692			netif_carrier_on(tp->dev);
2693		else
2694			netif_carrier_off(tp->dev);
2695		tg3_link_report(tp);
2696	} else {
		/* Carrier unchanged, but still report if pause config,
		 * speed, or duplex moved.
		 */
2697		u32 now_pause_cfg =
2698			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2699					 TG3_FLAG_TX_PAUSE);
2700		if (orig_pause_cfg != now_pause_cfg ||
2701		    orig_active_speed != tp->link_config.active_speed ||
2702		    orig_active_duplex != tp->link_config.active_duplex)
2703			tg3_link_report(tp);
2704	}
2705
2706	return 0;
2707}
2708
747e8f8b
MC
/* Link setup for serdes devices managed through MII registers
 * (e.g. 5714/5780-class parts), using 1000BASE-X register semantics
 * (ADVERTISE_1000X* in MII_ADVERTISE).
 *
 * @tp:          device state
 * @force_reset: non-zero to hard-reset the PHY before setup
 *
 * Three configuration paths: (1) parallel-detect already active with
 * autoneg enabled - just check link at the end; (2) autoneg enabled -
 * reprogram advertisement and restart AN if needed; (3) forced mode -
 * rewrite BMCR, forcing a link-down first if carrier was up.  Finally
 * derives speed/duplex/flow-control from BMSR/BMCR and the resolved
 * advertisement, and updates carrier state.
 *
 * Returns the OR of all tg3_readphy() error codes (0 on success).
 */
2709static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2710{
2711	int current_link_up, err = 0;
2712	u32 bmsr, bmcr;
2713	u16 current_speed;
2714	u8 current_duplex;
2715
2716	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2717	tw32_f(MAC_MODE, tp->mac_mode);
2718	udelay(40);
2719
2720	tw32(MAC_EVENT, 0);
2721
2722	tw32_f(MAC_STATUS,
2723	     (MAC_STATUS_SYNC_CHANGED |
2724	      MAC_STATUS_CFG_CHANGED |
2725	      MAC_STATUS_MI_COMPLETION |
2726	      MAC_STATUS_LNKSTATE_CHANGED));
2727	udelay(40);
2728
2729	if (force_reset)
2730		tg3_phy_reset(tp);
2731
2732	current_link_up = 0;
2733	current_speed = SPEED_INVALID;
2734	current_duplex = DUPLEX_INVALID;
2735
	/* BMSR link status is latched-low; read twice for current state. */
2736	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2737	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
	/* 5714: the PHY's BMSR link bit is unreliable; trust the MAC's
	 * TX status link indication instead.
	 */
2738	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2739		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2740			bmsr |= BMSR_LSTATUS;
2741		else
2742			bmsr &= ~BMSR_LSTATUS;
2743	}
747e8f8b
MC
2744
2745	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2746
2747	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2748	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2749		/* do nothing, just check for link up at the end */
2750	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2751		u32 adv, new_adv;
2752
2753		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2754		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2755				  ADVERTISE_1000XPAUSE |
2756				  ADVERTISE_1000XPSE_ASYM |
2757				  ADVERTISE_SLCT);
2758
2759		/* Always advertise symmetric PAUSE just like copper */
2760		new_adv |= ADVERTISE_1000XPAUSE;
2761
2762		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2763			new_adv |= ADVERTISE_1000XHALF;
2764		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2765			new_adv |= ADVERTISE_1000XFULL;
2766
		/* Restart autoneg only if the advertisement changed or
		 * AN was off; the link check then happens on a later
		 * poll, so return early here.
		 */
2767		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2768			tg3_writephy(tp, MII_ADVERTISE, new_adv);
2769			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2770			tg3_writephy(tp, MII_BMCR, bmcr);
2771
2772			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2773			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2774			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2775
2776			return err;
2777		}
2778	} else {
2779		u32 new_bmcr;
2780
2781		bmcr &= ~BMCR_SPEED1000;
2782		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2783
2784		if (tp->link_config.duplex == DUPLEX_FULL)
2785			new_bmcr |= BMCR_FULLDPLX;
2786
2787		if (new_bmcr != bmcr) {
2788			/* BMCR_SPEED1000 is a reserved bit that needs
2789			 * to be set on write.
2790			 */
2791			new_bmcr |= BMCR_SPEED1000;
2792
2793			/* Force a linkdown */
2794			if (netif_carrier_ok(tp->dev)) {
2795				u32 adv;
2796
				/* Drop all 1000X advertisement bits and
				 * restart AN so the partner sees the
				 * link go down before we force mode.
				 */
2797				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2798				adv &= ~(ADVERTISE_1000XFULL |
2799					 ADVERTISE_1000XHALF |
2800					 ADVERTISE_SLCT);
2801				tg3_writephy(tp, MII_ADVERTISE, adv);
2802				tg3_writephy(tp, MII_BMCR, bmcr |
2803					     BMCR_ANRESTART |
2804					     BMCR_ANENABLE);
2805				udelay(10);
2806				netif_carrier_off(tp->dev);
2807			}
2808			tg3_writephy(tp, MII_BMCR, new_bmcr);
2809			bmcr = new_bmcr;
			/* Latched-low: read BMSR twice again. */
2810			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2811			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
2812			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2813			    ASIC_REV_5714) {
2814				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2815					bmsr |= BMSR_LSTATUS;
2816				else
2817					bmsr &= ~BMSR_LSTATUS;
2818			}
747e8f8b
MC
2819			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2820		}
2821	}
2822
2823	if (bmsr & BMSR_LSTATUS) {
2824		current_speed = SPEED_1000;
2825		current_link_up = 1;
2826		if (bmcr & BMCR_FULLDPLX)
2827			current_duplex = DUPLEX_FULL;
2828		else
2829			current_duplex = DUPLEX_HALF;
2830
2831		if (bmcr & BMCR_ANENABLE) {
2832			u32 local_adv, remote_adv, common;
2833
			/* Resolve duplex and pause from the intersection
			 * of both sides' advertisements; no common
			 * ability means the link is not usable.
			 */
2834			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2835			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2836			common = local_adv & remote_adv;
2837			if (common & (ADVERTISE_1000XHALF |
2838				      ADVERTISE_1000XFULL)) {
2839				if (common & ADVERTISE_1000XFULL)
2840					current_duplex = DUPLEX_FULL;
2841				else
2842					current_duplex = DUPLEX_HALF;
2843
2844				tg3_setup_flow_control(tp, local_adv,
2845						       remote_adv);
2846			}
2847			else
2848				current_link_up = 0;
2849		}
2850	}
2851
	/* NOTE(review): this tests the previous active_duplex, not the
	 * current_duplex computed above (which is stored a few lines
	 * later) - confirm this ordering is intentional.
	 */
2852	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2853	if (tp->link_config.active_duplex == DUPLEX_HALF)
2854		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2855
2856	tw32_f(MAC_MODE, tp->mac_mode);
2857	udelay(40);
2858
2859	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2860
2861	tp->link_config.active_speed = current_speed;
2862	tp->link_config.active_duplex = current_duplex;
2863
2864	if (current_link_up != netif_carrier_ok(tp->dev)) {
2865		if (current_link_up)
2866			netif_carrier_on(tp->dev);
2867		else {
2868			netif_carrier_off(tp->dev);
2869			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2870		}
2871		tg3_link_report(tp);
2872	}
2873	return err;
2874}
2875
/* Periodic poll that implements parallel detection for MII-managed
 * serdes links.
 *
 * If the link is down while autoneg is on, and the PHY reports signal
 * detect without incoming config code words, force 1000FD with autoneg
 * off and mark TG3_FLG2_PARALLEL_DETECT.  Conversely, if a link forced
 * up by parallel detection later starts receiving config code words,
 * re-enable autoneg.  The 0x1c/0x17/0x15 accesses are Broadcom shadow
 * and expansion PHY registers.
 */
2876static void tg3_serdes_parallel_detect(struct tg3 *tp)
2877{
2878	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2879		/* Give autoneg time to complete. */
2880		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2881		return;
2882	}
2883	if (!netif_carrier_ok(tp->dev) &&
2884	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2885		u32 bmcr;
2886
2887		tg3_readphy(tp, MII_BMCR, &bmcr);
2888		if (bmcr & BMCR_ANENABLE) {
2889			u32 phy1, phy2;
2890
2891			/* Select shadow register 0x1f */
2892			tg3_writephy(tp, 0x1c, 0x7c00);
2893			tg3_readphy(tp, 0x1c, &phy1);
2894
2895			/* Select expansion interrupt status register */
2896			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice: first read returns stale data. */
2897			tg3_readphy(tp, 0x15, &phy2);
2898			tg3_readphy(tp, 0x15, &phy2);
2899
2900			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2901				/* We have signal detect and not receiving
2902				 * config code words, link is up by parallel
2903				 * detection.
2904				 */
2905
2906				bmcr &= ~BMCR_ANENABLE;
2907				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2908				tg3_writephy(tp, MII_BMCR, bmcr);
2909				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2910			}
2911		}
2912	}
2913	else if (netif_carrier_ok(tp->dev) &&
2914		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2915		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2916		u32 phy2;
2917
2918		/* Select expansion interrupt status register */
2919		tg3_writephy(tp, 0x17, 0x0f01);
2920		tg3_readphy(tp, 0x15, &phy2);
2921		if (phy2 & 0x20) {
2922			u32 bmcr;
2923
2924			/* Config code words received, turn on autoneg. */
2925			tg3_readphy(tp, MII_BMCR, &bmcr);
2926			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2927
2928			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2929
2930		}
2931	}
2932}
2933
1da177e4
LT
/* Dispatch link setup to the appropriate PHY-type handler, then apply
 * link-dependent MAC tuning.
 *
 * @tp:          device state
 * @force_reset: passed through to the PHY-specific setup routine
 *
 * After the PHY handler runs: program inter-packet-gap/slot-time
 * (larger slot time for 1000/half), and on pre-5705 chips enable or
 * disable statistics-block coalescing depending on carrier state.
 *
 * Returns the PHY handler's error code.
 */
2934static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2935{
2936	int err;
2937
2938	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2939		err = tg3_setup_fiber_phy(tp, force_reset);
747e8f8b
MC
2940	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2941		err = tg3_setup_fiber_mii_phy(tp, force_reset);
1da177e4
LT
2942	} else {
2943		err = tg3_setup_copper_phy(tp, force_reset);
2944	}
2945
2946	if (tp->link_config.active_speed == SPEED_1000 &&
2947	    tp->link_config.active_duplex == DUPLEX_HALF)
2948		tw32(MAC_TX_LENGTHS,
2949		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2950		      (6 << TX_LENGTHS_IPG_SHIFT) |
2951		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2952	else
2953		tw32(MAC_TX_LENGTHS,
2954		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2955		      (6 << TX_LENGTHS_IPG_SHIFT) |
2956		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2957
2958	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2959		if (netif_carrier_ok(tp->dev)) {
2960			tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 2961			     tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
2962		} else {
2963			tw32(HOSTCC_STAT_COAL_TICKS, 0);
2964		}
2965	}
2966
2967	return err;
2968}
2969
df3e6548
MC
/* This is called whenever we suspect that the system chipset is re-
2971 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2972 * is bogus tx completions. We try to recover by setting the
2973 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2974 * in the workqueue.
2975 *
 * Called from tg3_tx() (NAPI/softirq context), hence spin_lock rather
 * than spin_lock_irqsave.  The BUG_ON asserts that the reorder
 * workaround is not already active - if it is, recovery cannot help.
2975 */
2976static void tg3_tx_recover(struct tg3 *tp)
2977{
2978	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2979	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
2980
2981	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2982	       "mapped I/O cycles to the network device, attempting to "
2983	       "recover. Please report the problem to the driver maintainer "
2984	       "and include system chipset information.\n", tp->dev->name);
2985
	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset happens later in the workqueue.
	 */
2986	spin_lock(&tp->lock);
df3e6548 2987	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
df3e6548
MC
2988	spin_unlock(&tp->lock);
2989}
2990
1da177e4
LT
/* Tigon3 never reports partial packet sends. So we do not
2992 * need special logic to handle SKBs that have not had all
2993 * of their frags sent yet, like SunGEM does.
2994 *
 * TX completion: walk the ring from sw consumer to the hardware's
 * consumer index, unmap and free each completed skb (head + page
 * frags).  Inconsistencies (NULL skb, occupied slot, overrun past
 * hw_idx) indicate bogus completions from MMIO write reordering and
 * trigger tg3_tx_recover().  Wakes the queue when enough descriptors
 * free up.  Runs in NAPI context.
2994 */
2995static void tg3_tx(struct tg3 *tp)
2996{
2997	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2998	u32 sw_idx = tp->tx_cons;
2999
3000	while (sw_idx != hw_idx) {
3001		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3002		struct sk_buff *skb = ri->skb;
df3e6548
MC
3003		int i, tx_bug = 0;
3004
		/* A completed slot with no skb means the hardware
		 * reported a completion we never posted: recover.
		 */
3005		if (unlikely(skb == NULL)) {
3006			tg3_tx_recover(tp);
3007			return;
3008		}
1da177e4 3009
1da177e4
LT
3010		pci_unmap_single(tp->pdev,
3011				 pci_unmap_addr(ri, mapping),
3012				 skb_headlen(skb),
3013				 PCI_DMA_TODEVICE);
3014
3015		ri->skb = NULL;
3016
3017		sw_idx = NEXT_TX(sw_idx);
3018
		/* Frag descriptors follow the head descriptor; only the
		 * head slot carries the skb pointer, so ri->skb != NULL
		 * here (or running past hw_idx) is a consistency bug.
		 */
3019		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1da177e4 3020			ri = &tp->tx_buffers[sw_idx];
df3e6548
MC
3021			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3022				tx_bug = 1;
1da177e4
LT
3023
3024			pci_unmap_page(tp->pdev,
3025				       pci_unmap_addr(ri, mapping),
3026				       skb_shinfo(skb)->frags[i].size,
3027				       PCI_DMA_TODEVICE);
3028
3029			sw_idx = NEXT_TX(sw_idx);
3030		}
3031
f47c11ee 3032		dev_kfree_skb(skb);
df3e6548
MC
3033
3034		if (unlikely(tx_bug)) {
3035			tg3_tx_recover(tp);
3036			return;
3037		}
1da177e4
LT
3038	}
3039
3040	tp->tx_cons = sw_idx;
3041
	/* Unlocked check first, then recheck under tx_lock to avoid
	 * racing a concurrent queue stop in the xmit path.
	 */
51b91468
MC
3042	if (unlikely(netif_queue_stopped(tp->dev))) {
3043		spin_lock(&tp->tx_lock);
3044		if (netif_queue_stopped(tp->dev) &&
3045		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3046			netif_wake_queue(tp->dev);
3047		spin_unlock(&tp->tx_lock);
3048	}
1da177e4
LT
3049}
3050
/* Returns size of skb allocated or < 0 on error.
3052 *
3053 * We only need to fill in the address because the other members
3054 * of the RX descriptor are invariant, see tg3_init_rings.
3055 *
3056 * Note the purposeful assymetry of cpu vs. chip accesses.  For
3057 * posting buffers we only dirty the first cache line of the RX
3058 * descriptor (containing the address).  Whereas for the RX status
3059 * buffers the cpu only reads the last cacheline of the RX descriptor
3060 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3061 *
 * @opaque_key:        selects standard vs jumbo ring
 * @src_idx:           slot whose skb pointer to steal-clear, or < 0
 * @dest_idx_unmasked: destination slot, masked to ring size here
 *
 * Allocates a fresh skb, DMA-maps it, and writes its bus address into
 * the chosen producer-ring descriptor.  On failure (-ENOMEM/-EINVAL)
 * nothing is modified, which callers rely on.
3061 */
3062static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3063			    int src_idx, u32 dest_idx_unmasked)
3064{
3065	struct tg3_rx_buffer_desc *desc;
3066	struct ring_info *map, *src_map;
3067	struct sk_buff *skb;
3068	dma_addr_t mapping;
3069	int skb_size, dest_idx;
3070
3071	src_map = NULL;
3072	switch (opaque_key) {
3073	case RXD_OPAQUE_RING_STD:
3074		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3075		desc = &tp->rx_std[dest_idx];
3076		map = &tp->rx_std_buffers[dest_idx];
3077		if (src_idx >= 0)
3078			src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 3079		skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
3080		break;
3081
3082	case RXD_OPAQUE_RING_JUMBO:
3083		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3084		desc = &tp->rx_jumbo[dest_idx];
3085		map = &tp->rx_jumbo_buffers[dest_idx];
3086		if (src_idx >= 0)
3087			src_map = &tp->rx_jumbo_buffers[src_idx];
3088		skb_size = RX_JUMBO_PKT_BUF_SZ;
3089		break;
3090
3091	default:
3092		return -EINVAL;
3093	};
3094
3095	/* Do not overwrite any of the map or rp information
3096	 * until we are sure we can commit to a new buffer.
3097	 *
3098	 * Callers depend upon this behavior and assume that
3099	 * we leave everything unchanged if we fail.
3100	 */
3101	skb = dev_alloc_skb(skb_size);
3102	if (skb == NULL)
3103		return -ENOMEM;
3104
3105	skb->dev = tp->dev;
3106	skb_reserve(skb, tp->rx_offset);
3107
3108	mapping = pci_map_single(tp->pdev, skb->data,
3109				 skb_size - tp->rx_offset,
3110				 PCI_DMA_FROMDEVICE);
3111
3112	map->skb = skb;
3113	pci_unmap_addr_set(map, mapping, mapping);
3114
3115	if (src_map != NULL)
3116		src_map->skb = NULL;
3117
	/* Only the address words are written; all other descriptor
	 * fields were set once in tg3_init_rings.
	 */
3118	desc->addr_hi = ((u64)mapping >> 32);
3119	desc->addr_lo = ((u64)mapping & 0xffffffff);
3120
3121	return skb_size;
3122}
3123
/* We only need to move over in the address because the other
3125 * members of the RX descriptor are invariant.  See notes above
3126 * tg3_alloc_rx_skb for full details.
3127 *
 * Re-posts an already-mapped RX buffer from slot src_idx to slot
 * dest_idx_unmasked (same ring, selected by opaque_key) without
 * allocating or remapping: the skb pointer, unmap address, and DMA
 * address words are copied and the source slot's skb is cleared.
 * Used when a received packet is copied out or dropped, so its buffer
 * can be handed straight back to the chip.
3127 */
3128static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3129			   int src_idx, u32 dest_idx_unmasked)
3130{
3131	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3132	struct ring_info *src_map, *dest_map;
3133	int dest_idx;
3134
3135	switch (opaque_key) {
3136	case RXD_OPAQUE_RING_STD:
3137		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3138		dest_desc = &tp->rx_std[dest_idx];
3139		dest_map = &tp->rx_std_buffers[dest_idx];
3140		src_desc = &tp->rx_std[src_idx];
3141		src_map = &tp->rx_std_buffers[src_idx];
3142		break;
3143
3144	case RXD_OPAQUE_RING_JUMBO:
3145		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3146		dest_desc = &tp->rx_jumbo[dest_idx];
3147		dest_map = &tp->rx_jumbo_buffers[dest_idx];
3148		src_desc = &tp->rx_jumbo[src_idx];
3149		src_map = &tp->rx_jumbo_buffers[src_idx];
3150		break;
3151
3152	default:
3153		return;
3154	};
3155
3156	dest_map->skb = src_map->skb;
3157	pci_unmap_addr_set(dest_map, mapping,
3158			   pci_unmap_addr(src_map, mapping));
3159	dest_desc->addr_hi = src_desc->addr_hi;
3160	dest_desc->addr_lo = src_desc->addr_lo;
3161
3162	src_map->skb = NULL;
3163}
3164
3165#if TG3_VLAN_TAG_USED
/* Hand a received skb with a hardware-extracted VLAN tag to the stack
 * via the VLAN acceleration path.  Only compiled when 802.1Q support
 * is configured (see TG3_VLAN_TAG_USED at the top of the file).
 */
3166static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3167{
3168	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3169}
3170#endif
3171
3172/* The RX ring scheme is composed of multiple rings which post fresh
3173 * buffers to the chip, and one special ring the chip uses to report
3174 * status back to the host.
3175 *
3176 * The special ring reports the status of received packets to the
3177 * host. The chip does not write into the original descriptor the
3178 * RX buffer was obtained from. The chip simply takes the original
3179 * descriptor as provided by the host, updates the status and length
3180 * field, then writes this into the next status ring entry.
3181 *
3182 * Each ring the host uses to post buffers to the chip is described
3183 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3184 * it is first placed into the on-chip ram. When the packet's length
3185 * is known, it walks down the TG3_BDINFO entries to select the ring.
3186 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3187 * which is within the range of the new packet's length is chosen.
3188 *
3189 * The "separate ring for rx status" scheme may sound queer, but it makes
3190 * sense from a cache coherency perspective. If only the host writes
3191 * to the buffer post rings, and only the chip writes to the rx status
3192 * rings, then cache lines never move beyond shared-modified state.
3193 * If both the host and chip were to write into the same ring, cache line
3194 * eviction could occur since both entities want it in an exclusive state.
3195 */
3196static int tg3_rx(struct tg3 *tp, int budget)
3197{
f92905de 3198 u32 work_mask, rx_std_posted = 0;
483ba50b
MC
3199 u32 sw_idx = tp->rx_rcb_ptr;
3200 u16 hw_idx;
1da177e4
LT
3201 int received;
3202
3203 hw_idx = tp->hw_status->idx[0].rx_producer;
3204 /*
3205 * We need to order the read of hw_idx and the read of
3206 * the opaque cookie.
3207 */
3208 rmb();
1da177e4
LT
3209 work_mask = 0;
3210 received = 0;
3211 while (sw_idx != hw_idx && budget > 0) {
3212 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3213 unsigned int len;
3214 struct sk_buff *skb;
3215 dma_addr_t dma_addr;
3216 u32 opaque_key, desc_idx, *post_ptr;
3217
3218 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3219 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3220 if (opaque_key == RXD_OPAQUE_RING_STD) {
3221 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3222 mapping);
3223 skb = tp->rx_std_buffers[desc_idx].skb;
3224 post_ptr = &tp->rx_std_ptr;
f92905de 3225 rx_std_posted++;
1da177e4
LT
3226 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3227 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3228 mapping);
3229 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3230 post_ptr = &tp->rx_jumbo_ptr;
3231 }
3232 else {
3233 goto next_pkt_nopost;
3234 }
3235
3236 work_mask |= opaque_key;
3237
3238 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3239 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3240 drop_it:
3241 tg3_recycle_rx(tp, opaque_key,
3242 desc_idx, *post_ptr);
3243 drop_it_no_recycle:
3244 /* Other statistics kept track of by card. */
3245 tp->net_stats.rx_dropped++;
3246 goto next_pkt;
3247 }
3248
3249 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3250
3251 if (len > RX_COPY_THRESHOLD
3252 && tp->rx_offset == 2
3253 /* rx_offset != 2 iff this is a 5701 card running
3254 * in PCI-X mode [see tg3_get_invariants()] */
3255 ) {
3256 int skb_size;
3257
3258 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3259 desc_idx, *post_ptr);
3260 if (skb_size < 0)
3261 goto drop_it;
3262
3263 pci_unmap_single(tp->pdev, dma_addr,
3264 skb_size - tp->rx_offset,
3265 PCI_DMA_FROMDEVICE);
3266
3267 skb_put(skb, len);
3268 } else {
3269 struct sk_buff *copy_skb;
3270
3271 tg3_recycle_rx(tp, opaque_key,
3272 desc_idx, *post_ptr);
3273
3274 copy_skb = dev_alloc_skb(len + 2);
3275 if (copy_skb == NULL)
3276 goto drop_it_no_recycle;
3277
3278 copy_skb->dev = tp->dev;
3279 skb_reserve(copy_skb, 2);
3280 skb_put(copy_skb, len);
3281 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3282 memcpy(copy_skb->data, skb->data, len);
3283 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3284
3285 /* We'll reuse the original ring buffer. */
3286 skb = copy_skb;
3287 }
3288
3289 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3290 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3291 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3292 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3293 skb->ip_summed = CHECKSUM_UNNECESSARY;
3294 else
3295 skb->ip_summed = CHECKSUM_NONE;
3296
3297 skb->protocol = eth_type_trans(skb, tp->dev);
3298#if TG3_VLAN_TAG_USED
3299 if (tp->vlgrp != NULL &&
3300 desc->type_flags & RXD_FLAG_VLAN) {
3301 tg3_vlan_rx(tp, skb,
3302 desc->err_vlan & RXD_VLAN_MASK);
3303 } else
3304#endif
3305 netif_receive_skb(skb);
3306
3307 tp->dev->last_rx = jiffies;
3308 received++;
3309 budget--;
3310
3311next_pkt:
3312 (*post_ptr)++;
f92905de
MC
3313
3314 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3315 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3316
3317 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3318 TG3_64BIT_REG_LOW, idx);
3319 work_mask &= ~RXD_OPAQUE_RING_STD;
3320 rx_std_posted = 0;
3321 }
1da177e4 3322next_pkt_nopost:
483ba50b
MC
3323 sw_idx++;
3324 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
52f6d697
MC
3325
3326 /* Refresh hw_idx to see if there is new work */
3327 if (sw_idx == hw_idx) {
3328 hw_idx = tp->hw_status->idx[0].rx_producer;
3329 rmb();
3330 }
1da177e4
LT
3331 }
3332
3333 /* ACK the status ring. */
483ba50b
MC
3334 tp->rx_rcb_ptr = sw_idx;
3335 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
1da177e4
LT
3336
3337 /* Refill RX ring(s). */
3338 if (work_mask & RXD_OPAQUE_RING_STD) {
3339 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3340 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3341 sw_idx);
3342 }
3343 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3344 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3345 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3346 sw_idx);
3347 }
3348 mmiowb();
3349
3350 return received;
3351}
3352
3353static int tg3_poll(struct net_device *netdev, int *budget)
3354{
3355 struct tg3 *tp = netdev_priv(netdev);
3356 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3357 int done;
3358
1da177e4
LT
3359 /* handle link change and other phy events */
3360 if (!(tp->tg3_flags &
3361 (TG3_FLAG_USE_LINKCHG_REG |
3362 TG3_FLAG_POLL_SERDES))) {
3363 if (sblk->status & SD_STATUS_LINK_CHG) {
3364 sblk->status = SD_STATUS_UPDATED |
3365 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 3366 spin_lock(&tp->lock);
1da177e4 3367 tg3_setup_phy(tp, 0);
f47c11ee 3368 spin_unlock(&tp->lock);
1da177e4
LT
3369 }
3370 }
3371
3372 /* run TX completion thread */
3373 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
1da177e4 3374 tg3_tx(tp);
df3e6548
MC
3375 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3376 netif_rx_complete(netdev);
3377 schedule_work(&tp->reset_task);
3378 return 0;
3379 }
1da177e4
LT
3380 }
3381
1da177e4
LT
3382 /* run RX thread, within the bounds set by NAPI.
3383 * All RX "locking" is done by ensuring outside
3384 * code synchronizes with dev->poll()
3385 */
1da177e4
LT
3386 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3387 int orig_budget = *budget;
3388 int work_done;
3389
3390 if (orig_budget > netdev->quota)
3391 orig_budget = netdev->quota;
3392
3393 work_done = tg3_rx(tp, orig_budget);
3394
3395 *budget -= work_done;
3396 netdev->quota -= work_done;
1da177e4
LT
3397 }
3398
38f3843e 3399 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
f7383c22 3400 tp->last_tag = sblk->status_tag;
38f3843e
MC
3401 rmb();
3402 } else
3403 sblk->status &= ~SD_STATUS_UPDATED;
f7383c22 3404
1da177e4 3405 /* if no more work, tell net stack and NIC we're done */
f7383c22 3406 done = !tg3_has_work(tp);
1da177e4 3407 if (done) {
f47c11ee 3408 netif_rx_complete(netdev);
1da177e4 3409 tg3_restart_ints(tp);
1da177e4
LT
3410 }
3411
3412 return (done ? 0 : 1);
3413}
3414
f47c11ee
DM
3415static void tg3_irq_quiesce(struct tg3 *tp)
3416{
3417 BUG_ON(tp->irq_sync);
3418
3419 tp->irq_sync = 1;
3420 smp_mb();
3421
3422 synchronize_irq(tp->pdev->irq);
3423}
3424
3425static inline int tg3_irq_sync(struct tg3 *tp)
3426{
3427 return tp->irq_sync;
3428}
3429
3430/* Fully shutdown all tg3 driver activity elsewhere in the system.
3431 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3432 * with as well. Most of the time, this is not necessary except when
3433 * shutting down the device.
3434 */
3435static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3436{
3437 if (irq_sync)
3438 tg3_irq_quiesce(tp);
3439 spin_lock_bh(&tp->lock);
f47c11ee
DM
3440}
3441
3442static inline void tg3_full_unlock(struct tg3 *tp)
3443{
f47c11ee
DM
3444 spin_unlock_bh(&tp->lock);
3445}
3446
fcfa0a32
MC
3447/* One-shot MSI handler - Chip automatically disables interrupt
3448 * after sending MSI so driver doesn't have to do it.
3449 */
3450static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3451{
3452 struct net_device *dev = dev_id;
3453 struct tg3 *tp = netdev_priv(dev);
3454
3455 prefetch(tp->hw_status);
3456 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3457
3458 if (likely(!tg3_irq_sync(tp)))
3459 netif_rx_schedule(dev); /* schedule NAPI poll */
3460
3461 return IRQ_HANDLED;
3462}
3463
88b06bc2
MC
3464/* MSI ISR - No need to check for interrupt sharing and no need to
3465 * flush status block and interrupt mailbox. PCI ordering rules
3466 * guarantee that MSI will arrive after the status block.
3467 */
3468static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3469{
3470 struct net_device *dev = dev_id;
3471 struct tg3 *tp = netdev_priv(dev);
88b06bc2 3472
61487480
MC
3473 prefetch(tp->hw_status);
3474 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
88b06bc2 3475 /*
fac9b83e 3476 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 3477 * chip-internal interrupt pending events.
fac9b83e 3478 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
3479 * NIC to stop sending us irqs, engaging "in-intr-handler"
3480 * event coalescing.
3481 */
3482 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
61487480 3483 if (likely(!tg3_irq_sync(tp)))
88b06bc2 3484 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3485
88b06bc2
MC
3486 return IRQ_RETVAL(1);
3487}
3488
1da177e4
LT
3489static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3490{
3491 struct net_device *dev = dev_id;
3492 struct tg3 *tp = netdev_priv(dev);
3493 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3494 unsigned int handled = 1;
3495
1da177e4
LT
3496 /* In INTx mode, it is possible for the interrupt to arrive at
3497 * the CPU before the status block posted prior to the interrupt.
3498 * Reading the PCI State register will confirm whether the
3499 * interrupt is ours and will flush the status block.
3500 */
3501 if ((sblk->status & SD_STATUS_UPDATED) ||
3502 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3503 /*
fac9b83e 3504 * Writing any value to intr-mbox-0 clears PCI INTA# and
1da177e4 3505 * chip-internal interrupt pending events.
fac9b83e 3506 * Writing non-zero to intr-mbox-0 additional tells the
1da177e4
LT
3507 * NIC to stop sending us irqs, engaging "in-intr-handler"
3508 * event coalescing.
3509 */
3510 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3511 0x00000001);
f47c11ee
DM
3512 if (tg3_irq_sync(tp))
3513 goto out;
fac9b83e 3514 sblk->status &= ~SD_STATUS_UPDATED;
61487480
MC
3515 if (likely(tg3_has_work(tp))) {
3516 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
fac9b83e 3517 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3518 } else {
fac9b83e
DM
3519 /* No work, shared interrupt perhaps? re-enable
3520 * interrupts, and flush that PCI write
3521 */
09ee929c 3522 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
fac9b83e 3523 0x00000000);
fac9b83e
DM
3524 }
3525 } else { /* shared interrupt */
3526 handled = 0;
3527 }
f47c11ee 3528out:
fac9b83e
DM
3529 return IRQ_RETVAL(handled);
3530}
3531
3532static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3533{
3534 struct net_device *dev = dev_id;
3535 struct tg3 *tp = netdev_priv(dev);
3536 struct tg3_hw_status *sblk = tp->hw_status;
fac9b83e
DM
3537 unsigned int handled = 1;
3538
fac9b83e
DM
3539 /* In INTx mode, it is possible for the interrupt to arrive at
3540 * the CPU before the status block posted prior to the interrupt.
3541 * Reading the PCI State register will confirm whether the
3542 * interrupt is ours and will flush the status block.
3543 */
38f3843e 3544 if ((sblk->status_tag != tp->last_tag) ||
fac9b83e 3545 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1da177e4 3546 /*
fac9b83e
DM
3547 * writing any value to intr-mbox-0 clears PCI INTA# and
3548 * chip-internal interrupt pending events.
3549 * writing non-zero to intr-mbox-0 additional tells the
3550 * NIC to stop sending us irqs, engaging "in-intr-handler"
3551 * event coalescing.
1da177e4 3552 */
fac9b83e
DM
3553 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3554 0x00000001);
f47c11ee
DM
3555 if (tg3_irq_sync(tp))
3556 goto out;
38f3843e 3557 if (netif_rx_schedule_prep(dev)) {
61487480 3558 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
38f3843e
MC
3559 /* Update last_tag to mark that this status has been
3560 * seen. Because interrupt may be shared, we may be
3561 * racing with tg3_poll(), so only update last_tag
3562 * if tg3_poll() is not scheduled.
1da177e4 3563 */
38f3843e
MC
3564 tp->last_tag = sblk->status_tag;
3565 __netif_rx_schedule(dev);
1da177e4
LT
3566 }
3567 } else { /* shared interrupt */
3568 handled = 0;
3569 }
f47c11ee 3570out:
1da177e4
LT
3571 return IRQ_RETVAL(handled);
3572}
3573
7938109f
MC
3574/* ISR for interrupt test */
3575static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3576 struct pt_regs *regs)
3577{
3578 struct net_device *dev = dev_id;
3579 struct tg3 *tp = netdev_priv(dev);
3580 struct tg3_hw_status *sblk = tp->hw_status;
3581
f9804ddb
MC
3582 if ((sblk->status & SD_STATUS_UPDATED) ||
3583 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7938109f
MC
3584 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3585 0x00000001);
3586 return IRQ_RETVAL(1);
3587 }
3588 return IRQ_RETVAL(0);
3589}
3590
8e7a22e3 3591static int tg3_init_hw(struct tg3 *, int);
944d980e 3592static int tg3_halt(struct tg3 *, int, int);
1da177e4
LT
3593
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: drive the interrupt handler by hand so netconsole
 * et al. can make progress with interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3602
3603static void tg3_reset_task(void *_data)
3604{
3605 struct tg3 *tp = _data;
3606 unsigned int restart_timer;
3607
7faa006f
MC
3608 tg3_full_lock(tp, 0);
3609 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3610
3611 if (!netif_running(tp->dev)) {
3612 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3613 tg3_full_unlock(tp);
3614 return;
3615 }
3616
3617 tg3_full_unlock(tp);
3618
1da177e4
LT
3619 tg3_netif_stop(tp);
3620
f47c11ee 3621 tg3_full_lock(tp, 1);
1da177e4
LT
3622
3623 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3624 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3625
df3e6548
MC
3626 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3627 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3628 tp->write32_rx_mbox = tg3_write_flush_reg32;
3629 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3630 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3631 }
3632
944d980e 3633 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
8e7a22e3 3634 tg3_init_hw(tp, 1);
1da177e4
LT
3635
3636 tg3_netif_start(tp);
3637
1da177e4
LT
3638 if (restart_timer)
3639 mod_timer(&tp->timer, jiffies + 1);
7faa006f
MC
3640
3641 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3642
3643 tg3_full_unlock(tp);
1da177e4
LT
3644}
3645
3646static void tg3_tx_timeout(struct net_device *dev)
3647{
3648 struct tg3 *tp = netdev_priv(dev);
3649
3650 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3651 dev->name);
3652
3653 schedule_work(&tp->reset_task);
3654}
3655
c58ec932
MC
3656/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3657static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3658{
3659 u32 base = (u32) mapping & 0xffffffff;
3660
3661 return ((base > 0xffffdcc0) &&
3662 (base + len + 8 < base));
3663}
3664
72f2afb8
MC
3665/* Test for DMA addresses > 40-bit */
3666static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3667 int len)
3668{
3669#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6728a8e2 3670 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
72f2afb8
MC
3671 return (((u64) mapping + len) > DMA_40BIT_MASK);
3672 return 0;
3673#else
3674 return 0;
3675#endif
3676}
3677
1da177e4
LT
3678static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3679
72f2afb8
MC
3680/* Workaround 4GB and 40-bit hardware DMA bugs. */
3681static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
c58ec932
MC
3682 u32 last_plus_one, u32 *start,
3683 u32 base_flags, u32 mss)
1da177e4
LT
3684{
3685 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
c58ec932 3686 dma_addr_t new_addr = 0;
1da177e4 3687 u32 entry = *start;
c58ec932 3688 int i, ret = 0;
1da177e4
LT
3689
3690 if (!new_skb) {
c58ec932
MC
3691 ret = -1;
3692 } else {
3693 /* New SKB is guaranteed to be linear. */
3694 entry = *start;
3695 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3696 PCI_DMA_TODEVICE);
3697 /* Make sure new skb does not cross any 4G boundaries.
3698 * Drop the packet if it does.
3699 */
3700 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3701 ret = -1;
3702 dev_kfree_skb(new_skb);
3703 new_skb = NULL;
3704 } else {
3705 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3706 base_flags, 1 | (mss << 1));
3707 *start = NEXT_TX(entry);
3708 }
1da177e4
LT
3709 }
3710
1da177e4
LT
3711 /* Now clean up the sw ring entries. */
3712 i = 0;
3713 while (entry != last_plus_one) {
3714 int len;
3715
3716 if (i == 0)
3717 len = skb_headlen(skb);
3718 else
3719 len = skb_shinfo(skb)->frags[i-1].size;
3720 pci_unmap_single(tp->pdev,
3721 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3722 len, PCI_DMA_TODEVICE);
3723 if (i == 0) {
3724 tp->tx_buffers[entry].skb = new_skb;
3725 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3726 } else {
3727 tp->tx_buffers[entry].skb = NULL;
3728 }
3729 entry = NEXT_TX(entry);
3730 i++;
3731 }
3732
3733 dev_kfree_skb(skb);
3734
c58ec932 3735 return ret;
1da177e4
LT
3736}
3737
3738static void tg3_set_txd(struct tg3 *tp, int entry,
3739 dma_addr_t mapping, int len, u32 flags,
3740 u32 mss_and_is_end)
3741{
3742 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3743 int is_end = (mss_and_is_end & 0x1);
3744 u32 mss = (mss_and_is_end >> 1);
3745 u32 vlan_tag = 0;
3746
3747 if (is_end)
3748 flags |= TXD_FLAG_END;
3749 if (flags & TXD_FLAG_VLAN) {
3750 vlan_tag = flags >> 16;
3751 flags &= 0xffff;
3752 }
3753 vlan_tag |= (mss << TXD_MSS_SHIFT);
3754
3755 txd->addr_hi = ((u64) mapping >> 32);
3756 txd->addr_lo = ((u64) mapping & 0xffffffff);
3757 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3758 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3759}
3760
5a6f3074
MC
3761/* hard_start_xmit for devices that don't have any bugs and
3762 * support TG3_FLG2_HW_TSO_2 only.
3763 */
1da177e4 3764static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5a6f3074
MC
3765{
3766 struct tg3 *tp = netdev_priv(dev);
3767 dma_addr_t mapping;
3768 u32 len, entry, base_flags, mss;
3769
3770 len = skb_headlen(skb);
3771
00b70504
MC
3772 /* We are running in BH disabled context with netif_tx_lock
3773 * and TX reclaim runs via tp->poll inside of a software
5a6f3074
MC
3774 * interrupt. Furthermore, IRQ processing runs lockless so we have
3775 * no IRQ context deadlocks to worry about either. Rejoice!
3776 */
5a6f3074
MC
3777 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3778 if (!netif_queue_stopped(dev)) {
3779 netif_stop_queue(dev);
3780
3781 /* This is a hard error, log it. */
3782 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3783 "queue awake!\n", dev->name);
3784 }
5a6f3074
MC
3785 return NETDEV_TX_BUSY;
3786 }
3787
3788 entry = tp->tx_prod;
3789 base_flags = 0;
3790#if TG3_TSO_SUPPORT != 0
3791 mss = 0;
3792 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
7967168c 3793 (mss = skb_shinfo(skb)->gso_size) != 0) {
5a6f3074
MC
3794 int tcp_opt_len, ip_tcp_len;
3795
3796 if (skb_header_cloned(skb) &&
3797 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3798 dev_kfree_skb(skb);
3799 goto out_unlock;
3800 }
3801
3802 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3803 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3804
3805 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3806 TXD_FLAG_CPU_POST_DMA);
3807
3808 skb->nh.iph->check = 0;
3809 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3810
3811 skb->h.th->check = 0;
3812
3813 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3814 }
3815 else if (skb->ip_summed == CHECKSUM_HW)
3816 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3817#else
3818 mss = 0;
3819 if (skb->ip_summed == CHECKSUM_HW)
3820 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3821#endif
3822#if TG3_VLAN_TAG_USED
3823 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3824 base_flags |= (TXD_FLAG_VLAN |
3825 (vlan_tx_tag_get(skb) << 16));
3826#endif
3827
3828 /* Queue skb data, a.k.a. the main skb fragment. */
3829 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3830
3831 tp->tx_buffers[entry].skb = skb;
3832 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3833
3834 tg3_set_txd(tp, entry, mapping, len, base_flags,
3835 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3836
3837 entry = NEXT_TX(entry);
3838
3839 /* Now loop through additional data fragments, and queue them. */
3840 if (skb_shinfo(skb)->nr_frags > 0) {
3841 unsigned int i, last;
3842
3843 last = skb_shinfo(skb)->nr_frags - 1;
3844 for (i = 0; i <= last; i++) {
3845 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3846
3847 len = frag->size;
3848 mapping = pci_map_page(tp->pdev,
3849 frag->page,
3850 frag->page_offset,
3851 len, PCI_DMA_TODEVICE);
3852
3853 tp->tx_buffers[entry].skb = NULL;
3854 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3855
3856 tg3_set_txd(tp, entry, mapping, len,
3857 base_flags, (i == last) | (mss << 1));
3858
3859 entry = NEXT_TX(entry);
3860 }
3861 }
3862
3863 /* Packets are ready, update Tx producer idx local and on card. */
3864 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3865
3866 tp->tx_prod = entry;
00b70504
MC
3867 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
3868 spin_lock(&tp->tx_lock);
5a6f3074
MC
3869 netif_stop_queue(dev);
3870 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3871 netif_wake_queue(tp->dev);
00b70504 3872 spin_unlock(&tp->tx_lock);
5a6f3074
MC
3873 }
3874
3875out_unlock:
3876 mmiowb();
5a6f3074
MC
3877
3878 dev->trans_start = jiffies;
3879
3880 return NETDEV_TX_OK;
3881}
3882
3883/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3884 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3885 */
3886static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
3887{
3888 struct tg3 *tp = netdev_priv(dev);
3889 dma_addr_t mapping;
1da177e4
LT
3890 u32 len, entry, base_flags, mss;
3891 int would_hit_hwbug;
1da177e4
LT
3892
3893 len = skb_headlen(skb);
3894
00b70504
MC
3895 /* We are running in BH disabled context with netif_tx_lock
3896 * and TX reclaim runs via tp->poll inside of a software
f47c11ee
DM
3897 * interrupt. Furthermore, IRQ processing runs lockless so we have
3898 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 3899 */
1da177e4 3900 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
1f064a87
SH
3901 if (!netif_queue_stopped(dev)) {
3902 netif_stop_queue(dev);
3903
3904 /* This is a hard error, log it. */
3905 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3906 "queue awake!\n", dev->name);
3907 }
1da177e4
LT
3908 return NETDEV_TX_BUSY;
3909 }
3910
3911 entry = tp->tx_prod;
3912 base_flags = 0;
3913 if (skb->ip_summed == CHECKSUM_HW)
3914 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3915#if TG3_TSO_SUPPORT != 0
3916 mss = 0;
3917 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
7967168c 3918 (mss = skb_shinfo(skb)->gso_size) != 0) {
1da177e4
LT
3919 int tcp_opt_len, ip_tcp_len;
3920
3921 if (skb_header_cloned(skb) &&
3922 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3923 dev_kfree_skb(skb);
3924 goto out_unlock;
3925 }
3926
3927 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3928 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3929
3930 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3931 TXD_FLAG_CPU_POST_DMA);
3932
3933 skb->nh.iph->check = 0;
fd30333d 3934 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
1da177e4
LT
3935 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3936 skb->h.th->check = 0;
3937 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3938 }
3939 else {
3940 skb->h.th->check =
3941 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3942 skb->nh.iph->daddr,
3943 0, IPPROTO_TCP, 0);
3944 }
3945
3946 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3947 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3948 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3949 int tsflags;
3950
3951 tsflags = ((skb->nh.iph->ihl - 5) +
3952 (tcp_opt_len >> 2));
3953 mss |= (tsflags << 11);
3954 }
3955 } else {
3956 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3957 int tsflags;
3958
3959 tsflags = ((skb->nh.iph->ihl - 5) +
3960 (tcp_opt_len >> 2));
3961 base_flags |= tsflags << 12;
3962 }
3963 }
3964 }
3965#else
3966 mss = 0;
3967#endif
3968#if TG3_VLAN_TAG_USED
3969 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3970 base_flags |= (TXD_FLAG_VLAN |
3971 (vlan_tx_tag_get(skb) << 16));
3972#endif
3973
3974 /* Queue skb data, a.k.a. the main skb fragment. */
3975 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3976
3977 tp->tx_buffers[entry].skb = skb;
3978 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3979
3980 would_hit_hwbug = 0;
3981
3982 if (tg3_4g_overflow_test(mapping, len))
c58ec932 3983 would_hit_hwbug = 1;
1da177e4
LT
3984
3985 tg3_set_txd(tp, entry, mapping, len, base_flags,
3986 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3987
3988 entry = NEXT_TX(entry);
3989
3990 /* Now loop through additional data fragments, and queue them. */
3991 if (skb_shinfo(skb)->nr_frags > 0) {
3992 unsigned int i, last;
3993
3994 last = skb_shinfo(skb)->nr_frags - 1;
3995 for (i = 0; i <= last; i++) {
3996 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3997
3998 len = frag->size;
3999 mapping = pci_map_page(tp->pdev,
4000 frag->page,
4001 frag->page_offset,
4002 len, PCI_DMA_TODEVICE);
4003
4004 tp->tx_buffers[entry].skb = NULL;
4005 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4006
c58ec932
MC
4007 if (tg3_4g_overflow_test(mapping, len))
4008 would_hit_hwbug = 1;
1da177e4 4009
72f2afb8
MC
4010 if (tg3_40bit_overflow_test(tp, mapping, len))
4011 would_hit_hwbug = 1;
4012
1da177e4
LT
4013 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4014 tg3_set_txd(tp, entry, mapping, len,
4015 base_flags, (i == last)|(mss << 1));
4016 else
4017 tg3_set_txd(tp, entry, mapping, len,
4018 base_flags, (i == last));
4019
4020 entry = NEXT_TX(entry);
4021 }
4022 }
4023
4024 if (would_hit_hwbug) {
4025 u32 last_plus_one = entry;
4026 u32 start;
1da177e4 4027
c58ec932
MC
4028 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4029 start &= (TG3_TX_RING_SIZE - 1);
1da177e4
LT
4030
4031 /* If the workaround fails due to memory/mapping
4032 * failure, silently drop this packet.
4033 */
72f2afb8 4034 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
c58ec932 4035 &start, base_flags, mss))
1da177e4
LT
4036 goto out_unlock;
4037
4038 entry = start;
4039 }
4040
4041 /* Packets are ready, update Tx producer idx local and on card. */
4042 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4043
4044 tp->tx_prod = entry;
00b70504
MC
4045 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
4046 spin_lock(&tp->tx_lock);
1da177e4 4047 netif_stop_queue(dev);
51b91468
MC
4048 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4049 netif_wake_queue(tp->dev);
00b70504 4050 spin_unlock(&tp->tx_lock);
51b91468 4051 }
1da177e4
LT
4052
4053out_unlock:
4054 mmiowb();
1da177e4
LT
4055
4056 dev->trans_start = jiffies;
4057
4058 return NETDEV_TX_OK;
4059}
4060
4061static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4062 int new_mtu)
4063{
4064 dev->mtu = new_mtu;
4065
ef7f5ec0 4066 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4067 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4068 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4069 ethtool_op_set_tso(dev, 0);
4070 }
4071 else
4072 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4073 } else {
a4e2b347 4074 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4075 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4076 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4077 }
1da177e4
LT
4078}
4079
4080static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4081{
4082 struct tg3 *tp = netdev_priv(dev);
4083
4084 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4085 return -EINVAL;
4086
4087 if (!netif_running(dev)) {
4088 /* We'll just catch it later when the
4089 * device is up'd.
4090 */
4091 tg3_set_mtu(dev, tp, new_mtu);
4092 return 0;
4093 }
4094
4095 tg3_netif_stop(tp);
f47c11ee
DM
4096
4097 tg3_full_lock(tp, 1);
1da177e4 4098
944d980e 4099 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
4100
4101 tg3_set_mtu(dev, tp, new_mtu);
4102
8e7a22e3 4103 tg3_init_hw(tp, 0);
1da177e4
LT
4104
4105 tg3_netif_start(tp);
4106
f47c11ee 4107 tg3_full_unlock(tp);
1da177e4
LT
4108
4109 return 0;
4110}
4111
4112/* Free up pending packets in all rx/tx rings.
4113 *
4114 * The chip has been shut down and the driver detached from
4115 * the networking, so no interrupts or new tx packets will
4116 * end up in the driver. tp->{tx,}lock is not held and we are not
4117 * in an interrupt context and thus may sleep.
4118 */
4119static void tg3_free_rings(struct tg3 *tp)
4120{
4121 struct ring_info *rxp;
4122 int i;
4123
4124 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4125 rxp = &tp->rx_std_buffers[i];
4126
4127 if (rxp->skb == NULL)
4128 continue;
4129 pci_unmap_single(tp->pdev,
4130 pci_unmap_addr(rxp, mapping),
7e72aad4 4131 tp->rx_pkt_buf_sz - tp->rx_offset,
1da177e4
LT
4132 PCI_DMA_FROMDEVICE);
4133 dev_kfree_skb_any(rxp->skb);
4134 rxp->skb = NULL;
4135 }
4136
4137 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4138 rxp = &tp->rx_jumbo_buffers[i];
4139
4140 if (rxp->skb == NULL)
4141 continue;
4142 pci_unmap_single(tp->pdev,
4143 pci_unmap_addr(rxp, mapping),
4144 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4145 PCI_DMA_FROMDEVICE);
4146 dev_kfree_skb_any(rxp->skb);
4147 rxp->skb = NULL;
4148 }
4149
4150 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4151 struct tx_ring_info *txp;
4152 struct sk_buff *skb;
4153 int j;
4154
4155 txp = &tp->tx_buffers[i];
4156 skb = txp->skb;
4157
4158 if (skb == NULL) {
4159 i++;
4160 continue;
4161 }
4162
4163 pci_unmap_single(tp->pdev,
4164 pci_unmap_addr(txp, mapping),
4165 skb_headlen(skb),
4166 PCI_DMA_TODEVICE);
4167 txp->skb = NULL;
4168
4169 i++;
4170
4171 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4172 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4173 pci_unmap_page(tp->pdev,
4174 pci_unmap_addr(txp, mapping),
4175 skb_shinfo(skb)->frags[j].size,
4176 PCI_DMA_TODEVICE);
4177 i++;
4178 }
4179
4180 dev_kfree_skb_any(skb);
4181 }
4182}
4183
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static void tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips running with an MTU above the standard
	 * Ethernet payload use jumbo-sized buffers in the standard ring.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* Usable length excludes the rx_offset headroom and a
		 * 64-byte tail — presumably reserved for chip/alignment
		 * use; TODO confirm against the 57xx programming guide.
		 */
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		/* opaque encodes ring id + index so completions can be
		 * matched back to the posting slot.
		 */
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  An allocation
	 * failure stops posting early without reporting an error; the
	 * ring simply runs with fewer buffers.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
				     -1, i) < 0)
			break;
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0)
				break;
		}
	}
}
4253
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_std_buffers is the single kmalloc that backs all three SKB
	 * bookkeeping arrays (std rx, jumbo rx, tx) — see
	 * tg3_alloc_consistent().  Freeing it also releases
	 * rx_jumbo_buffers and tx_buffers.  kfree(NULL) is a no-op, so
	 * this is safe even if allocation never happened.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	/* Each DMA-coherent ring is freed only if present, and its
	 * pointer is NULLed so a repeat call is harmless.
	 */
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
				    tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4293
4294/*
4295 * Must not be invoked with interrupt sources disabled and
4296 * the hardware shutdown down. Can sleep.
4297 */
4298static int tg3_alloc_consistent(struct tg3 *tp)
4299{
4300 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4301 (TG3_RX_RING_SIZE +
4302 TG3_RX_JUMBO_RING_SIZE)) +
4303 (sizeof(struct tx_ring_info) *
4304 TG3_TX_RING_SIZE),
4305 GFP_KERNEL);
4306 if (!tp->rx_std_buffers)
4307 return -ENOMEM;
4308
4309 memset(tp->rx_std_buffers, 0,
4310 (sizeof(struct ring_info) *
4311 (TG3_RX_RING_SIZE +
4312 TG3_RX_JUMBO_RING_SIZE)) +
4313 (sizeof(struct tx_ring_info) *
4314 TG3_TX_RING_SIZE));
4315
4316 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4317 tp->tx_buffers = (struct tx_ring_info *)
4318 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4319
4320 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4321 &tp->rx_std_mapping);
4322 if (!tp->rx_std)
4323 goto err_out;
4324
4325 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4326 &tp->rx_jumbo_mapping);
4327
4328 if (!tp->rx_jumbo)
4329 goto err_out;
4330
4331 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4332 &tp->rx_rcb_mapping);
4333 if (!tp->rx_rcb)
4334 goto err_out;
4335
4336 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4337 &tp->tx_desc_mapping);
4338 if (!tp->tx_ring)
4339 goto err_out;
4340
4341 tp->hw_status = pci_alloc_consistent(tp->pdev,
4342 TG3_HW_STATUS_SIZE,
4343 &tp->status_mapping);
4344 if (!tp->hw_status)
4345 goto err_out;
4346
4347 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4348 sizeof(struct tg3_hw_stats),
4349 &tp->stats_mapping);
4350 if (!tp->hw_stats)
4351 goto err_out;
4352
4353 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4354 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4355
4356 return 0;
4357
4358err_out:
4359 tg3_free_consistent(tp);
4360 return -ENOMEM;
4361}
4362
4363#define MAX_WAIT_CNT 1000
4364
4365/* To stop a block, clear the enable bit and poll till it
4366 * clears. tp->lock is held.
4367 */
b3b7d6be 4368static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4369{
4370 unsigned int i;
4371 u32 val;
4372
4373 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4374 switch (ofs) {
4375 case RCVLSC_MODE:
4376 case DMAC_MODE:
4377 case MBFREE_MODE:
4378 case BUFMGR_MODE:
4379 case MEMARB_MODE:
4380 /* We can't enable/disable these bits of the
4381 * 5705/5750, just say success.
4382 */
4383 return 0;
4384
4385 default:
4386 break;
4387 };
4388 }
4389
4390 val = tr32(ofs);
4391 val &= ~enable_bit;
4392 tw32_f(ofs, val);
4393
4394 for (i = 0; i < MAX_WAIT_CNT; i++) {
4395 udelay(100);
4396 val = tr32(ofs);
4397 if ((val & enable_bit) == 0)
4398 break;
4399 }
4400
b3b7d6be 4401 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4402 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4403 "ofs=%lx enable_bit=%x\n",
4404 ofs, enable_bit);
4405 return -ENODEV;
4406 }
4407
4408 return 0;
4409}
4410
/* Quiesce the chip: disable interrupts, then stop every DMA/MAC block
 * in receive-path order followed by send-path order.  Individual stop
 * failures are OR-ed into err (so callers learn pass/fail, not which
 * block failed) and shutdown continues regardless.
 *
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new frames at the MAC first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* The MAC TX path has no tg3_stop_block() helper; poll the mode
	 * register directly.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the host-visible status/stats blocks now that the chip
	 * can no longer DMA into them.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4473
/* Acquire the NVRAM software arbitration grant.  The lock is
 * recursive: only the first holder touches the hardware, later calls
 * just bump nvram_lock_cnt.  Returns 0 on success (including when the
 * chip has no NVRAM), -ENODEV if arbitration times out.
 *
 * tp->lock is held.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			/* Poll up to 8000 * 20us (~160ms) for the grant. */
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Timed out — withdraw our request. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
4496
4497/* tp->lock is held. */
4498static void tg3_nvram_unlock(struct tg3 *tp)
4499{
ec41c7df
MC
4500 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4501 if (tp->nvram_lock_cnt > 0)
4502 tp->nvram_lock_cnt--;
4503 if (tp->nvram_lock_cnt == 0)
4504 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4505 }
1da177e4
LT
4506}
4507
e6af301b
MC
4508/* tp->lock is held. */
4509static void tg3_enable_nvram_access(struct tg3 *tp)
4510{
4511 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4512 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4513 u32 nvaccess = tr32(NVRAM_ACCESS);
4514
4515 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4516 }
4517}
4518
4519/* tp->lock is held. */
4520static void tg3_disable_nvram_access(struct tg3 *tp)
4521{
4522 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4523 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4524 u32 nvaccess = tr32(NVRAM_ACCESS);
4525
4526 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4527 }
4528}
4529
1da177e4
LT
/* Post the pre-reset firmware signature: always write the magic to
 * the firmware mailbox, then (on chips using the new ASF handshake)
 * record the driver state corresponding to @kind so on-chip firmware
 * knows why the reset is happening.
 *
 * tp->lock is held.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		};
	}
}
4558
4559/* tp->lock is held. */
4560static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4561{
4562 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4563 switch (kind) {
4564 case RESET_KIND_INIT:
4565 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4566 DRV_STATE_START_DONE);
4567 break;
4568
4569 case RESET_KIND_SHUTDOWN:
4570 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4571 DRV_STATE_UNLOAD_DONE);
4572 break;
4573
4574 default:
4575 break;
4576 };
4577 }
4578}
4579
/* Post the driver-state signature for chips using the legacy ASF
 * handshake (TG3_FLAG_ENABLE_ASF without the new-handshake flag
 * path); mirrors the state writes of tg3_write_sig_pre_reset().
 *
 * tp->lock is held.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		};
	}
}
4605
4606static void tg3_stop_fw(struct tg3 *);
4607
/* Perform a full core-clock reset of the chip and restore enough PCI
 * and chip state to talk to it again.  The sequence of register and
 * config-space accesses below is order-sensitive; do not reorder.
 *
 * Returns 0 (firmware absence is reported but not treated as an
 * error).  tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* Undocumented register poke — presumably a PCIe
		 * erratum workaround; TODO confirm meaning of 0x7e2c.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		/* Undocumented register write — presumably a 5750 A3
		 * erratum workaround; TODO confirm 0x5000 semantics.
		 */
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-program the MAC port mode for the PHY attachment type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmare.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state. */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4818
/* Ask the on-chip ASF firmware to pause: post the PAUSE_FW command in
 * the command mailbox and ring the RX CPU event (bit 14 — presumably
 * the driver-to-firmware doorbell; TODO confirm against chip docs).
 * Best-effort: the ACK wait times out silently after ~100us.
 *
 * tp->lock is held.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		u32 val;
		int i;

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event. */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
4839
/* Shut the chip down and reset it.
 *
 * Stops on-chip firmware, posts the pre-reset signature for @kind,
 * quiesces all DMA/MAC blocks (timeout messages suppressed when
 * @silent), resets the core, then posts the legacy and post-reset
 * signatures.  Returns the tg3_chip_reset() status; tg3_abort_hw()
 * failures are intentionally ignored, as reset recovers the chip
 * anyway.
 *
 * tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* err is already 0 on success; the original
	 * "if (err) return err; return 0;" was redundant.
	 */
	return err;
}
4860
4861#define TG3_FW_RELEASE_MAJOR 0x0
4862#define TG3_FW_RELASE_MINOR 0x0
4863#define TG3_FW_RELEASE_FIX 0x0
4864#define TG3_FW_START_ADDR 0x08000000
4865#define TG3_FW_TEXT_ADDR 0x08000000
4866#define TG3_FW_TEXT_LEN 0x9c0
4867#define TG3_FW_RODATA_ADDR 0x080009c0
4868#define TG3_FW_RODATA_LEN 0x60
4869#define TG3_FW_DATA_ADDR 0x08000a40
4870#define TG3_FW_DATA_LEN 0x20
4871#define TG3_FW_SBSS_ADDR 0x08000a60
4872#define TG3_FW_SBSS_LEN 0xc
4873#define TG3_FW_BSS_ADDR 0x08000a70
4874#define TG3_FW_BSS_LEN 0x10
4875
/* .text section of the replacement RX-CPU firmware loaded by
 * tg3_load_5701_a0_firmware_fix() (presumably MIPS machine code for
 * the on-chip CPU — opaque data, do not edit by hand).
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
4969
/* .rodata section for the firmware above.  The words decode as packed
 * ASCII (e.g. 0x66617461/0x6c457272 = "fata"/"lErr") — presumably
 * event/log name strings used by the firmware.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
4977
4978#if 0 /* All zeros, don't eat up space with it. */
4979u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4980 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4981 0x00000000, 0x00000000, 0x00000000, 0x00000000
4982};
4983#endif
4984
4985#define RX_CPU_SCRATCH_BASE 0x30000
4986#define RX_CPU_SCRATCH_SIZE 0x04000
4987#define TX_CPU_SCRATCH_BASE 0x34000
4988#define TX_CPU_SCRATCH_SIZE 0x04000
4989
/* Halt the RX or TX on-chip CPU at @offset (RX_CPU_BASE/TX_CPU_BASE).
 * Halting the TX CPU on 5705-plus parts is a driver bug (they have no
 * TX CPU block to halt), hence the BUG_ON.  Returns 0 on success,
 * -ENODEV if the CPU never reports halted.
 *
 * tp->lock is held.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU gets one extra flushed halt write plus a
		 * settling delay after the poll loop.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
5031
/* Describes one firmware image for tg3_load_firmware_cpu(): link
 * address, byte length and payload for each of the .text, .rodata and
 * .data sections.  A NULL *_data pointer means "fill that section
 * with zeros" (see the ternaries in tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* .text length in bytes */
	u32 *text_data;			/* .text payload, or NULL for zeros */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	u32 *rodata_data;		/* .rodata payload, or NULL for zeros */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* .data length in bytes */
	u32 *data_data;			/* .data payload, or NULL for zeros */
};
5043
/* Copy the firmware image described by @info into the scratch memory
 * of the CPU at @cpu_base, leaving the CPU halted.  The caller starts
 * the CPU afterwards (see tg3_load_5701_a0_firmware_fix()).
 * Returns 0 on success, -EINVAL for a TX-CPU load on 5705-plus parts,
 * or the tg3_halt_cpu() error.
 *
 * tp->lock is held.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s which is 5705.\n",
		       tp->dev->name);
		return -EINVAL;
	}

	/* 5705-plus parts are written via memory-window writes; older
	 * parts via indirect register writes.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Clear all of scratch memory, then copy each section to its
	 * in-scratch offset (low 16 bits of the link address).  NULL
	 * section data means the section is zero-filled.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->text_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->text_data ?
			  info->text_data[i] : 0));
	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->rodata_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->rodata_data ?
			  info->rodata_data[i] : 0));
	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->data_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->data_data ?
			  info->data_data[i] : 0));

	err = 0;

out:
	return err;
}
5102
/* Load the tg3Fw* replacement firmware into both on-chip CPUs, then
 * start only the RX CPU at the firmware entry point, retrying the PC
 * write up to 5 times.  Returns 0 on success or a negative errno.
 *
 * tp->lock is held.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	/* .data is all zeros (see the #if 0'd tg3FwData above), so NULL
	 * lets the loader zero-fill it.
	 */
	info.data_len = TG3_FW_DATA_LEN;
	info.data_data = NULL;

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Release the halt: the RX CPU begins executing the firmware. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
5155
#if TG3_TSO_SUPPORT != 0

/* Memory layout of the on-chip TSO firmware image (standard chips).
 * The ADDR/LEN pairs describe where each section (text, rodata, data,
 * sbss, bss) lives in the device CPU's address space.
 *
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a long-standing
 * misspelling of "RELEASE"; the name is kept as-is so any existing
 * references to the macro continue to compile.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5172
/* Text (code) section of the TSO firmware: one 32-bit MIPS instruction
 * word per array entry, loaded verbatim into the device CPU's scratch
 * memory at TG3_TSO_FW_TEXT_ADDR.  This is an opaque binary payload
 * derived from proprietary Broadcom firmware (see the copyright notice
 * at the top of the file) -- do not hand-edit individual words.
 */
static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
5459
/* Read-only data section of the TSO firmware, loaded at
 * TG3_TSO_FW_RODATA_ADDR.  The words are ASCII tags used internally by
 * the firmware (e.g. 0x4d61696e 0x43707542 = "MainCpuB",
 * 0x73746b6f 0x66666c64 = "stkoffld", 0x66617461 0x6c457272 = "fatalErr").
 */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5467
/* Initialized data section of the TSO firmware, loaded at
 * TG3_TSO_FW_DATA_ADDR.  Contains the firmware's version string:
 * 0x73746b6f 0x66666c64 0x5f76312e 0x362e3000 = "stkoffld_v1.6.0".
 */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5473
/* 5705 needs a special version of the TSO firmware. */
/* Memory layout of the 5705-specific TSO firmware image; same section
 * scheme (text/rodata/data/sbss/bss ADDR+LEN pairs) as the standard
 * TG3_TSO_FW_* layout above, but based at 0x00010000.
 *
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR mirrors the
 * misspelling in the standard-TSO macros; kept as-is so existing
 * references continue to compile.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5489
5490static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5491 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5492 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5493 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5494 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5495 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5496 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5497 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5498 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5499 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5500 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5501 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5502 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5503 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5504 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5505 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5506 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5507 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5508 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5509 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5510 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5511 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5512 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5513 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5514 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5515 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5516 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5517 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5518 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5519 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5520 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5521 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5522 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5523 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5524 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5525 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5526 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5527 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5528 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5529 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5530 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5531 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5532 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5533 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5534 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5535 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5536 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5537 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5538 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5539 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5540 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5541 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5542 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5543 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5544 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5545 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5546 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5547 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5548 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5549 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5550 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5551 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5552 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5553 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5554 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5555 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5556 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5557 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5558 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5559 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5560 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5561 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5562 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5563 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5564 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5565 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5566 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5567 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5568 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5569 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5570 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5571 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5572 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5573 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5574 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5575 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5576 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5577 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5578 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5579 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5580 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5581 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5582 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5583 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5584 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5585 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5586 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5587 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5588 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5589 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5590 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5591 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5592 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5593 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5594 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5595 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5596 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5597 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5598 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5599 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5600 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5601 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5602 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5603 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5604 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5605 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5606 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5607 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5608 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5609 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5610 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5611 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5612 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5613 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5614 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5615 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5616 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5617 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5618 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5619 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5620 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5621 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5622 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5623 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5624 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5625 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5626 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5627 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5628 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5629 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5630 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5631 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5632 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5633 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5634 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5635 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5636 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5637 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5638 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5639 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5640 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5641 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5642 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5643 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5644 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5645 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5646 0x00000000, 0x00000000, 0x00000000,
5647};
5648
5649static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5650 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5651 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5652 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5653 0x00000000, 0x00000000, 0x00000000,
5654};
5655
5656static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5657 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5658 0x00000000, 0x00000000, 0x00000000,
5659};
5660
5661/* tp->lock is held. */
5662static int tg3_load_tso_firmware(struct tg3 *tp)
5663{
5664 struct fw_info info;
5665 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5666 int err, i;
5667
5668 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5669 return 0;
5670
5671 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5672 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5673 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5674 info.text_data = &tg3Tso5FwText[0];
5675 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5676 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5677 info.rodata_data = &tg3Tso5FwRodata[0];
5678 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5679 info.data_len = TG3_TSO5_FW_DATA_LEN;
5680 info.data_data = &tg3Tso5FwData[0];
5681 cpu_base = RX_CPU_BASE;
5682 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5683 cpu_scratch_size = (info.text_len +
5684 info.rodata_len +
5685 info.data_len +
5686 TG3_TSO5_FW_SBSS_LEN +
5687 TG3_TSO5_FW_BSS_LEN);
5688 } else {
5689 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5690 info.text_len = TG3_TSO_FW_TEXT_LEN;
5691 info.text_data = &tg3TsoFwText[0];
5692 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5693 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5694 info.rodata_data = &tg3TsoFwRodata[0];
5695 info.data_base = TG3_TSO_FW_DATA_ADDR;
5696 info.data_len = TG3_TSO_FW_DATA_LEN;
5697 info.data_data = &tg3TsoFwData[0];
5698 cpu_base = TX_CPU_BASE;
5699 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5700 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5701 }
5702
5703 err = tg3_load_firmware_cpu(tp, cpu_base,
5704 cpu_scratch_base, cpu_scratch_size,
5705 &info);
5706 if (err)
5707 return err;
5708
5709 /* Now startup the cpu. */
5710 tw32(cpu_base + CPU_STATE, 0xffffffff);
5711 tw32_f(cpu_base + CPU_PC, info.text_base);
5712
5713 for (i = 0; i < 5; i++) {
5714 if (tr32(cpu_base + CPU_PC) == info.text_base)
5715 break;
5716 tw32(cpu_base + CPU_STATE, 0xffffffff);
5717 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5718 tw32_f(cpu_base + CPU_PC, info.text_base);
5719 udelay(1000);
5720 }
5721 if (i >= 5) {
5722 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5723 "to set CPU PC, is %08x should be %08x\n",
5724 tp->dev->name, tr32(cpu_base + CPU_PC),
5725 info.text_base);
5726 return -ENODEV;
5727 }
5728 tw32(cpu_base + CPU_STATE, 0xffffffff);
5729 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5730 return 0;
5731}
5732
5733#endif /* TG3_TSO_SUPPORT != 0 */
5734
5735/* tp->lock is held. */
5736static void __tg3_set_mac_addr(struct tg3 *tp)
5737{
5738 u32 addr_high, addr_low;
5739 int i;
5740
5741 addr_high = ((tp->dev->dev_addr[0] << 8) |
5742 tp->dev->dev_addr[1]);
5743 addr_low = ((tp->dev->dev_addr[2] << 24) |
5744 (tp->dev->dev_addr[3] << 16) |
5745 (tp->dev->dev_addr[4] << 8) |
5746 (tp->dev->dev_addr[5] << 0));
5747 for (i = 0; i < 4; i++) {
5748 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5749 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5750 }
5751
5752 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5754 for (i = 0; i < 12; i++) {
5755 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5756 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5757 }
5758 }
5759
5760 addr_high = (tp->dev->dev_addr[0] +
5761 tp->dev->dev_addr[1] +
5762 tp->dev->dev_addr[2] +
5763 tp->dev->dev_addr[3] +
5764 tp->dev->dev_addr[4] +
5765 tp->dev->dev_addr[5]) &
5766 TX_BACKOFF_SEED_MASK;
5767 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5768}
5769
5770static int tg3_set_mac_addr(struct net_device *dev, void *p)
5771{
5772 struct tg3 *tp = netdev_priv(dev);
5773 struct sockaddr *addr = p;
5774
f9804ddb
MC
5775 if (!is_valid_ether_addr(addr->sa_data))
5776 return -EINVAL;
5777
1da177e4
LT
5778 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5779
e75f7c90
MC
5780 if (!netif_running(dev))
5781 return 0;
5782
58712ef9
MC
5783 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5784 /* Reset chip so that ASF can re-init any MAC addresses it
5785 * needs.
5786 */
5787 tg3_netif_stop(tp);
5788 tg3_full_lock(tp, 1);
5789
5790 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8e7a22e3 5791 tg3_init_hw(tp, 0);
58712ef9
MC
5792
5793 tg3_netif_start(tp);
5794 tg3_full_unlock(tp);
5795 } else {
5796 spin_lock_bh(&tp->lock);
5797 __tg3_set_mac_addr(tp);
5798 spin_unlock_bh(&tp->lock);
5799 }
1da177e4
LT
5800
5801 return 0;
5802}
5803
5804/* tp->lock is held. */
5805static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5806 dma_addr_t mapping, u32 maxlen_flags,
5807 u32 nic_addr)
5808{
5809 tg3_write_mem(tp,
5810 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5811 ((u64) mapping >> 32));
5812 tg3_write_mem(tp,
5813 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5814 ((u64) mapping & 0xffffffff));
5815 tg3_write_mem(tp,
5816 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5817 maxlen_flags);
5818
5819 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5820 tg3_write_mem(tp,
5821 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5822 nic_addr);
5823}
5824
5825static void __tg3_set_rx_mode(struct net_device *);
d244c892 5826static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
5827{
5828 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5829 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5830 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5831 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5832 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5833 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5834 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5835 }
5836 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5837 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5838 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5839 u32 val = ec->stats_block_coalesce_usecs;
5840
5841 if (!netif_carrier_ok(tp->dev))
5842 val = 0;
5843
5844 tw32(HOSTCC_STAT_COAL_TICKS, val);
5845 }
5846}
1da177e4
LT
5847
5848/* tp->lock is held. */
8e7a22e3 5849static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
5850{
5851 u32 val, rdmac_mode;
5852 int i, err, limit;
5853
5854 tg3_disable_ints(tp);
5855
5856 tg3_stop_fw(tp);
5857
5858 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5859
5860 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 5861 tg3_abort_hw(tp, 1);
1da177e4
LT
5862 }
5863
8e7a22e3 5864 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
d4d2c558
MC
5865 tg3_phy_reset(tp);
5866
1da177e4
LT
5867 err = tg3_chip_reset(tp);
5868 if (err)
5869 return err;
5870
5871 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5872
5873 /* This works around an issue with Athlon chipsets on
5874 * B3 tigon3 silicon. This bit has no effect on any
5875 * other revision. But do not set this on PCI Express
5876 * chips.
5877 */
5878 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5879 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5880 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5881
5882 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5883 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5884 val = tr32(TG3PCI_PCISTATE);
5885 val |= PCISTATE_RETRY_SAME_DMA;
5886 tw32(TG3PCI_PCISTATE, val);
5887 }
5888
5889 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5890 /* Enable some hw fixes. */
5891 val = tr32(TG3PCI_MSI_DATA);
5892 val |= (1 << 26) | (1 << 28) | (1 << 29);
5893 tw32(TG3PCI_MSI_DATA, val);
5894 }
5895
5896 /* Descriptor ring init may make accesses to the
5897 * NIC SRAM area to setup the TX descriptors, so we
5898 * can only do this after the hardware has been
5899 * successfully reset.
5900 */
5901 tg3_init_rings(tp);
5902
5903 /* This value is determined during the probe time DMA
5904 * engine test, tg3_test_dma.
5905 */
5906 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5907
5908 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5909 GRC_MODE_4X_NIC_SEND_RINGS |
5910 GRC_MODE_NO_TX_PHDR_CSUM |
5911 GRC_MODE_NO_RX_PHDR_CSUM);
5912 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
5913
5914 /* Pseudo-header checksum is done by hardware logic and not
5915 * the offload processers, so make the chip do the pseudo-
5916 * header checksums on receive. For transmit it is more
5917 * convenient to do the pseudo-header checksum in software
5918 * as Linux does that on transmit for us in all cases.
5919 */
5920 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
5921
5922 tw32(GRC_MODE,
5923 tp->grc_mode |
5924 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5925
5926 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5927 val = tr32(GRC_MISC_CFG);
5928 val &= ~0xff;
5929 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5930 tw32(GRC_MISC_CFG, val);
5931
5932 /* Initialize MBUF/DESC pool. */
cbf46853 5933 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
5934 /* Do nothing. */
5935 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5936 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5938 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5939 else
5940 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5941 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5942 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5943 }
5944#if TG3_TSO_SUPPORT != 0
5945 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5946 int fw_len;
5947
5948 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5949 TG3_TSO5_FW_RODATA_LEN +
5950 TG3_TSO5_FW_DATA_LEN +
5951 TG3_TSO5_FW_SBSS_LEN +
5952 TG3_TSO5_FW_BSS_LEN);
5953 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5954 tw32(BUFMGR_MB_POOL_ADDR,
5955 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5956 tw32(BUFMGR_MB_POOL_SIZE,
5957 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5958 }
5959#endif
5960
0f893dc6 5961 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
5962 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5963 tp->bufmgr_config.mbuf_read_dma_low_water);
5964 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5965 tp->bufmgr_config.mbuf_mac_rx_low_water);
5966 tw32(BUFMGR_MB_HIGH_WATER,
5967 tp->bufmgr_config.mbuf_high_water);
5968 } else {
5969 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5970 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5971 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5972 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5973 tw32(BUFMGR_MB_HIGH_WATER,
5974 tp->bufmgr_config.mbuf_high_water_jumbo);
5975 }
5976 tw32(BUFMGR_DMA_LOW_WATER,
5977 tp->bufmgr_config.dma_low_water);
5978 tw32(BUFMGR_DMA_HIGH_WATER,
5979 tp->bufmgr_config.dma_high_water);
5980
5981 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5982 for (i = 0; i < 2000; i++) {
5983 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5984 break;
5985 udelay(10);
5986 }
5987 if (i >= 2000) {
5988 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5989 tp->dev->name);
5990 return -ENODEV;
5991 }
5992
5993 /* Setup replenish threshold. */
f92905de
MC
5994 val = tp->rx_pending / 8;
5995 if (val == 0)
5996 val = 1;
5997 else if (val > tp->rx_std_max_post)
5998 val = tp->rx_std_max_post;
5999
6000 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6001
6002 /* Initialize TG3_BDINFO's at:
6003 * RCVDBDI_STD_BD: standard eth size rx ring
6004 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6005 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6006 *
6007 * like so:
6008 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6009 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6010 * ring attribute flags
6011 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6012 *
6013 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6014 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6015 *
6016 * The size of each ring is fixed in the firmware, but the location is
6017 * configurable.
6018 */
6019 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6020 ((u64) tp->rx_std_mapping >> 32));
6021 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6022 ((u64) tp->rx_std_mapping & 0xffffffff));
6023 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6024 NIC_SRAM_RX_BUFFER_DESC);
6025
6026 /* Don't even try to program the JUMBO/MINI buffer descriptor
6027 * configs on 5705.
6028 */
6029 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6030 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6031 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6032 } else {
6033 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6034 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6035
6036 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6037 BDINFO_FLAGS_DISABLED);
6038
6039 /* Setup replenish threshold. */
6040 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6041
0f893dc6 6042 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6043 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6044 ((u64) tp->rx_jumbo_mapping >> 32));
6045 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6046 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6047 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6048 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6049 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6050 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6051 } else {
6052 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6053 BDINFO_FLAGS_DISABLED);
6054 }
6055
6056 }
6057
6058 /* There is only one send ring on 5705/5750, no need to explicitly
6059 * disable the others.
6060 */
6061 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6062 /* Clear out send RCB ring in SRAM. */
6063 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6064 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6065 BDINFO_FLAGS_DISABLED);
6066 }
6067
6068 tp->tx_prod = 0;
6069 tp->tx_cons = 0;
6070 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6071 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6072
6073 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6074 tp->tx_desc_mapping,
6075 (TG3_TX_RING_SIZE <<
6076 BDINFO_FLAGS_MAXLEN_SHIFT),
6077 NIC_SRAM_TX_BUFFER_DESC);
6078
6079 /* There is only one receive return ring on 5705/5750, no need
6080 * to explicitly disable the others.
6081 */
6082 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6083 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6084 i += TG3_BDINFO_SIZE) {
6085 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6086 BDINFO_FLAGS_DISABLED);
6087 }
6088 }
6089
6090 tp->rx_rcb_ptr = 0;
6091 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6092
6093 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6094 tp->rx_rcb_mapping,
6095 (TG3_RX_RCB_RING_SIZE(tp) <<
6096 BDINFO_FLAGS_MAXLEN_SHIFT),
6097 0);
6098
6099 tp->rx_std_ptr = tp->rx_pending;
6100 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6101 tp->rx_std_ptr);
6102
0f893dc6 6103 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6104 tp->rx_jumbo_pending : 0;
6105 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6106 tp->rx_jumbo_ptr);
6107
6108 /* Initialize MAC address and backoff seed. */
6109 __tg3_set_mac_addr(tp);
6110
6111 /* MTU + ethernet header + FCS + optional VLAN tag */
6112 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6113
6114 /* The slot time is changed by tg3_setup_phy if we
6115 * run at gigabit with half duplex.
6116 */
6117 tw32(MAC_TX_LENGTHS,
6118 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6119 (6 << TX_LENGTHS_IPG_SHIFT) |
6120 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6121
6122 /* Receive rules. */
6123 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6124 tw32(RCVLPC_CONFIG, 0x0181);
6125
6126 /* Calculate RDMAC_MODE setting early, we need it to determine
6127 * the RCVLPC_STATE_ENABLE mask.
6128 */
6129 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6130 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6131 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6132 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6133 RDMAC_MODE_LNGREAD_ENAB);
6134 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6135 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
6136
6137 /* If statement applies to 5705 and 5750 PCI devices only */
6138 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6139 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6140 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
6141 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6142 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6143 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6144 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6145 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6146 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6147 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6148 }
6149 }
6150
85e94ced
MC
6151 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6152 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6153
1da177e4
LT
6154#if TG3_TSO_SUPPORT != 0
6155 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6156 rdmac_mode |= (1 << 27);
6157#endif
6158
6159 /* Receive/send statistics. */
6160 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6161 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6162 val = tr32(RCVLPC_STATS_ENABLE);
6163 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6164 tw32(RCVLPC_STATS_ENABLE, val);
6165 } else {
6166 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6167 }
6168 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6169 tw32(SNDDATAI_STATSENAB, 0xffffff);
6170 tw32(SNDDATAI_STATSCTRL,
6171 (SNDDATAI_SCTRL_ENABLE |
6172 SNDDATAI_SCTRL_FASTUPD));
6173
6174 /* Setup host coalescing engine. */
6175 tw32(HOSTCC_MODE, 0);
6176 for (i = 0; i < 2000; i++) {
6177 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6178 break;
6179 udelay(10);
6180 }
6181
d244c892 6182 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6183
6184 /* set status block DMA address */
6185 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6186 ((u64) tp->status_mapping >> 32));
6187 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6188 ((u64) tp->status_mapping & 0xffffffff));
6189
6190 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6191 /* Status/statistics block address. See tg3_timer,
6192 * the tg3_periodic_fetch_stats call there, and
6193 * tg3_get_stats to see how this works for 5705/5750 chips.
6194 */
1da177e4
LT
6195 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6196 ((u64) tp->stats_mapping >> 32));
6197 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6198 ((u64) tp->stats_mapping & 0xffffffff));
6199 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6200 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6201 }
6202
6203 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6204
6205 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6206 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6207 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6208 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6209
6210 /* Clear statistics/status block in chip, and status block in ram. */
6211 for (i = NIC_SRAM_STATS_BLK;
6212 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6213 i += sizeof(u32)) {
6214 tg3_write_mem(tp, i, 0);
6215 udelay(40);
6216 }
6217 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6218
c94e3941
MC
6219 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6220 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6221 /* reset to prevent losing 1st rx packet intermittently */
6222 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6223 udelay(10);
6224 }
6225
1da177e4
LT
6226 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6227 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6228 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6229 udelay(40);
6230
314fba34
MC
6231 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6232 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6233 * register to preserve the GPIO settings for LOMs. The GPIOs,
6234 * whether used as inputs or outputs, are set by boot code after
6235 * reset.
6236 */
6237 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6238 u32 gpio_mask;
6239
6240 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6241 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
6242
6243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6244 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6245 GRC_LCLCTRL_GPIO_OUTPUT3;
6246
af36e6b6
MC
6247 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6248 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6249
314fba34
MC
6250 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6251
6252 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
6253 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6254 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 6255 }
1da177e4
LT
6256 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6257 udelay(100);
6258
09ee929c 6259 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 6260 tp->last_tag = 0;
1da177e4
LT
6261
6262 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6263 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6264 udelay(40);
6265 }
6266
6267 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6268 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6269 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6270 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6271 WDMAC_MODE_LNGREAD_ENAB);
6272
85e94ced
MC
6273 /* If statement applies to 5705 and 5750 PCI devices only */
6274 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6275 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6276 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
6277 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6278 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6279 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6280 /* nothing */
6281 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6282 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6283 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6284 val |= WDMAC_MODE_RX_ACCEL;
6285 }
6286 }
6287
d9ab5ad1 6288 /* Enable host coalescing bug fix */
af36e6b6
MC
6289 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6290 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
d9ab5ad1
MC
6291 val |= (1 << 29);
6292
1da177e4
LT
6293 tw32_f(WDMAC_MODE, val);
6294 udelay(40);
6295
6296 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6297 val = tr32(TG3PCI_X_CAPS);
6298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6299 val &= ~PCIX_CAPS_BURST_MASK;
6300 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6301 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6302 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6303 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6304 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6305 val |= (tp->split_mode_max_reqs <<
6306 PCIX_CAPS_SPLIT_SHIFT);
6307 }
6308 tw32(TG3PCI_X_CAPS, val);
6309 }
6310
6311 tw32_f(RDMAC_MODE, rdmac_mode);
6312 udelay(40);
6313
6314 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6315 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6316 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6317 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6318 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6319 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6320 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6321 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6322#if TG3_TSO_SUPPORT != 0
6323 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6324 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6325#endif
6326 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6327 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6328
6329 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6330 err = tg3_load_5701_a0_firmware_fix(tp);
6331 if (err)
6332 return err;
6333 }
6334
6335#if TG3_TSO_SUPPORT != 0
6336 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6337 err = tg3_load_tso_firmware(tp);
6338 if (err)
6339 return err;
6340 }
6341#endif
6342
6343 tp->tx_mode = TX_MODE_ENABLE;
6344 tw32_f(MAC_TX_MODE, tp->tx_mode);
6345 udelay(100);
6346
6347 tp->rx_mode = RX_MODE_ENABLE;
af36e6b6
MC
6348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6349 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6350
1da177e4
LT
6351 tw32_f(MAC_RX_MODE, tp->rx_mode);
6352 udelay(10);
6353
6354 if (tp->link_config.phy_is_low_power) {
6355 tp->link_config.phy_is_low_power = 0;
6356 tp->link_config.speed = tp->link_config.orig_speed;
6357 tp->link_config.duplex = tp->link_config.orig_duplex;
6358 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6359 }
6360
6361 tp->mi_mode = MAC_MI_MODE_BASE;
6362 tw32_f(MAC_MI_MODE, tp->mi_mode);
6363 udelay(80);
6364
6365 tw32(MAC_LED_CTRL, tp->led_ctrl);
6366
6367 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 6368 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
6369 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6370 udelay(10);
6371 }
6372 tw32_f(MAC_RX_MODE, tp->rx_mode);
6373 udelay(10);
6374
6375 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6376 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6377 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6378 /* Set drive transmission level to 1.2V */
6379 /* only if the signal pre-emphasis bit is not set */
6380 val = tr32(MAC_SERDES_CFG);
6381 val &= 0xfffff000;
6382 val |= 0x880;
6383 tw32(MAC_SERDES_CFG, val);
6384 }
6385 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6386 tw32(MAC_SERDES_CFG, 0x616000);
6387 }
6388
6389 /* Prevent chip from dropping frames when flow control
6390 * is enabled.
6391 */
6392 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6393
6394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6395 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6396 /* Use hardware link auto-negotiation */
6397 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6398 }
6399
d4d2c558
MC
6400 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6401 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6402 u32 tmp;
6403
6404 tmp = tr32(SERDES_RX_CTRL);
6405 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6406 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6407 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6408 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6409 }
6410
8e7a22e3 6411 err = tg3_setup_phy(tp, reset_phy);
1da177e4
LT
6412 if (err)
6413 return err;
6414
6415 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6416 u32 tmp;
6417
6418 /* Clear CRC stats. */
6419 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6420 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6421 tg3_readphy(tp, 0x14, &tmp);
6422 }
6423 }
6424
6425 __tg3_set_rx_mode(tp->dev);
6426
6427 /* Initialize receive rules. */
6428 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6429 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6430 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6431 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6432
4cf78e4f 6433 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6434 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6435 limit = 8;
6436 else
6437 limit = 16;
6438 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6439 limit -= 4;
6440 switch (limit) {
6441 case 16:
6442 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6443 case 15:
6444 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6445 case 14:
6446 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6447 case 13:
6448 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6449 case 12:
6450 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6451 case 11:
6452 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6453 case 10:
6454 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6455 case 9:
6456 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6457 case 8:
6458 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6459 case 7:
6460 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6461 case 6:
6462 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6463 case 5:
6464 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6465 case 4:
6466 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6467 case 3:
6468 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6469 case 2:
6470 case 1:
6471
6472 default:
6473 break;
6474 };
6475
6476 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6477
1da177e4
LT
6478 return 0;
6479}
6480
6481/* Called at device open time to get the chip ready for
6482 * packet processing. Invoked with tp->lock held.
6483 */
8e7a22e3 6484static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6485{
6486 int err;
6487
6488 /* Force the chip into D0. */
bc1c7567 6489 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
6490 if (err)
6491 goto out;
6492
6493 tg3_switch_clocks(tp);
6494
6495 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6496
8e7a22e3 6497 err = tg3_reset_hw(tp, reset_phy);
1da177e4
LT
6498
6499out:
6500 return err;
6501}
6502
/* Add the current value of 32-bit statistics register REG into the
 * 64-bit split counter PSTAT, propagating a carry into the high word
 * when the low word wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6509
/* Fold the chip's 32-bit MAC statistics registers into the 64-bit
 * software counters in tp->hw_stats.  Called from tg3_timer() in the
 * once-per-second section; skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters (buffer exhaustion/discards). */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6550
/* Periodic driver timer.  Handles the non-tagged-status interrupt
 * race workaround every tick, link polling and statistics once per
 * second, and the ASF firmware heartbeat every two seconds.  Always
 * re-arms itself except when scheduling a full chip reset.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* While an IRQ synchronization is in progress, skip all work
	 * and just re-arm.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but interrupt possibly
			 * lost: force an interrupt.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Force a fresh status block DMA now. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine stopped unexpectedly: the chip is
		 * wedged, hand off to the reset task.  The timer is NOT
		 * re-armed here; TG3_FLG2_RESTART_TIMER tells the reset
		 * task to do it.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll MAC_STATUS for link changes instead of
			 * relying on link-change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but the state changed... */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ...or link was down and a signal appeared. */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode to force the
				 * SERDES link state machine to resync.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds. */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is alive. */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE2);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the RX CPU event doorbell (bit 14) so the
			 * firmware notices the mailbox update.
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6652
81789ef5 6653static int tg3_request_irq(struct tg3 *tp)
fcfa0a32
MC
6654{
6655 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6656 unsigned long flags;
6657 struct net_device *dev = tp->dev;
6658
6659 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6660 fn = tg3_msi;
6661 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6662 fn = tg3_msi_1shot;
6663 flags = SA_SAMPLE_RANDOM;
6664 } else {
6665 fn = tg3_interrupt;
6666 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6667 fn = tg3_interrupt_tagged;
6668 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6669 }
6670 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6671}
6672
/* Verify that the chip can actually deliver an interrupt in the
 * current mode.  Temporarily swaps in tg3_test_isr, forces a status
 * block update, and polls the interrupt mailbox for up to ~50ms.
 * Restores the normal handler before returning.  Returns 0 on
 * success, -EIO if no interrupt arrived, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the production handler for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate status block update, which should raise
	 * an interrupt.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll up to 5 times (10ms apart) for the interrupt mailbox to
	 * become non-zero, indicating the ISR ran.
	 */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the production interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6719
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-install the handler in INTx mode. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6780
1da177e4
LT
6781static int tg3_open(struct net_device *dev)
6782{
6783 struct tg3 *tp = netdev_priv(dev);
6784 int err;
6785
f47c11ee 6786 tg3_full_lock(tp, 0);
1da177e4 6787
bc1c7567
MC
6788 err = tg3_set_power_state(tp, PCI_D0);
6789 if (err)
6790 return err;
6791
1da177e4
LT
6792 tg3_disable_ints(tp);
6793 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6794
f47c11ee 6795 tg3_full_unlock(tp);
1da177e4
LT
6796
6797 /* The placement of this call is tied
6798 * to the setup and use of Host TX descriptors.
6799 */
6800 err = tg3_alloc_consistent(tp);
6801 if (err)
6802 return err;
6803
88b06bc2
MC
6804 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6805 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
d4d2c558
MC
6806 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6807 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6808 (tp->pdev_peer == tp->pdev))) {
fac9b83e
DM
6809 /* All MSI supporting chips should support tagged
6810 * status. Assert that this is the case.
6811 */
6812 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6813 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6814 "Not using MSI.\n", tp->dev->name);
6815 } else if (pci_enable_msi(tp->pdev) == 0) {
88b06bc2
MC
6816 u32 msi_mode;
6817
6818 msi_mode = tr32(MSGINT_MODE);
6819 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6820 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6821 }
6822 }
fcfa0a32 6823 err = tg3_request_irq(tp);
1da177e4
LT
6824
6825 if (err) {
88b06bc2
MC
6826 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6827 pci_disable_msi(tp->pdev);
6828 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6829 }
1da177e4
LT
6830 tg3_free_consistent(tp);
6831 return err;
6832 }
6833
f47c11ee 6834 tg3_full_lock(tp, 0);
1da177e4 6835
8e7a22e3 6836 err = tg3_init_hw(tp, 1);
1da177e4 6837 if (err) {
944d980e 6838 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
6839 tg3_free_rings(tp);
6840 } else {
fac9b83e
DM
6841 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6842 tp->timer_offset = HZ;
6843 else
6844 tp->timer_offset = HZ / 10;
6845
6846 BUG_ON(tp->timer_offset > HZ);
6847 tp->timer_counter = tp->timer_multiplier =
6848 (HZ / tp->timer_offset);
6849 tp->asf_counter = tp->asf_multiplier =
28fbef78 6850 ((HZ / tp->timer_offset) * 2);
1da177e4
LT
6851
6852 init_timer(&tp->timer);
6853 tp->timer.expires = jiffies + tp->timer_offset;
6854 tp->timer.data = (unsigned long) tp;
6855 tp->timer.function = tg3_timer;
1da177e4
LT
6856 }
6857
f47c11ee 6858 tg3_full_unlock(tp);
1da177e4
LT
6859
6860 if (err) {
88b06bc2
MC
6861 free_irq(tp->pdev->irq, dev);
6862 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6863 pci_disable_msi(tp->pdev);
6864 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6865 }
1da177e4
LT
6866 tg3_free_consistent(tp);
6867 return err;
6868 }
6869
7938109f
MC
6870 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6871 err = tg3_test_msi(tp);
fac9b83e 6872
7938109f 6873 if (err) {
f47c11ee 6874 tg3_full_lock(tp, 0);
7938109f
MC
6875
6876 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6877 pci_disable_msi(tp->pdev);
6878 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6879 }
944d980e 6880 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f
MC
6881 tg3_free_rings(tp);
6882 tg3_free_consistent(tp);
6883
f47c11ee 6884 tg3_full_unlock(tp);
7938109f
MC
6885
6886 return err;
6887 }
fcfa0a32
MC
6888
6889 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6890 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6891 u32 val = tr32(0x7c04);
6892
6893 tw32(0x7c04, val | (1 << 29));
6894 }
6895 }
7938109f
MC
6896 }
6897
f47c11ee 6898 tg3_full_lock(tp, 0);
1da177e4 6899
7938109f
MC
6900 add_timer(&tp->timer);
6901 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
1da177e4
LT
6902 tg3_enable_ints(tp);
6903
f47c11ee 6904 tg3_full_unlock(tp);
1da177e4
LT
6905
6906 netif_start_queue(dev);
6907
6908 return 0;
6909}
6910
#if 0
/* Debug-only helper (compiled out): dump the chip's control-block
 * modes/statuses, host coalescing and buffer manager state, BDINFO
 * structures, SRAM ring control blocks, the software status/stats
 * blocks, mailboxes, and the first few NIC-side TX/RX descriptors to
 * the kernel log.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk(" SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk(" RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* SRAM-resident ring control blocks. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7138
7139static struct net_device_stats *tg3_get_stats(struct net_device *);
7140static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7141
/* net_device stop hook: quiesce the TX queue and timer, halt the chip,
 * release the IRQ (disabling MSI if it was in use), snapshot the final
 * statistics, free DMA-consistent memory, and drop into a low-power
 * state.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the counters so they survive across close/open;
	 * tg3_get_stats()/tg3_get_estats() add these saved values back
	 * in once hw_stats is gone.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7191
7192static inline unsigned long get_stat64(tg3_stat64_t *val)
7193{
7194 unsigned long ret;
7195
7196#if (BITS_PER_LONG == 32)
7197 ret = val->low;
7198#else
7199 ret = ((u64)val->high << 32) | ((u64)val->low);
7200#endif
7201 return ret;
7202}
7203
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * copper PHY the count is read from PHY shadow registers (write 0x8000
 * to register 0x1e to select, then read register 0x14) and accumulated
 * in software; other chips report it via the hardware stats block.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* MDIO access must be serialized against other users
		 * of the PHY.
		 */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7228
/* Compute one ethtool statistic: the pre-reset snapshot taken in
 * tg3_close() plus the live value from the hardware stats block.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

/* Fill in and return tp->estats, the extended ethtool statistics.
 * Falls back to the last saved snapshot when the hardware stats block
 * has been freed (device closed).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7320
/* net_device get_stats hook: map the hardware statistics block onto
 * struct net_device_stats, adding the snapshot preserved across the
 * last close.  Returns the stale snapshot when the hardware stats
 * block is not allocated (device closed).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from PHY registers on 5700/5701. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7380
7381static inline u32 calc_crc(unsigned char *buf, int len)
7382{
7383 u32 reg;
7384 u32 tmp;
7385 int j, k;
7386
7387 reg = 0xffffffff;
7388
7389 for (j = 0; j < len; j++) {
7390 reg ^= buf[j];
7391
7392 for (k = 0; k < 8; k++) {
7393 tmp = reg & 0x01;
7394
7395 reg >>= 1;
7396
7397 if (tmp) {
7398 reg ^= 0xedb88320;
7399 }
7400 }
7401 }
7402
7403 return ~reg;
7404}
7405
7406static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7407{
7408 /* accept or reject all multicast frames */
7409 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7410 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7411 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7412 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7413}
7414
/* Program the RX filtering state (promiscuous mode, VLAN tag
 * stripping, multicast hash filter) from dev->flags and the device's
 * multicast list.  Caller must hold the appropriate locks.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* The low 7 bits of the inverted CRC select one
			 * of the 128 hash filter bits: the top 2 pick
			 * the register, the bottom 5 the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register if something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7478
/* net_device set_rx_mode hook: apply the RX filter configuration
 * under the full driver lock.  A down interface has nothing to
 * program, so it is skipped.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7490
7491#define TG3_REGDUMP_LEN (32 * 1024)
7492
/* ethtool get_regs_len: size in bytes of the register dump buffer
 * that tg3_get_regs() fills.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7497
/* ethtool get_regs: dump the commonly interesting chip registers into
 * the caller's TG3_REGDUMP_LEN-byte buffer.  Each register value is
 * stored at its own hardware offset within the dump; unread gaps stay
 * zero.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Register reads are unsafe while the PHY is powered down, so
	 * leave the dump all-zero in that case.
	 */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)		\
do {	p = (u32 *)(orig_p + (reg));	\
	__GET_REG32((reg));		\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7570
/* ethtool get_eeprom_len: report the NVRAM size determined at probe
 * time.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
7577
7578static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
1820180b 7579static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
7580
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM, starting at
 * eeprom->offset, into data.  NVRAM is read one 32-bit word at a
 * time, so unaligned head and tail bytes are carved out of whole-word
 * reads.  Returns 0 on success or a negative errno; eeprom->len is
 * updated to the number of bytes actually copied.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while the PHY is in low-power mode. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report the partial copy before failing. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7642
7643static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7644
7645static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7646{
7647 struct tg3 *tp = netdev_priv(dev);
7648 int ret;
7649 u32 offset, len, b_offset, odd_len, start, end;
7650 u8 *buf;
7651
bc1c7567
MC
7652 if (tp->link_config.phy_is_low_power)
7653 return -EAGAIN;
7654
1da177e4
LT
7655 if (eeprom->magic != TG3_EEPROM_MAGIC)
7656 return -EINVAL;
7657
7658 offset = eeprom->offset;
7659 len = eeprom->len;
7660
7661 if ((b_offset = (offset & 3))) {
7662 /* adjustments to start on required 4 byte boundary */
7663 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7664 if (ret)
7665 return ret;
7666 start = cpu_to_le32(start);
7667 len += b_offset;
7668 offset &= ~3;
1c8594b4
MC
7669 if (len < 4)
7670 len = 4;
1da177e4
LT
7671 }
7672
7673 odd_len = 0;
1c8594b4 7674 if (len & 3) {
1da177e4
LT
7675 /* adjustments to end on required 4 byte boundary */
7676 odd_len = 1;
7677 len = (len + 3) & ~3;
7678 ret = tg3_nvram_read(tp, offset+len-4, &end);
7679 if (ret)
7680 return ret;
7681 end = cpu_to_le32(end);
7682 }
7683
7684 buf = data;
7685 if (b_offset || odd_len) {
7686 buf = kmalloc(len, GFP_KERNEL);
7687 if (buf == 0)
7688 return -ENOMEM;
7689 if (b_offset)
7690 memcpy(buf, &start, 4);
7691 if (odd_len)
7692 memcpy(buf+len-4, &end, 4);
7693 memcpy(buf + b_offset, data, eeprom->len);
7694 }
7695
7696 ret = tg3_nvram_write_block(tp, offset, len, buf);
7697
7698 if (buf != data)
7699 kfree(buf);
7700
7701 return ret;
7702}
7703
7704static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7705{
7706 struct tg3 *tp = netdev_priv(dev);
7707
7708 cmd->supported = (SUPPORTED_Autoneg);
7709
7710 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7711 cmd->supported |= (SUPPORTED_1000baseT_Half |
7712 SUPPORTED_1000baseT_Full);
7713
ef348144 7714 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
7715 cmd->supported |= (SUPPORTED_100baseT_Half |
7716 SUPPORTED_100baseT_Full |
7717 SUPPORTED_10baseT_Half |
7718 SUPPORTED_10baseT_Full |
7719 SUPPORTED_MII);
ef348144
KK
7720 cmd->port = PORT_TP;
7721 } else {
1da177e4 7722 cmd->supported |= SUPPORTED_FIBRE;
ef348144
KK
7723 cmd->port = PORT_FIBRE;
7724 }
1da177e4
LT
7725
7726 cmd->advertising = tp->link_config.advertising;
7727 if (netif_running(dev)) {
7728 cmd->speed = tp->link_config.active_speed;
7729 cmd->duplex = tp->link_config.active_duplex;
7730 }
1da177e4
LT
7731 cmd->phy_address = PHY_ADDR;
7732 cmd->transceiver = 0;
7733 cmd->autoneg = tp->link_config.autoneg;
7734 cmd->maxtxpkt = 0;
7735 cmd->maxrxpkt = 0;
7736 return 0;
7737}
7738
7739static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7740{
7741 struct tg3 *tp = netdev_priv(dev);
7742
37ff238d 7743 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
1da177e4
LT
7744 /* These are the only valid advertisement bits allowed. */
7745 if (cmd->autoneg == AUTONEG_ENABLE &&
7746 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7747 ADVERTISED_1000baseT_Full |
7748 ADVERTISED_Autoneg |
7749 ADVERTISED_FIBRE)))
7750 return -EINVAL;
37ff238d
MC
7751 /* Fiber can only do SPEED_1000. */
7752 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7753 (cmd->speed != SPEED_1000))
7754 return -EINVAL;
7755 /* Copper cannot force SPEED_1000. */
7756 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7757 (cmd->speed == SPEED_1000))
7758 return -EINVAL;
7759 else if ((cmd->speed == SPEED_1000) &&
7760 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7761 return -EINVAL;
1da177e4 7762
f47c11ee 7763 tg3_full_lock(tp, 0);
1da177e4
LT
7764
7765 tp->link_config.autoneg = cmd->autoneg;
7766 if (cmd->autoneg == AUTONEG_ENABLE) {
7767 tp->link_config.advertising = cmd->advertising;
7768 tp->link_config.speed = SPEED_INVALID;
7769 tp->link_config.duplex = DUPLEX_INVALID;
7770 } else {
7771 tp->link_config.advertising = 0;
7772 tp->link_config.speed = cmd->speed;
7773 tp->link_config.duplex = cmd->duplex;
7774 }
7775
7776 if (netif_running(dev))
7777 tg3_setup_phy(tp, 1);
7778
f47c11ee 7779 tg3_full_unlock(tp);
1da177e4
LT
7780
7781 return 0;
7782}
7783
/* ethtool get_drvinfo: report driver name/version, the firmware
 * version string read at probe time, and the PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
7793
7794static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7795{
7796 struct tg3 *tp = netdev_priv(dev);
7797
7798 wol->supported = WAKE_MAGIC;
7799 wol->wolopts = 0;
7800 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7801 wol->wolopts = WAKE_MAGIC;
7802 memset(&wol->sopass, 0, sizeof(wol->sopass));
7803}
7804
/* ethtool set_wol: enable or disable magic-packet wake-up.  Only
 * WAKE_MAGIC is supported, and SERDES boards must additionally be
 * WOL-capable.  Returns 0 or -EINVAL.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
		return -EINVAL;

	/* Flag update only; the hardware is programmed at suspend time. */
	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}
7825
/* ethtool get_msglevel: current netif message-enable bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
7831
/* ethtool set_msglevel: replace the netif message-enable bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
7837
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso: TSO may only be enabled on TSO-capable hardware;
 * disabling is always allowed.  Returns 0 or -EINVAL.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
7851
/* ethtool nway_reset: restart autonegotiation on the copper PHY.
 * Returns 0 on success, -EAGAIN if the interface is down, -EINVAL for
 * SERDES boards or when autoneg is not currently enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* BMCR is deliberately read twice; only the second read's value
	 * is acted on.  NOTE(review): presumed to flush a stale latched
	 * value from the PHY — confirm against the PHY errata.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7878
7879static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7880{
7881 struct tg3 *tp = netdev_priv(dev);
7882
7883 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7884 ering->rx_mini_max_pending = 0;
4f81c32b
MC
7885 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7886 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7887 else
7888 ering->rx_jumbo_max_pending = 0;
7889
7890 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
7891
7892 ering->rx_pending = tp->rx_pending;
7893 ering->rx_mini_pending = 0;
4f81c32b
MC
7894 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7895 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7896 else
7897 ering->rx_jumbo_pending = 0;
7898
1da177e4
LT
7899 ering->tx_pending = tp->tx_pending;
7900}
7901
/* ethtool set_ringparam: validate and install new RX/jumbo/TX ring
 * sizes.  If the interface is running, traffic is stopped, the chip
 * is halted and re-initialized with the new sizes, then restarted.
 * Returns 0 or -EINVAL.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;	/* have tg3_full_lock synchronize irqs */
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips support at most 64 pending standard RX BDs. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Full halt/re-init so the rings are rebuilt at the new sizes. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp, 1);
		tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
}
7937
7938static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7939{
7940 struct tg3 *tp = netdev_priv(dev);
7941
7942 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7943 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7944 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7945}
7946
/* ethtool set_pauseparam: update the flow-control flags; if the
 * interface is running, halt and re-initialize the chip so the new
 * pause configuration takes effect.  Always returns 0.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;	/* have tg3_full_lock synchronize irqs */
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {
		/* Re-init applies the new pause flags to the hardware. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp, 1);
		tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
}
7982
7983static u32 tg3_get_rx_csum(struct net_device *dev)
7984{
7985 struct tg3 *tp = netdev_priv(dev);
7986 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7987}
7988
/* ethtool set_rx_csum: toggle RX checksum offload.  Hardware with
 * broken checksumming may only have it disabled.  Returns 0 or
 * -EINVAL.
 */
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	spin_lock_bh(&tp->lock);
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_bh(&tp->lock);

	return 0;
}
8008
/* ethtool set_tx_csum: toggle TX checksum offload.  Hardware with
 * broken checksumming may only have it disabled.  Returns 0 or
 * -EINVAL.
 */
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	/* 5755/5787 take the generic hardware-checksum ethtool op;
	 * other chips take the IP-checksum-only op.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		ethtool_op_set_tx_hw_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
8027
/* ethtool get_stats_count: number of statistics values/strings. */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
8032
4cafd3f5
MC
/* ethtool self_test_count: number of self-test results/strings. */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
8037
1da177e4
LT
/* ethtool get_strings: copy the statistics or self-test name table
 * into buf, depending on the requested string set.
 */
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
8052
4009a93d
MC
/* ethtool phys_id: identify the adapter by blinking its LEDs.
 * data is the blink duration in seconds (0 selects a 2 second
 * default); each half-second alternates all-LEDs-on with all-LEDs-off.
 * The configured LED control value is restored before returning.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		/* Abort early if the sleep is interrupted by a signal. */
		if (msleep_interruptible(500))
			break;
	}
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8084
1da177e4
LT
/* ethtool get_ethtool_stats: snapshot the accumulated driver
 * statistics into the caller's array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8091
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14

/* Offline self-test: verify NVRAM contents.  Standard images are
 * checked with the bootstrap CRC (offset 0x10) and the manufacturing
 * block CRC (offset 0xfc); selfboot format-1 images are checked with
 * a simple byte checksum.  Returns 0 on success, -EIO on checksum or
 * read failure, -ENOMEM if the scratch buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		/* Selfboot image: only format 1 carries a header we can
		 * verify; other formats pass trivially.
		 */
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* The sum over all bytes, including the stored checksum
		 * byte, is zero for a valid image.
		 */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8160
ca43007a
MC
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

/* Offline self-test: wait up to a link-type-dependent number of
 * seconds for carrier.  Returns 0 when the link comes up, -ENODEV if
 * the interface is down, -EIO on timeout or interrupted sleep.
 */
static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	/* Poll carrier once a second up to the timeout. */
	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
8186
/* Only test the commonly used registers */
/* Offline self-test: for each table entry applicable to this chip,
 * save the register, write all-zeros then all-ones through the
 * writable mask, and verify that read-only bits are preserved and
 * read/write bits take the written value.  The register is restored
 * afterwards.  Returns 0 on success, -EIO (with a printk of the
 * failing offset) on mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8399
7942e1db
MC
8400static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8401{
f71e1309 8402 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
8403 int i;
8404 u32 j;
8405
8406 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8407 for (j = 0; j < len; j += 4) {
8408 u32 val;
8409
8410 tg3_write_mem(tp, offset + j, test_pattern[i]);
8411 tg3_read_mem(tp, offset + j, &val);
8412 if (val != test_pattern[i])
8413 return -EIO;
8414 }
8415 }
8416 return 0;
8417}
8418
/* Offline self-test: exercise each on-chip SRAM region with
 * tg3_do_mem_test().  The region table depends on the chip family;
 * each table ends with an offset of 0xffffffff.  Returns 0 or -EIO.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the region table for this chip generation. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			mem_tbl = mem_tbl_5755;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
8465
9f40dead
MC
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

/* Offline self-test: transmit one 1514-byte frame in MAC-internal or
 * PHY loopback mode and verify it arrives intact on the standard RX
 * ring.  Returns 0 on success (or on 5780 MAC loopback, which is
 * skipped due to a HW erratum), -EIO on loss/corruption/timeout,
 * -EINVAL for an unknown mode, -ENOMEM if the test skb cannot be
 * allocated.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
			   MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		/* Put the PHY in 1000/full internal loopback. */
		tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
					   BMCR_SPEED1000);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: our MAC address, a zeroed type field,
	 * then a counting byte pattern we can verify on receive.
	 */
	tx_len = 1514;
	skb = dev_alloc_skb(tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember where the RX producer stands before we transmit. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the TX doorbell and flush with a read-back. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* Poll up to 10 times for the frame to be consumed on TX and
	 * looped back onto the RX return ring.
	 */
	for (i = 0; i < 10; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Reported length includes the 4-byte FCS; strip it. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the counting payload byte-for-byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8602
9f40dead
MC
8603#define TG3_MAC_LOOPBACK_FAILED 1
8604#define TG3_PHY_LOOPBACK_FAILED 2
8605#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8606 TG3_PHY_LOOPBACK_FAILED)
8607
/* Run the offline loopback self-tests.  Returns 0 on success or a
 * bitmask of TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED.
 * The device must be up; the chip is re-initialized before testing.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	/* Bring the chip to a known state before looping packets back. */
	tg3_reset_hw(tp, 1);

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;
	/* PHY loopback only makes sense when a copper PHY is present. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
8626
4cafd3f5
MC
/* ethtool .self_test handler.  Fills data[0..5] with per-test pass/fail
 * results (nvram, link, registers, memory, loopback, interrupt) and sets
 * ETH_TEST_FL_FAILED in etest->flags on any failure.  Offline tests halt
 * and later re-initialize the chip, so traffic is interrupted.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* SRAM/registers are not reliably accessible in low-power states. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the hardware before poking at it directly.
		 * The NVRAM lock is taken across the CPU halts so firmware
		 * cannot race with us; it is dropped only if we got it.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test needs interrupts enabled, so it runs
		 * outside the full lock.
		 */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore the chip to its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp, 1);
			tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8699
1da177e4
LT
/* ioctl handler: supports the standard MII register access ioctls
 * (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG); everything else returns
 * -EOPNOTSUPP.  PHY access is refused on SERDES boards (no MDIO PHY)
 * and while the PHY is powered down.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* tp->lock serializes MDIO accesses with the driver core. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writing PHY registers is a privileged operation. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8751
8752#if TG3_VLAN_TAG_USED
/* VLAN acceleration registration callback.  Stores the new vlan_group
 * and reprograms RX_MODE_KEEP_VLAN_TAG accordingly.  The NIC is
 * quiesced around the update so the RX path never sees a half-updated
 * configuration.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8772
/* VLAN id removal callback: drop the per-vid device pointer from the
 * vlan_group under the full lock, with the NIC stopped, so concurrent
 * RX processing cannot dereference a stale entry.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8788#endif
8789
15f9850d
DM
8790static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8791{
8792 struct tg3 *tp = netdev_priv(dev);
8793
8794 memcpy(ec, &tp->coal, sizeof(*ec));
8795 return 0;
8796}
8797
d244c892
MC
/* ethtool .set_coalesce handler.  Validates the requested coalescing
 * parameters against per-chip limits, copies only the supported fields
 * into tp->coal, and reprograms the hardware if the interface is up.
 * Returns 0 on success or -EINVAL on an out-of-range/degenerate request.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	/* 5705+ chips lack the irq-tick and stats-tick coalescing knobs,
	 * so their limits stay 0 and any nonzero request is rejected below.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
8851
1da177e4
LT
/* ethtool operations table for the tg3 driver. */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	/* TSO hooks only when built with NETIF_F_TSO support. */
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
8891
/* Determine the size of a serial EEPROM part by probing for address
 * wrap-around.  Leaves tp->nvram_size at the default EEPROM_CHIP_SIZE
 * if the magic signature cannot be read or is unrecognized.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return;

	/* Accept the standard magic or a selfboot-format signature. */
	if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
8923
/* Determine the NVRAM size.  Standard images store the size (in KB) in
 * the high half of the word at offset 0xf0; selfboot images fall back
 * to the EEPROM probing heuristic; otherwise assume 128KB.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_nvram_read_swab(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* Size is stored as KB in the upper 16 bits. */
			tp->nvram_size = (val >> 16) * 1024;
			return;
		}
	}
	/* No size word present: assume the common 128KB part. */
	tp->nvram_size = 0x20000;
}
8945
/* Decode NVRAM_CFG1 to identify the attached NVRAM part (vendor, page
 * size, buffered-ness) for pre-5752 chips and set the corresponding
 * tg3_flags/tg3_flags2 bits.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: force non-bypass EEPROM access mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	/* Only 5750-class and 5780-class chips encode the vendor here. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Older chips: assume a buffered Atmel AT45DB0X1B part. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
8998
361b4ac2
MC
/* 5752-specific NVRAM identification: decode vendor and page size from
 * NVRAM_CFG1 and flag TPM-protected NVRAM.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts encode their page size in CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9059
d3c7b886
MC
/* 5755-specific NVRAM identification: decode vendor/page size from
 * NVRAM_CFG1 and flag TPM-protected NVRAM.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM parts must not use compat bypass mode. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_4:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9100
1b27777a
MC
/* 5787-specific NVRAM identification: decode vendor/page size from
 * NVRAM_CFG1.  Unlike 5752/5755, no TPM-protection bit is checked here.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM parts must not use compat bypass mode. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9138
1da177e4
LT
9139/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9140static void __devinit tg3_nvram_init(struct tg3 *tp)
9141{
9142 int j;
9143
1da177e4
LT
9144 tw32_f(GRC_EEPROM_ADDR,
9145 (EEPROM_ADDR_FSM_RESET |
9146 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9147 EEPROM_ADDR_CLKPERD_SHIFT)));
9148
9149 /* XXX schedule_timeout() ... */
9150 for (j = 0; j < 100; j++)
9151 udelay(10);
9152
9153 /* Enable seeprom accesses. */
9154 tw32_f(GRC_LOCAL_CTRL,
9155 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9156 udelay(100);
9157
9158 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9159 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9160 tp->tg3_flags |= TG3_FLAG_NVRAM;
9161
ec41c7df
MC
9162 if (tg3_nvram_lock(tp)) {
9163 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9164 "tg3_nvram_init failed.\n", tp->dev->name);
9165 return;
9166 }
e6af301b 9167 tg3_enable_nvram_access(tp);
1da177e4 9168
361b4ac2
MC
9169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9170 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
9171 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9172 tg3_get_5755_nvram_info(tp);
1b27777a
MC
9173 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9174 tg3_get_5787_nvram_info(tp);
361b4ac2
MC
9175 else
9176 tg3_get_nvram_info(tp);
9177
1da177e4
LT
9178 tg3_get_nvram_size(tp);
9179
e6af301b 9180 tg3_disable_nvram_access(tp);
381291b7 9181 tg3_nvram_unlock(tp);
1da177e4
LT
9182
9183 } else {
9184 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9185
9186 tg3_get_eeprom_size(tp);
9187 }
9188}
9189
/* Read one 32-bit word from a serial EEPROM via the GRC_EEPROM_ADDR
 * state machine.  offset must be dword-aligned and within range.
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout
 * (~1 second poll).
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the non-address bits of the EEPROM control register. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, 100us per iteration. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9223
9224#define NVRAM_CMD_TIMEOUT 10000
9225
9226static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9227{
9228 int i;
9229
9230 tw32(NVRAM_CMD, nvram_cmd);
9231 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9232 udelay(10);
9233 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9234 udelay(10);
9235 break;
9236 }
9237 }
9238 if (i == NVRAM_CMD_TIMEOUT) {
9239 return -EBUSY;
9240 }
9241 return 0;
9242}
9243
1820180b
MC
9244static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9245{
9246 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9247 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9248 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9249 (tp->nvram_jedecnum == JEDEC_ATMEL))
9250
9251 addr = ((addr / tp->nvram_pagesize) <<
9252 ATMEL_AT45DB0X1B_PAGE_POS) +
9253 (addr % tp->nvram_pagesize);
9254
9255 return addr;
9256}
9257
c4e6575c
MC
9258static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9259{
9260 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9261 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9262 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9263 (tp->nvram_jedecnum == JEDEC_ATMEL))
9264
9265 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9266 tp->nvram_pagesize) +
9267 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9268
9269 return addr;
9270}
9271
1da177e4
LT
/* Read one 32-bit word from NVRAM at the given linear offset.  Falls
 * back to the serial EEPROM path on chips without an NVRAM interface.
 * Acquires the hardware NVRAM arbitration lock for the duration.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Convert to the part's physical address format first. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Data register is big-endian relative to host expectations. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9303
1820180b
MC
9304static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9305{
9306 int err;
9307 u32 tmp;
9308
9309 err = tg3_nvram_read(tp, offset, &tmp);
9310 *val = swab32(tmp);
9311 return err;
9312}
9313
1da177e4
LT
/* Write a dword-aligned block to a serial EEPROM one 32-bit word at a
 * time via the GRC EEPROM state machine.  Returns 0 on success or
 * -EBUSY if any word write times out (~1 second poll per word).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* Writing COMPLETE back clears the (write-1-to-clear) bit. */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, 100us per iteration. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9355
/* offset and length are dword aligned */
/* Write to unbuffered flash with a read-modify-erase-write cycle per
 * flash page: the page is read into a scratch buffer, the caller's
 * data merged in, the page erased, then rewritten word by word.
 * Returns 0 on success or a negative errno; a WRDI (write-disable)
 * command is always issued on exit.
 *
 * NOTE(review): `buf` is never advanced inside the loop, so when the
 * request spans more than one page every page is filled from the start
 * of `buf`; also memcpy(tmp + page_off, buf, size) with size == pagesize
 * and page_off != 0 would overrun `tmp`.  Looks wrong for multi-page,
 * unaligned writes — verify against callers before relying on it.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
					    u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read the whole current page into the scratch buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						  (u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, word by word. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9451
/* offset and length are dword aligned */
/* Write a block to buffered flash or EEPROM, one 32-bit word per NVRAM
 * command.  FIRST/LAST flags delimit page boundaries for flash; on ST
 * parts (except 5752/5755/5787) a write-enable command precedes each
 * page.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at a page boundary or the start of the request;
		 * LAST at a page end or the end of the request.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9502
9503/* offset and length are dword aligned */
9504static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9505{
9506 int ret;
9507
1da177e4 9508 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
9509 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9510 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
9511 udelay(40);
9512 }
9513
9514 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9515 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9516 }
9517 else {
9518 u32 grc_mode;
9519
ec41c7df
MC
9520 ret = tg3_nvram_lock(tp);
9521 if (ret)
9522 return ret;
1da177e4 9523
e6af301b
MC
9524 tg3_enable_nvram_access(tp);
9525 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9526 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 9527 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
9528
9529 grc_mode = tr32(GRC_MODE);
9530 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9531
9532 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9533 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9534
9535 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9536 buf);
9537 }
9538 else {
9539 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9540 buf);
9541 }
9542
9543 grc_mode = tr32(GRC_MODE);
9544 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9545
e6af301b 9546 tg3_disable_nvram_access(tp);
1da177e4
LT
9547 tg3_nvram_unlock(tp);
9548 }
9549
9550 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 9551 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
9552 udelay(40);
9553 }
9554
9555 return ret;
9556}
9557
/* Mapping from PCI subsystem vendor/device id to the PHY expected on
 * that board.  phy_id == 0 marks boards with no (or fiber) PHY.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9600
9601static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9602{
9603 int i;
9604
9605 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9606 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9607 tp->pdev->subsystem_vendor) &&
9608 (subsys_id_to_phy_id[i].subsys_devid ==
9609 tp->pdev->subsystem_device))
9610 return &subsys_id_to_phy_id[i];
9611 }
9612 return NULL;
9613}
9614
/* Read the NIC's hardware configuration out of on-chip SRAM (populated
 * by bootcode from EEPROM) and derive phy_id, LED mode, and assorted
 * tg3_flags/tg3_flags2 bits from it.  Must run before
 * tg3_set_power_state() so TG3_FLAG_EEPROM_WRITE_PROT is known.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default. */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	/* If the SRAM signature is absent, leave the defaults above in
	 * place; tg3_phy_probe() will then fall back to the subsystem
	 * ID table or the physical PHY ID registers.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 only exists on newer bootcode; skip it on
		 * 5700/5701/5703 and on out-of-range version words.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-format PHY ID into the driver's
		 * PHY_ID_* layout (same packing as tg3_phy_probe()).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			/* 5780-class parts use an MII-attached serdes. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 cannot drive the PHY LED pins in
			 * shared mode.
			 */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards always use PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		/* nic_cfg overrides the onboard-device default set above. */
		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
9766
/* Determine the PHY attached to this NIC.  Preference order: the
 * physical MII PHYSID registers (unless ASF firmware owns the PHY),
 * then the phy_id already read from SRAM in tg3_get_eeprom_hw_cfg(),
 * then the hardcoded subsystem ID table.  For copper PHYs, also make
 * sure a full autoneg advertisement is programmed.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Same packing as the SRAM path in tg3_get_eeprom_hw_cfg(). */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY and no ASF firmware: reset the PHY if the link is
	 * down and program a full autoneg advertisement.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR latches link-down; read twice to get the current
		 * link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 must be forced to master mode. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): advertisement registers are written again
		 * unconditionally here, after the conditional writes above —
		 * looks redundant; confirm intent before cleaning up.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): the 5401 DSP init is invoked a second time when
	 * the first call succeeded — possibly deliberate (hardware quirk),
	 * possibly a leftover; verify before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
9889
9890static void __devinit tg3_read_partno(struct tg3 *tp)
9891{
9892 unsigned char vpd_data[256];
9893 int i;
1b27777a 9894 u32 magic;
1da177e4 9895
1820180b 9896 if (tg3_nvram_read_swab(tp, 0x0, &magic))
f49639e6 9897 goto out_not_found;
1da177e4 9898
1820180b 9899 if (magic == TG3_EEPROM_MAGIC) {
1b27777a
MC
9900 for (i = 0; i < 256; i += 4) {
9901 u32 tmp;
1da177e4 9902
1b27777a
MC
9903 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9904 goto out_not_found;
9905
9906 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9907 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9908 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9909 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9910 }
9911 } else {
9912 int vpd_cap;
9913
9914 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9915 for (i = 0; i < 256; i += 4) {
9916 u32 tmp, j = 0;
9917 u16 tmp16;
9918
9919 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9920 i);
9921 while (j++ < 100) {
9922 pci_read_config_word(tp->pdev, vpd_cap +
9923 PCI_VPD_ADDR, &tmp16);
9924 if (tmp16 & 0x8000)
9925 break;
9926 msleep(1);
9927 }
f49639e6
DM
9928 if (!(tmp16 & 0x8000))
9929 goto out_not_found;
9930
1b27777a
MC
9931 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9932 &tmp);
9933 tmp = cpu_to_le32(tmp);
9934 memcpy(&vpd_data[i], &tmp, 4);
9935 }
1da177e4
LT
9936 }
9937
9938 /* Now parse and find the part number. */
9939 for (i = 0; i < 256; ) {
9940 unsigned char val = vpd_data[i];
9941 int block_end;
9942
9943 if (val == 0x82 || val == 0x91) {
9944 i = (i + 3 +
9945 (vpd_data[i + 1] +
9946 (vpd_data[i + 2] << 8)));
9947 continue;
9948 }
9949
9950 if (val != 0x90)
9951 goto out_not_found;
9952
9953 block_end = (i + 3 +
9954 (vpd_data[i + 1] +
9955 (vpd_data[i + 2] << 8)));
9956 i += 3;
9957 while (i < block_end) {
9958 if (vpd_data[i + 0] == 'P' &&
9959 vpd_data[i + 1] == 'N') {
9960 int partno_len = vpd_data[i + 2];
9961
9962 if (partno_len > 24)
9963 goto out_not_found;
9964
9965 memcpy(tp->board_part_number,
9966 &vpd_data[i + 3],
9967 partno_len);
9968
9969 /* Success. */
9970 return;
9971 }
9972 }
9973
9974 /* Part number not found. */
9975 goto out_not_found;
9976 }
9977
9978out_not_found:
9979 strcpy(tp->board_part_number, "none");
9980}
9981
c4e6575c
MC
9982static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9983{
9984 u32 val, offset, start;
9985
9986 if (tg3_nvram_read_swab(tp, 0, &val))
9987 return;
9988
9989 if (val != TG3_EEPROM_MAGIC)
9990 return;
9991
9992 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9993 tg3_nvram_read_swab(tp, 0x4, &start))
9994 return;
9995
9996 offset = tg3_nvram_logical_addr(tp, offset);
9997 if (tg3_nvram_read_swab(tp, offset, &val))
9998 return;
9999
10000 if ((val & 0xfc000000) == 0x0c000000) {
10001 u32 ver_offset, addr;
10002 int i;
10003
10004 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10005 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10006 return;
10007
10008 if (val != 0)
10009 return;
10010
10011 addr = offset + ver_offset - start;
10012 for (i = 0; i < 16; i += 4) {
10013 if (tg3_nvram_read(tp, addr + i, &val))
10014 return;
10015
10016 val = cpu_to_le32(val);
10017 memcpy(tp->fw_ver + i, &val, 4);
10018 }
10019 }
10020}
10021
1da177e4
LT
10022static int __devinit tg3_get_invariants(struct tg3 *tp)
10023{
10024 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
10025 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10026 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
399de50b
MC
10027 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10028 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
10029 { },
10030 };
10031 u32 misc_ctrl_reg;
10032 u32 cacheline_sz_reg;
10033 u32 pci_state_reg, grc_misc_cfg;
10034 u32 val;
10035 u16 pci_cmd;
10036 int err;
10037
1da177e4
LT
10038 /* Force memory write invalidate off. If we leave it on,
10039 * then on 5700_BX chips we have to enable a workaround.
10040 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10041 * to match the cacheline size. The Broadcom driver have this
10042 * workaround but turns MWI off all the times so never uses
10043 * it. This seems to suggest that the workaround is insufficient.
10044 */
10045 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10046 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10047 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10048
10049 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10050 * has the register indirect write enable bit set before
10051 * we try to access any of the MMIO registers. It is also
10052 * critical that the PCI-X hw workaround situation is decided
10053 * before that as well.
10054 */
10055 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10056 &misc_ctrl_reg);
10057
10058 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10059 MISC_HOST_CTRL_CHIPREV_SHIFT);
10060
ff645bec
MC
10061 /* Wrong chip ID in 5752 A0. This code can be removed later
10062 * as A0 is not in production.
10063 */
10064 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10065 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10066
6892914f
MC
10067 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10068 * we need to disable memory and use config. cycles
10069 * only to access all registers. The 5702/03 chips
10070 * can mistakenly decode the special cycles from the
10071 * ICH chipsets as memory write cycles, causing corruption
10072 * of register and memory space. Only certain ICH bridges
10073 * will drive special cycles with non-zero data during the
10074 * address phase which can fall within the 5703's address
10075 * range. This is not an ICH bug as the PCI spec allows
10076 * non-zero address during special cycles. However, only
10077 * these ICH bridges are known to drive non-zero addresses
10078 * during special cycles.
10079 *
10080 * Since special cycles do not cross PCI bridges, we only
10081 * enable this workaround if the 5703 is on the secondary
10082 * bus of these ICH bridges.
10083 */
10084 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10085 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10086 static struct tg3_dev_id {
10087 u32 vendor;
10088 u32 device;
10089 u32 rev;
10090 } ich_chipsets[] = {
10091 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10092 PCI_ANY_ID },
10093 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10094 PCI_ANY_ID },
10095 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10096 0xa },
10097 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10098 PCI_ANY_ID },
10099 { },
10100 };
10101 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10102 struct pci_dev *bridge = NULL;
10103
10104 while (pci_id->vendor != 0) {
10105 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10106 bridge);
10107 if (!bridge) {
10108 pci_id++;
10109 continue;
10110 }
10111 if (pci_id->rev != PCI_ANY_ID) {
10112 u8 rev;
10113
10114 pci_read_config_byte(bridge, PCI_REVISION_ID,
10115 &rev);
10116 if (rev > pci_id->rev)
10117 continue;
10118 }
10119 if (bridge->subordinate &&
10120 (bridge->subordinate->number ==
10121 tp->pdev->bus->number)) {
10122
10123 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10124 pci_dev_put(bridge);
10125 break;
10126 }
10127 }
10128 }
10129
4a29cc2e
MC
10130 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10131 * DMA addresses > 40-bit. This bridge may have other additional
10132 * 57xx devices behind it in some 4-port NIC designs for example.
10133 * Any tg3 device found behind the bridge will also need the 40-bit
10134 * DMA workaround.
10135 */
a4e2b347
MC
10136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10137 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10138 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4a29cc2e 10139 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
4cf78e4f 10140 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 10141 }
4a29cc2e
MC
10142 else {
10143 struct pci_dev *bridge = NULL;
10144
10145 do {
10146 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10147 PCI_DEVICE_ID_SERVERWORKS_EPB,
10148 bridge);
10149 if (bridge && bridge->subordinate &&
10150 (bridge->subordinate->number <=
10151 tp->pdev->bus->number) &&
10152 (bridge->subordinate->subordinate >=
10153 tp->pdev->bus->number)) {
10154 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10155 pci_dev_put(bridge);
10156 break;
10157 }
10158 } while (bridge);
10159 }
4cf78e4f 10160
1da177e4
LT
10161 /* Initialize misc host control in PCI block. */
10162 tp->misc_host_ctrl |= (misc_ctrl_reg &
10163 MISC_HOST_CTRL_CHIPREV);
10164 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10165 tp->misc_host_ctrl);
10166
10167 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10168 &cacheline_sz_reg);
10169
10170 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10171 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10172 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10173 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10174
6708e5cc 10175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 10176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
af36e6b6 10177 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 10178 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
a4e2b347 10179 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
10180 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10181
1b440c56
JL
10182 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10183 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10184 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10185
5a6f3074 10186 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
af36e6b6
MC
10187 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10188 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
5a6f3074 10189 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
fcfa0a32
MC
10190 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10191 } else
5a6f3074
MC
10192 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10193 }
1da177e4 10194
0f893dc6
MC
10195 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10196 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
d9ab5ad1 10197 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
af36e6b6 10198 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
d9ab5ad1 10199 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
0f893dc6
MC
10200 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10201
1da177e4
LT
10202 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10203 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10204
399de50b
MC
10205 /* If we have an AMD 762 or VIA K8T800 chipset, write
10206 * reordering to the mailbox registers done by the host
10207 * controller can cause major troubles. We read back from
10208 * every mailbox register write to force the writes to be
10209 * posted to the chip in order.
10210 */
10211 if (pci_dev_present(write_reorder_chipsets) &&
10212 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10213 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10214
1da177e4
LT
10215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10216 tp->pci_lat_timer < 64) {
10217 tp->pci_lat_timer = 64;
10218
10219 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10220 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10221 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10222 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10223
10224 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10225 cacheline_sz_reg);
10226 }
10227
10228 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10229 &pci_state_reg);
10230
10231 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10232 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10233
10234 /* If this is a 5700 BX chipset, and we are in PCI-X
10235 * mode, enable register write workaround.
10236 *
10237 * The workaround is to use indirect register accesses
10238 * for all chip writes not to mailbox registers.
10239 */
10240 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10241 u32 pm_reg;
10242 u16 pci_cmd;
10243
10244 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10245
10246 /* The chip can have it's power management PCI config
10247 * space registers clobbered due to this bug.
10248 * So explicitly force the chip into D0 here.
10249 */
10250 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10251 &pm_reg);
10252 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10253 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10254 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10255 pm_reg);
10256
10257 /* Also, force SERR#/PERR# in PCI command. */
10258 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10259 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10260 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10261 }
10262 }
10263
087fe256
MC
10264 /* 5700 BX chips need to have their TX producer index mailboxes
10265 * written twice to workaround a bug.
10266 */
10267 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10268 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10269
1da177e4
LT
10270 /* Back to back register writes can cause problems on this chip,
10271 * the workaround is to read back all reg writes except those to
10272 * mailbox regs. See tg3_write_indirect_reg32().
10273 *
10274 * PCI Express 5750_A0 rev chips need this workaround too.
10275 */
10276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10277 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10278 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10279 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10280
10281 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10282 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10283 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10284 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10285
10286 /* Chip-specific fixup from Broadcom driver */
10287 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10288 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10289 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10290 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10291 }
10292
1ee582d8 10293 /* Default fast path register access methods */
20094930 10294 tp->read32 = tg3_read32;
1ee582d8 10295 tp->write32 = tg3_write32;
09ee929c 10296 tp->read32_mbox = tg3_read32;
20094930 10297 tp->write32_mbox = tg3_write32;
1ee582d8
MC
10298 tp->write32_tx_mbox = tg3_write32;
10299 tp->write32_rx_mbox = tg3_write32;
10300
10301 /* Various workaround register access methods */
10302 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10303 tp->write32 = tg3_write_indirect_reg32;
10304 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10305 tp->write32 = tg3_write_flush_reg32;
10306
10307 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10308 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10309 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10310 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10311 tp->write32_rx_mbox = tg3_write_flush_reg32;
10312 }
20094930 10313
6892914f
MC
10314 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10315 tp->read32 = tg3_read_indirect_reg32;
10316 tp->write32 = tg3_write_indirect_reg32;
10317 tp->read32_mbox = tg3_read_indirect_mbox;
10318 tp->write32_mbox = tg3_write_indirect_mbox;
10319 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10320 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10321
10322 iounmap(tp->regs);
22abe310 10323 tp->regs = NULL;
6892914f
MC
10324
10325 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10326 pci_cmd &= ~PCI_COMMAND_MEMORY;
10327 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10328 }
10329
bbadf503
MC
10330 if (tp->write32 == tg3_write_indirect_reg32 ||
10331 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10332 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 10333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
bbadf503
MC
10334 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10335
7d0c41ef
MC
10336 /* Get eeprom hw config before calling tg3_set_power_state().
10337 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10338 * determined before calling tg3_set_power_state() so that
10339 * we know whether or not to switch out of Vaux power.
10340 * When the flag is set, it means that GPIO1 is used for eeprom
10341 * write protect and also implies that it is a LOM where GPIOs
10342 * are not used to switch power.
10343 */
10344 tg3_get_eeprom_hw_cfg(tp);
10345
314fba34
MC
10346 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10347 * GPIO1 driven high will bring 5700's external PHY out of reset.
10348 * It is also used as eeprom write protect on LOMs.
10349 */
10350 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10351 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10352 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10353 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10354 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
10355 /* Unused GPIO3 must be driven as output on 5752 because there
10356 * are no pull-up resistors on unused GPIO pins.
10357 */
10358 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10359 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 10360
af36e6b6
MC
10361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10362 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10363
1da177e4 10364 /* Force the chip into D0. */
bc1c7567 10365 err = tg3_set_power_state(tp, PCI_D0);
1da177e4
LT
10366 if (err) {
10367 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10368 pci_name(tp->pdev));
10369 return err;
10370 }
10371
10372 /* 5700 B0 chips do not support checksumming correctly due
10373 * to hardware bugs.
10374 */
10375 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10376 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10377
1da177e4
LT
10378 /* Derive initial jumbo mode from MTU assigned in
10379 * ether_setup() via the alloc_etherdev() call
10380 */
0f893dc6 10381 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 10382 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 10383 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
10384
10385 /* Determine WakeOnLan speed to use. */
10386 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10387 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10388 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10389 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10390 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10391 } else {
10392 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10393 }
10394
10395 /* A few boards don't want Ethernet@WireSpeed phy feature */
10396 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10397 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10398 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b
MC
10399 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10400 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
10401 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10402
10403 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10404 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10405 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10406 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10407 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10408
c424cb24
MC
10409 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10410 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10412 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10413 else
10414 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10415 }
1da177e4 10416
1da177e4 10417 tp->coalesce_mode = 0;
1da177e4
LT
10418 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10419 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10420 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10421
10422 /* Initialize MAC MI mode, polling disabled. */
10423 tw32_f(MAC_MI_MODE, tp->mi_mode);
10424 udelay(80);
10425
10426 /* Initialize data/descriptor byte/word swapping. */
10427 val = tr32(GRC_MODE);
10428 val &= GRC_MODE_HOST_STACKUP;
10429 tw32(GRC_MODE, val | tp->grc_mode);
10430
10431 tg3_switch_clocks(tp);
10432
10433 /* Clear this out for sanity. */
10434 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10435
10436 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10437 &pci_state_reg);
10438 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10439 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10440 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10441
10442 if (chiprevid == CHIPREV_ID_5701_A0 ||
10443 chiprevid == CHIPREV_ID_5701_B0 ||
10444 chiprevid == CHIPREV_ID_5701_B2 ||
10445 chiprevid == CHIPREV_ID_5701_B5) {
10446 void __iomem *sram_base;
10447
10448 /* Write some dummy words into the SRAM status block
10449 * area, see if it reads back correctly. If the return
10450 * value is bad, force enable the PCIX workaround.
10451 */
10452 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10453
10454 writel(0x00000000, sram_base);
10455 writel(0x00000000, sram_base + 4);
10456 writel(0xffffffff, sram_base + 4);
10457 if (readl(sram_base) != 0x00000000)
10458 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10459 }
10460 }
10461
10462 udelay(50);
10463 tg3_nvram_init(tp);
10464
10465 grc_misc_cfg = tr32(GRC_MISC_CFG);
10466 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10467
10468 /* Broadcom's driver says that CIOBE multisplit has a bug */
10469#if 0
10470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10471 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10472 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10473 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10474 }
10475#endif
10476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10477 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10478 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10479 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10480
fac9b83e
DM
10481 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10482 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10483 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10484 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10485 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10486 HOSTCC_MODE_CLRTICK_TXBD);
10487
10488 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10489 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10490 tp->misc_host_ctrl);
10491 }
10492
1da177e4
LT
10493 /* these are limited to 10/100 only */
10494 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10495 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10496 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10497 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10498 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10499 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10500 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10501 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10502 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10503 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10504 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10505
10506 err = tg3_phy_probe(tp);
10507 if (err) {
10508 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10509 pci_name(tp->pdev), err);
10510 /* ... but do not return immediately ... */
10511 }
10512
10513 tg3_read_partno(tp);
c4e6575c 10514 tg3_read_fw_ver(tp);
1da177e4
LT
10515
10516 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10517 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10518 } else {
10519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10520 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10521 else
10522 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10523 }
10524
10525 /* 5700 {AX,BX} chips have a broken status block link
10526 * change bit implementation, so we must use the
10527 * status register in those cases.
10528 */
10529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10530 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10531 else
10532 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10533
10534 /* The led_ctrl is set during tg3_phy_probe, here we might
10535 * have to force the link status polling mechanism based
10536 * upon subsystem IDs.
10537 */
10538 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10539 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10540 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10541 TG3_FLAG_USE_LINKCHG_REG);
10542 }
10543
10544 /* For all SERDES we poll the MAC status register. */
10545 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10546 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10547 else
10548 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10549
5a6f3074 10550 /* All chips before 5787 can get confused if TX buffers
1da177e4
LT
10551 * straddle the 4GB address boundary in some cases.
10552 */
af36e6b6
MC
10553 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
5a6f3074
MC
10555 tp->dev->hard_start_xmit = tg3_start_xmit;
10556 else
10557 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
1da177e4
LT
10558
10559 tp->rx_offset = 2;
10560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10561 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10562 tp->rx_offset = 0;
10563
f92905de
MC
10564 tp->rx_std_max_post = TG3_RX_RING_SIZE;
10565
10566 /* Increment the rx prod index on the rx std ring by at most
10567 * 8 for these chips to workaround hw errata.
10568 */
10569 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10571 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10572 tp->rx_std_max_post = 8;
10573
1da177e4
LT
10574 /* By default, disable wake-on-lan. User can change this
10575 * using ETHTOOL_SWOL.
10576 */
10577 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10578
10579 return err;
10580}
10581
10582#ifdef CONFIG_SPARC64
/* Try to obtain the MAC address from the OpenFirmware
 * "local-mac-address" property attached to this PCI device's PROM
 * node (SPARC64 only).  On success the address is copied into both
 * dev->dev_addr and dev->perm_addr and 0 is returned; -ENODEV is
 * returned when the property is absent or malformed.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		unsigned char *addr;
		int len;

		addr = of_get_property(pcp->prom_node, "local-mac-address",
				       &len);
		/* A valid Ethernet address property is exactly 6 bytes. */
		if (addr && len == 6) {
			memcpy(dev->dev_addr, addr, 6);
			memcpy(dev->perm_addr, dev->dev_addr, 6);
			return 0;
		}
	}
	return -ENODEV;
}
10603
10604static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10605{
10606 struct net_device *dev = tp->dev;
10607
10608 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 10609 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
10610 return 0;
10611}
10612#endif
10613
/* Determine the device's MAC address, trying sources in decreasing
 * order of trust:
 *   1. OpenFirmware property (SPARC64 only),
 *   2. the bootcode's MAC address mailbox in NIC SRAM,
 *   3. NVRAM at the per-port offset,
 *   4. whatever the MAC address registers currently hold.
 * Returns 0 on success with dev->dev_addr/perm_addr filled in,
 * -EINVAL if no source yielded a valid Ethernet address.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC64
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	/* Dual-port parts (5704, 5780-class) keep the second port's
	 * address at a different NVRAM offset; select it by the MAC id.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be taken, reset
		 * the NVRAM interface instead of leaving it wedged.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* Bootcode stores a 0x484b ("HK") signature in the upper half
	 * of the high word when the mailbox contents are valid.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the byte order differs from
		 * the SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >>  8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >>  8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
10686
59e6b434
DM
10687#define BOUNDARY_SINGLE_CACHELINE 1
10688#define BOUNDARY_MULTI_CACHELINE 2
10689
10690static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10691{
10692 int cacheline_size;
10693 u8 byte;
10694 int goal;
10695
10696 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10697 if (byte == 0)
10698 cacheline_size = 1024;
10699 else
10700 cacheline_size = (int) byte * 4;
10701
10702 /* On 5703 and later chips, the boundary bits have no
10703 * effect.
10704 */
10705 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10706 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10707 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10708 goto out;
10709
10710#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10711 goal = BOUNDARY_MULTI_CACHELINE;
10712#else
10713#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10714 goal = BOUNDARY_SINGLE_CACHELINE;
10715#else
10716 goal = 0;
10717#endif
10718#endif
10719
10720 if (!goal)
10721 goto out;
10722
10723 /* PCI controllers on most RISC systems tend to disconnect
10724 * when a device tries to burst across a cache-line boundary.
10725 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10726 *
10727 * Unfortunately, for PCI-E there are only limited
10728 * write-side controls for this, and thus for reads
10729 * we will still get the disconnects. We'll also waste
10730 * these PCI cycles for both read and write for chips
10731 * other than 5700 and 5701 which do not implement the
10732 * boundary bits.
10733 */
10734 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10735 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10736 switch (cacheline_size) {
10737 case 16:
10738 case 32:
10739 case 64:
10740 case 128:
10741 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10742 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10743 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10744 } else {
10745 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10746 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10747 }
10748 break;
10749
10750 case 256:
10751 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10752 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10753 break;
10754
10755 default:
10756 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10757 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10758 break;
10759 };
10760 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10761 switch (cacheline_size) {
10762 case 16:
10763 case 32:
10764 case 64:
10765 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10766 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10767 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10768 break;
10769 }
10770 /* fallthrough */
10771 case 128:
10772 default:
10773 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10774 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10775 break;
10776 };
10777 } else {
10778 switch (cacheline_size) {
10779 case 16:
10780 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10781 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10782 DMA_RWCTRL_WRITE_BNDRY_16);
10783 break;
10784 }
10785 /* fallthrough */
10786 case 32:
10787 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10788 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10789 DMA_RWCTRL_WRITE_BNDRY_32);
10790 break;
10791 }
10792 /* fallthrough */
10793 case 64:
10794 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10795 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10796 DMA_RWCTRL_WRITE_BNDRY_64);
10797 break;
10798 }
10799 /* fallthrough */
10800 case 128:
10801 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10802 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10803 DMA_RWCTRL_WRITE_BNDRY_128);
10804 break;
10805 }
10806 /* fallthrough */
10807 case 256:
10808 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10809 DMA_RWCTRL_WRITE_BNDRY_256);
10810 break;
10811 case 512:
10812 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10813 DMA_RWCTRL_WRITE_BNDRY_512);
10814 break;
10815 case 1024:
10816 default:
10817 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10818 DMA_RWCTRL_WRITE_BNDRY_1024);
10819 break;
10820 };
10821 }
10822
10823out:
10824 return val;
10825}
10826
1da177e4
LT
/* Run one host-DMA transfer of 'size' bytes between the host buffer
 * ('buf' / DMA address 'buf_dma') and NIC-local mbuf memory at
 * 0x2100, driven by a hand-built internal buffer descriptor placed
 * in the NIC's SRAM descriptor pool.  'to_device' selects read-DMA
 * (host -> NIC) vs. write-DMA (NIC -> host).  Returns 0 when the
 * engine signals completion, -ENODEV if it does not within ~4ms
 * (40 polls x 100us).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA completion FIFOs and status registers so the
	 * poll below only sees this test's completion.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Host-side end of the transfer; 0x2100 is the fixed NIC mbuf
	 * address used as the other end.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one word at a time through
	 * the PCI memory window config registers, then close the window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the matching DMA work queue with the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for our descriptor's address, which
	 * indicates the transfer finished.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
10907
#define TEST_BUFFER_SIZE 0x2000

/* Choose the DMA read/write control settings for this chip and bus,
 * then (on 5700/5701 only) run a write/read-back loopback through
 * tg3_do_test_dma() to detect the write-DMA boundary erratum,
 * tightening the write boundary to 16 bytes if corruption is seen.
 * The final value is left in tp->dma_rwctrl and in the chip's
 * DMA_RW_CTRL register.  Returns 0 on success, -ENOMEM if the test
 * buffer cannot be allocated, -ENODEV if DMA is broken beyond the
 * known workaround.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base value: PCI write command 0x7, read command 0x6. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	/* Fold in the architecture/cacheline-dependent boundary bits. */
	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus- and chip-specific watermark bits (magic values from
	 * Broadcom).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (read boundary bits). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual loopback test below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write the pattern out, read it back, verify.  On the
	 * first corruption, fall back to the 16-byte write boundary
	 * and retry; a second corruption is a hard failure.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: apply the 16-byte
				 * boundary workaround and retry.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11090
11091static void __devinit tg3_init_link_config(struct tg3 *tp)
11092{
11093 tp->link_config.advertising =
11094 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11095 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11096 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11097 ADVERTISED_Autoneg | ADVERTISED_MII);
11098 tp->link_config.speed = SPEED_INVALID;
11099 tp->link_config.duplex = DUPLEX_INVALID;
11100 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
11101 tp->link_config.active_speed = SPEED_INVALID;
11102 tp->link_config.active_duplex = DUPLEX_INVALID;
11103 tp->link_config.phy_is_low_power = 0;
11104 tp->link_config.orig_speed = SPEED_INVALID;
11105 tp->link_config.orig_duplex = DUPLEX_INVALID;
11106 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11107}
11108
11109static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11110{
fdfec172
MC
11111 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11112 tp->bufmgr_config.mbuf_read_dma_low_water =
11113 DEFAULT_MB_RDMA_LOW_WATER_5705;
11114 tp->bufmgr_config.mbuf_mac_rx_low_water =
11115 DEFAULT_MB_MACRX_LOW_WATER_5705;
11116 tp->bufmgr_config.mbuf_high_water =
11117 DEFAULT_MB_HIGH_WATER_5705;
11118
11119 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11120 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11121 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11122 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11123 tp->bufmgr_config.mbuf_high_water_jumbo =
11124 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11125 } else {
11126 tp->bufmgr_config.mbuf_read_dma_low_water =
11127 DEFAULT_MB_RDMA_LOW_WATER;
11128 tp->bufmgr_config.mbuf_mac_rx_low_water =
11129 DEFAULT_MB_MACRX_LOW_WATER;
11130 tp->bufmgr_config.mbuf_high_water =
11131 DEFAULT_MB_HIGH_WATER;
11132
11133 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11134 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11135 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11136 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11137 tp->bufmgr_config.mbuf_high_water_jumbo =
11138 DEFAULT_MB_HIGH_WATER_JUMBO;
11139 }
1da177e4
LT
11140
11141 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11142 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11143}
11144
11145static char * __devinit tg3_phy_string(struct tg3 *tp)
11146{
11147 switch (tp->phy_id & PHY_ID_MASK) {
11148 case PHY_ID_BCM5400: return "5400";
11149 case PHY_ID_BCM5401: return "5401";
11150 case PHY_ID_BCM5411: return "5411";
11151 case PHY_ID_BCM5701: return "5701";
11152 case PHY_ID_BCM5703: return "5703";
11153 case PHY_ID_BCM5704: return "5704";
11154 case PHY_ID_BCM5705: return "5705";
11155 case PHY_ID_BCM5750: return "5750";
85e94ced 11156 case PHY_ID_BCM5752: return "5752";
a4e2b347 11157 case PHY_ID_BCM5714: return "5714";
4cf78e4f 11158 case PHY_ID_BCM5780: return "5780";
af36e6b6 11159 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 11160 case PHY_ID_BCM5787: return "5787";
1da177e4
LT
11161 case PHY_ID_BCM8002: return "8002/serdes";
11162 case 0: return "serdes";
11163 default: return "unknown";
11164 };
11165}
11166
f9804ddb
MC
11167static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11168{
11169 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11170 strcpy(str, "PCI Express");
11171 return str;
11172 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11173 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11174
11175 strcpy(str, "PCIX:");
11176
11177 if ((clock_ctrl == 7) ||
11178 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11179 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11180 strcat(str, "133MHz");
11181 else if (clock_ctrl == 0)
11182 strcat(str, "33MHz");
11183 else if (clock_ctrl == 2)
11184 strcat(str, "50MHz");
11185 else if (clock_ctrl == 4)
11186 strcat(str, "66MHz");
11187 else if (clock_ctrl == 6)
11188 strcat(str, "100MHz");
f9804ddb
MC
11189 } else {
11190 strcpy(str, "PCI:");
11191 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11192 strcat(str, "66MHz");
11193 else
11194 strcat(str, "33MHz");
11195 }
11196 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11197 strcat(str, ":32-bit");
11198 else
11199 strcat(str, ":64-bit");
11200 return str;
11201}
11202
/* Find the sibling PCI function of a dual-port NIC sharing this
 * device's slot.  Returns the peer's pci_dev, or tp->pdev itself
 * when no sibling function exists (e.g. a 5704 configured in
 * single-port mode).  The returned pointer is NOT reference-counted
 * (see comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Scan all 8 functions at this device number for one that is
	 * present and is not ourselves.
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the ref on ourselves; pci_dev_put(NULL) is a no-op. */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
11230
15f9850d
DM
11231static void __devinit tg3_init_coal(struct tg3 *tp)
11232{
11233 struct ethtool_coalesce *ec = &tp->coal;
11234
11235 memset(ec, 0, sizeof(*ec));
11236 ec->cmd = ETHTOOL_GCOALESCE;
11237 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11238 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11239 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11240 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11241 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11242 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11243 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11244 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11245 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11246
11247 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11248 HOSTCC_MODE_CLRTICK_TXBD)) {
11249 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11250 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11251 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11252 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11253 }
d244c892
MC
11254
11255 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11256 ec->rx_coalesce_usecs_irq = 0;
11257 ec->tx_coalesce_usecs_irq = 0;
11258 ec->stats_block_coalesce_usecs = 0;
11259 }
15f9850d
DM
11260}
11261
1da177e4
LT
11262static int __devinit tg3_init_one(struct pci_dev *pdev,
11263 const struct pci_device_id *ent)
11264{
11265 static int tg3_version_printed = 0;
11266 unsigned long tg3reg_base, tg3reg_len;
11267 struct net_device *dev;
11268 struct tg3 *tp;
72f2afb8 11269 int i, err, pm_cap;
f9804ddb 11270 char str[40];
72f2afb8 11271 u64 dma_mask, persist_dma_mask;
1da177e4
LT
11272
11273 if (tg3_version_printed++ == 0)
11274 printk(KERN_INFO "%s", version);
11275
11276 err = pci_enable_device(pdev);
11277 if (err) {
11278 printk(KERN_ERR PFX "Cannot enable PCI device, "
11279 "aborting.\n");
11280 return err;
11281 }
11282
11283 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11284 printk(KERN_ERR PFX "Cannot find proper PCI device "
11285 "base address, aborting.\n");
11286 err = -ENODEV;
11287 goto err_out_disable_pdev;
11288 }
11289
11290 err = pci_request_regions(pdev, DRV_MODULE_NAME);
11291 if (err) {
11292 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11293 "aborting.\n");
11294 goto err_out_disable_pdev;
11295 }
11296
11297 pci_set_master(pdev);
11298
11299 /* Find power-management capability. */
11300 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11301 if (pm_cap == 0) {
11302 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11303 "aborting.\n");
11304 err = -EIO;
11305 goto err_out_free_res;
11306 }
11307
1da177e4
LT
11308 tg3reg_base = pci_resource_start(pdev, 0);
11309 tg3reg_len = pci_resource_len(pdev, 0);
11310
11311 dev = alloc_etherdev(sizeof(*tp));
11312 if (!dev) {
11313 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11314 err = -ENOMEM;
11315 goto err_out_free_res;
11316 }
11317
11318 SET_MODULE_OWNER(dev);
11319 SET_NETDEV_DEV(dev, &pdev->dev);
11320
1da177e4
LT
11321#if TG3_VLAN_TAG_USED
11322 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11323 dev->vlan_rx_register = tg3_vlan_rx_register;
11324 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11325#endif
11326
11327 tp = netdev_priv(dev);
11328 tp->pdev = pdev;
11329 tp->dev = dev;
11330 tp->pm_cap = pm_cap;
11331 tp->mac_mode = TG3_DEF_MAC_MODE;
11332 tp->rx_mode = TG3_DEF_RX_MODE;
11333 tp->tx_mode = TG3_DEF_TX_MODE;
11334 tp->mi_mode = MAC_MI_MODE_BASE;
11335 if (tg3_debug > 0)
11336 tp->msg_enable = tg3_debug;
11337 else
11338 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11339
11340 /* The word/byte swap controls here control register access byte
11341 * swapping. DMA data byte swapping is controlled in the GRC_MODE
11342 * setting below.
11343 */
11344 tp->misc_host_ctrl =
11345 MISC_HOST_CTRL_MASK_PCI_INT |
11346 MISC_HOST_CTRL_WORD_SWAP |
11347 MISC_HOST_CTRL_INDIR_ACCESS |
11348 MISC_HOST_CTRL_PCISTATE_RW;
11349
11350 /* The NONFRM (non-frame) byte/word swap controls take effect
11351 * on descriptor entries, anything which isn't packet data.
11352 *
11353 * The StrongARM chips on the board (one for tx, one for rx)
11354 * are running in big-endian mode.
11355 */
11356 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11357 GRC_MODE_WSWAP_NONFRM_DATA);
11358#ifdef __BIG_ENDIAN
11359 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11360#endif
11361 spin_lock_init(&tp->lock);
11362 spin_lock_init(&tp->tx_lock);
11363 spin_lock_init(&tp->indirect_lock);
11364 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11365
11366 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11367 if (tp->regs == 0UL) {
11368 printk(KERN_ERR PFX "Cannot map device registers, "
11369 "aborting.\n");
11370 err = -ENOMEM;
11371 goto err_out_free_dev;
11372 }
11373
11374 tg3_init_link_config(tp);
11375
1da177e4
LT
11376 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11377 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11378 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11379
11380 dev->open = tg3_open;
11381 dev->stop = tg3_close;
11382 dev->get_stats = tg3_get_stats;
11383 dev->set_multicast_list = tg3_set_rx_mode;
11384 dev->set_mac_address = tg3_set_mac_addr;
11385 dev->do_ioctl = tg3_ioctl;
11386 dev->tx_timeout = tg3_tx_timeout;
11387 dev->poll = tg3_poll;
11388 dev->ethtool_ops = &tg3_ethtool_ops;
11389 dev->weight = 64;
11390 dev->watchdog_timeo = TG3_TX_TIMEOUT;
11391 dev->change_mtu = tg3_change_mtu;
11392 dev->irq = pdev->irq;
11393#ifdef CONFIG_NET_POLL_CONTROLLER
11394 dev->poll_controller = tg3_poll_controller;
11395#endif
11396
11397 err = tg3_get_invariants(tp);
11398 if (err) {
11399 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11400 "aborting.\n");
11401 goto err_out_iounmap;
11402 }
11403
4a29cc2e
MC
11404 /* The EPB bridge inside 5714, 5715, and 5780 and any
11405 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
11406 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11407 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11408 * do DMA address check in tg3_start_xmit().
11409 */
4a29cc2e
MC
11410 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11411 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11412 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
72f2afb8
MC
11413 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11414#ifdef CONFIG_HIGHMEM
11415 dma_mask = DMA_64BIT_MASK;
11416#endif
4a29cc2e 11417 } else
72f2afb8
MC
11418 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11419
11420 /* Configure DMA attributes. */
11421 if (dma_mask > DMA_32BIT_MASK) {
11422 err = pci_set_dma_mask(pdev, dma_mask);
11423 if (!err) {
11424 dev->features |= NETIF_F_HIGHDMA;
11425 err = pci_set_consistent_dma_mask(pdev,
11426 persist_dma_mask);
11427 if (err < 0) {
11428 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11429 "DMA for consistent allocations\n");
11430 goto err_out_iounmap;
11431 }
11432 }
11433 }
11434 if (err || dma_mask == DMA_32BIT_MASK) {
11435 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11436 if (err) {
11437 printk(KERN_ERR PFX "No usable DMA configuration, "
11438 "aborting.\n");
11439 goto err_out_iounmap;
11440 }
11441 }
11442
fdfec172 11443 tg3_init_bufmgr_config(tp);
1da177e4
LT
11444
11445#if TG3_TSO_SUPPORT != 0
11446 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11447 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11448 }
11449 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11451 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11452 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11453 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11454 } else {
11455 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11456 }
11457
4e3a7aaa
MC
11458 /* TSO is on by default on chips that support hardware TSO.
11459 * Firmware TSO on older chips gives lower performance, so it
11460 * is off by default, but can be enabled using ethtool.
11461 */
11462 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
1da177e4 11463 dev->features |= NETIF_F_TSO;
1da177e4
LT
11464
11465#endif
11466
11467 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11468 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11469 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11470 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11471 tp->rx_pending = 63;
11472 }
11473
8c2dc7e1
MC
11474 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11475 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11476 tp->pdev_peer = tg3_find_peer(tp);
1da177e4
LT
11477
11478 err = tg3_get_device_address(tp);
11479 if (err) {
11480 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11481 "aborting.\n");
11482 goto err_out_iounmap;
11483 }
11484
11485 /*
11486 * Reset chip in case UNDI or EFI driver did not shutdown
11487 * DMA self test will enable WDMAC and we'll see (spurious)
11488 * pending DMA on the PCI bus at that point.
11489 */
11490 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11491 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11492 pci_save_state(tp->pdev);
11493 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 11494 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
11495 }
11496
11497 err = tg3_test_dma(tp);
11498 if (err) {
11499 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11500 goto err_out_iounmap;
11501 }
11502
11503 /* Tigon3 can do ipv4 only... and some chips have buggy
11504 * checksumming.
11505 */
11506 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
af36e6b6
MC
11507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9c27dbdf
MC
11509 dev->features |= NETIF_F_HW_CSUM;
11510 else
11511 dev->features |= NETIF_F_IP_CSUM;
11512 dev->features |= NETIF_F_SG;
1da177e4
LT
11513 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11514 } else
11515 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11516
1da177e4
LT
11517 /* flow control autonegotiation is default behavior */
11518 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11519
15f9850d
DM
11520 tg3_init_coal(tp);
11521
7d3f4c97
DM
11522 /* Now that we have fully setup the chip, save away a snapshot
11523 * of the PCI config space. We need to restore this after
11524 * GRC_MISC_CFG core clock resets and some resume events.
11525 */
11526 pci_save_state(tp->pdev);
11527
1da177e4
LT
11528 err = register_netdev(dev);
11529 if (err) {
11530 printk(KERN_ERR PFX "Cannot register net device, "
11531 "aborting.\n");
11532 goto err_out_iounmap;
11533 }
11534
11535 pci_set_drvdata(pdev, dev);
11536
f9804ddb 11537 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
1da177e4
LT
11538 dev->name,
11539 tp->board_part_number,
11540 tp->pci_chip_rev_id,
11541 tg3_phy_string(tp),
f9804ddb 11542 tg3_bus_string(tp, str),
1da177e4
LT
11543 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11544
11545 for (i = 0; i < 6; i++)
11546 printk("%2.2x%c", dev->dev_addr[i],
11547 i == 5 ? '\n' : ':');
11548
11549 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11550 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11551 "TSOcap[%d] \n",
11552 dev->name,
11553 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11554 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11555 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11556 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11557 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11558 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11559 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
4a29cc2e
MC
11560 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11561 dev->name, tp->dma_rwctrl,
11562 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11563 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
1da177e4 11564
59f1741e
JM
11565 netif_carrier_off(tp->dev);
11566
1da177e4
LT
11567 return 0;
11568
11569err_out_iounmap:
6892914f
MC
11570 if (tp->regs) {
11571 iounmap(tp->regs);
22abe310 11572 tp->regs = NULL;
6892914f 11573 }
1da177e4
LT
11574
11575err_out_free_dev:
11576 free_netdev(dev);
11577
11578err_out_free_res:
11579 pci_release_regions(pdev);
11580
11581err_out_disable_pdev:
11582 pci_disable_device(pdev);
11583 pci_set_drvdata(pdev, NULL);
11584 return err;
11585}
11586
11587static void __devexit tg3_remove_one(struct pci_dev *pdev)
11588{
11589 struct net_device *dev = pci_get_drvdata(pdev);
11590
11591 if (dev) {
11592 struct tg3 *tp = netdev_priv(dev);
11593
7faa006f 11594 flush_scheduled_work();
1da177e4 11595 unregister_netdev(dev);
6892914f
MC
11596 if (tp->regs) {
11597 iounmap(tp->regs);
22abe310 11598 tp->regs = NULL;
6892914f 11599 }
1da177e4
LT
11600 free_netdev(dev);
11601 pci_release_regions(pdev);
11602 pci_disable_device(pdev);
11603 pci_set_drvdata(pdev, NULL);
11604 }
11605}
11606
/* PM suspend callback.  Quiesces the interface, halts the chip and drops
 * it to the target power state.  The ordering below is deliberate:
 * stop traffic -> kill the timer -> mask interrupts -> detach -> halt.
 * If the power-state transition fails, the whole sequence is undone so
 * the device keeps running.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Let any scheduled reset/link work finish before we tear down. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* irq_sync=1: also synchronize with the interrupt handler. */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the chip back up and
		 * re-attach so the interface stays usable.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		tg3_init_hw(tp, 1);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}
11650
/* PM resume callback: inverse of tg3_suspend().  Restores PCI config
 * space, powers the chip back to D0, re-initializes the hardware and
 * restarts the driver timer and the network interface.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* The interface was down at suspend time; nothing to restore. */
	if (!netif_running(dev))
		return 0;

	/* Config space was saved by the probe/suspend paths. */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_init_hw(tp, 1);

	/* Re-arm the periodic driver timer that suspend deleted. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
11682
/* PCI driver descriptor: binds the tg3 probe/remove and power-management
 * entry points to the device IDs listed in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11691
11692static int __init tg3_init(void)
11693{
11694 return pci_module_init(&tg3_driver);
11695}
11696
11697static void __exit tg3_cleanup(void)
11698{
11699 pci_unregister_driver(&tg3_driver);
11700}
11701
/* Wire the init/exit routines into the module load/unload machinery. */
module_init(tg3_init);
module_exit(tg3_cleanup);