]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[PATCH] tg3: fix ASF heartbeat
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
1da177e4
LT
40
41#include <net/checksum.h>
42
43#include <asm/system.h>
44#include <asm/io.h>
45#include <asm/byteorder.h>
46#include <asm/uaccess.h>
47
48#ifdef CONFIG_SPARC64
49#include <asm/idprom.h>
50#include <asm/oplib.h>
51#include <asm/pbm.h>
52#endif
53
54#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
55#define TG3_VLAN_TAG_USED 1
56#else
57#define TG3_VLAN_TAG_USED 0
58#endif
59
60#ifdef NETIF_F_TSO
61#define TG3_TSO_SUPPORT 1
62#else
63#define TG3_TSO_SUPPORT 0
64#endif
65
66#include "tg3.h"
67
68#define DRV_MODULE_NAME "tg3"
69#define PFX DRV_MODULE_NAME ": "
ed39f731
DM
70#define DRV_MODULE_VERSION "3.42"
71#define DRV_MODULE_RELDATE "Oct 3, 2005"
1da177e4
LT
72
73#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0
75#define TG3_DEF_TX_MODE 0
76#define TG3_DEF_MSG_ENABLE \
77 (NETIF_MSG_DRV | \
78 NETIF_MSG_PROBE | \
79 NETIF_MSG_LINK | \
80 NETIF_MSG_TIMER | \
81 NETIF_MSG_IFDOWN | \
82 NETIF_MSG_IFUP | \
83 NETIF_MSG_RX_ERR | \
84 NETIF_MSG_TX_ERR)
85
86/* length of time before we decide the hardware is borked,
87 * and dev->tx_timeout() should be called to fix the problem
88 */
89#define TG3_TX_TIMEOUT (5 * HZ)
90
91/* hardware minimum and maximum for a single frame's data payload */
92#define TG3_MIN_MTU 60
93#define TG3_MAX_MTU(tp) \
0f893dc6 94 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
95
96/* These numbers seem to be hard coded in the NIC firmware somehow.
97 * You can't change the ring sizes, but you can change where you place
98 * them in the NIC onboard memory.
99 */
100#define TG3_RX_RING_SIZE 512
101#define TG3_DEF_RX_RING_PENDING 200
102#define TG3_RX_JUMBO_RING_SIZE 256
103#define TG3_DEF_RX_JUMBO_RING_PENDING 100
104
105/* Do not place this n-ring entries value into the tp struct itself,
106 * we really want to expose these constants to GCC so that modulo et
107 * al. operations are done with shifts and masks instead of with
108 * hw multiply/modulo instructions. Another solution would be to
109 * replace things like '% foo' with '& (foo - 1)'.
110 */
111#define TG3_RX_RCB_RING_SIZE(tp) \
112 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
113
114#define TG3_TX_RING_SIZE 512
115#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
116
117#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
118 TG3_RX_RING_SIZE)
119#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_JUMBO_RING_SIZE)
121#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_RCB_RING_SIZE(tp))
123#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
124 TG3_TX_RING_SIZE)
1da177e4 125#define TX_BUFFS_AVAIL(TP) \
51b91468
MC
126 ((TP)->tx_pending - \
127 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
1da177e4
LT
128#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
129
130#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
131#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
132
133/* minimum number of free TX descriptors required to wake up TX process */
134#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
135
136/* number of ETHTOOL_GSTATS u64's */
137#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
138
4cafd3f5
MC
139#define TG3_NUM_TEST 6
140
1da177e4
LT
141static char version[] __devinitdata =
142 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
143
144MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
145MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
146MODULE_LICENSE("GPL");
147MODULE_VERSION(DRV_MODULE_VERSION);
148
149static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
150module_param(tg3_debug, int, 0);
151MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
152
153static struct pci_device_id tg3_pci_tbl[] = {
154 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
155 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
6e9017a7 212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
af2bcd97 213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d8659255
XVP
214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1da177e4
LT
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
a4e2b347
MC
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
4cf78e4f
MC
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1da177e4
LT
230 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
245 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246 { 0, }
247};
248
249MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
250
251static struct {
252 const char string[ETH_GSTRING_LEN];
253} ethtool_stats_keys[TG3_NUM_STATS] = {
254 { "rx_octets" },
255 { "rx_fragments" },
256 { "rx_ucast_packets" },
257 { "rx_mcast_packets" },
258 { "rx_bcast_packets" },
259 { "rx_fcs_errors" },
260 { "rx_align_errors" },
261 { "rx_xon_pause_rcvd" },
262 { "rx_xoff_pause_rcvd" },
263 { "rx_mac_ctrl_rcvd" },
264 { "rx_xoff_entered" },
265 { "rx_frame_too_long_errors" },
266 { "rx_jabbers" },
267 { "rx_undersize_packets" },
268 { "rx_in_length_errors" },
269 { "rx_out_length_errors" },
270 { "rx_64_or_less_octet_packets" },
271 { "rx_65_to_127_octet_packets" },
272 { "rx_128_to_255_octet_packets" },
273 { "rx_256_to_511_octet_packets" },
274 { "rx_512_to_1023_octet_packets" },
275 { "rx_1024_to_1522_octet_packets" },
276 { "rx_1523_to_2047_octet_packets" },
277 { "rx_2048_to_4095_octet_packets" },
278 { "rx_4096_to_8191_octet_packets" },
279 { "rx_8192_to_9022_octet_packets" },
280
281 { "tx_octets" },
282 { "tx_collisions" },
283
284 { "tx_xon_sent" },
285 { "tx_xoff_sent" },
286 { "tx_flow_control" },
287 { "tx_mac_errors" },
288 { "tx_single_collisions" },
289 { "tx_mult_collisions" },
290 { "tx_deferred" },
291 { "tx_excessive_collisions" },
292 { "tx_late_collisions" },
293 { "tx_collide_2times" },
294 { "tx_collide_3times" },
295 { "tx_collide_4times" },
296 { "tx_collide_5times" },
297 { "tx_collide_6times" },
298 { "tx_collide_7times" },
299 { "tx_collide_8times" },
300 { "tx_collide_9times" },
301 { "tx_collide_10times" },
302 { "tx_collide_11times" },
303 { "tx_collide_12times" },
304 { "tx_collide_13times" },
305 { "tx_collide_14times" },
306 { "tx_collide_15times" },
307 { "tx_ucast_packets" },
308 { "tx_mcast_packets" },
309 { "tx_bcast_packets" },
310 { "tx_carrier_sense_errors" },
311 { "tx_discards" },
312 { "tx_errors" },
313
314 { "dma_writeq_full" },
315 { "dma_write_prioq_full" },
316 { "rxbds_empty" },
317 { "rx_discards" },
318 { "rx_errors" },
319 { "rx_threshold_hit" },
320
321 { "dma_readq_full" },
322 { "dma_read_prioq_full" },
323 { "tx_comp_queue_full" },
324
325 { "ring_set_send_prod_index" },
326 { "ring_status_update" },
327 { "nic_irqs" },
328 { "nic_avoided_irqs" },
329 { "nic_tx_threshold_hit" }
330};
331
4cafd3f5
MC
332static struct {
333 const char string[ETH_GSTRING_LEN];
334} ethtool_test_keys[TG3_NUM_TEST] = {
335 { "nvram test (online) " },
336 { "link test (online) " },
337 { "register test (offline)" },
338 { "memory test (offline)" },
339 { "loopback test (offline)" },
340 { "interrupt test (offline)" },
341};
342
1da177e4
LT
343static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
344{
6892914f
MC
345 unsigned long flags;
346
347 spin_lock_irqsave(&tp->indirect_lock, flags);
1ee582d8
MC
348 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
349 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
6892914f 350 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1ee582d8
MC
351}
352
353static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
354{
355 writel(val, tp->regs + off);
356 readl(tp->regs + off);
1da177e4
LT
357}
358
6892914f 359static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
1da177e4 360{
6892914f
MC
361 unsigned long flags;
362 u32 val;
363
364 spin_lock_irqsave(&tp->indirect_lock, flags);
365 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
366 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
367 spin_unlock_irqrestore(&tp->indirect_lock, flags);
368 return val;
369}
370
371static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
372{
373 unsigned long flags;
374
375 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
376 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
377 TG3_64BIT_REG_LOW, val);
378 return;
379 }
380 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
381 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
382 TG3_64BIT_REG_LOW, val);
383 return;
1da177e4 384 }
6892914f
MC
385
386 spin_lock_irqsave(&tp->indirect_lock, flags);
387 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
388 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
389 spin_unlock_irqrestore(&tp->indirect_lock, flags);
390
391 /* In indirect mode when disabling interrupts, we also need
392 * to clear the interrupt bit in the GRC local ctrl register.
393 */
394 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
395 (val == 0x1)) {
396 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
397 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
398 }
399}
400
401static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
402{
403 unsigned long flags;
404 u32 val;
405
406 spin_lock_irqsave(&tp->indirect_lock, flags);
407 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
408 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
409 spin_unlock_irqrestore(&tp->indirect_lock, flags);
410 return val;
411}
412
413static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
414{
415 tp->write32(tp, off, val);
416 if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
417 !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
418 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
419 tp->read32(tp, off); /* flush */
1da177e4
LT
420}
421
09ee929c
MC
422static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
423{
424 tp->write32_mbox(tp, off, val);
6892914f
MC
425 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
426 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
427 tp->read32_mbox(tp, off);
09ee929c
MC
428}
429
20094930 430static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
1da177e4
LT
431{
432 void __iomem *mbox = tp->regs + off;
433 writel(val, mbox);
434 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
435 writel(val, mbox);
436 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
437 readl(mbox);
438}
439
20094930
MC
440static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
441{
442 writel(val, tp->regs + off);
443}
1da177e4 444
20094930
MC
445static u32 tg3_read32(struct tg3 *tp, u32 off)
446{
447 return (readl(tp->regs + off));
448}
449
450#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 451#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
452#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
453#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 454#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
455
456#define tw32(reg,val) tp->write32(tp, reg, val)
1da177e4 457#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
20094930 458#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
459
460static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
461{
6892914f
MC
462 unsigned long flags;
463
464 spin_lock_irqsave(&tp->indirect_lock, flags);
1da177e4
LT
465 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
466 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
467
468 /* Always leave this as zero. */
469 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
6892914f 470 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
471}
472
28fbef78
MC
473static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
474{
475 /* If no workaround is needed, write to mem space directly */
476 if (tp->write32 != tg3_write_indirect_reg32)
477 tw32(NIC_SRAM_WIN_BASE + off, val);
478 else
479 tg3_write_mem(tp, off, val);
480}
481
1da177e4
LT
482static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
483{
6892914f
MC
484 unsigned long flags;
485
486 spin_lock_irqsave(&tp->indirect_lock, flags);
1da177e4
LT
487 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
488 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
489
490 /* Always leave this as zero. */
491 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
6892914f 492 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
493}
494
495static void tg3_disable_ints(struct tg3 *tp)
496{
497 tw32(TG3PCI_MISC_HOST_CTRL,
498 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c 499 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
1da177e4
LT
500}
501
502static inline void tg3_cond_int(struct tg3 *tp)
503{
38f3843e
MC
504 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
505 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4
LT
506 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
507}
508
509static void tg3_enable_ints(struct tg3 *tp)
510{
bbe832c0
MC
511 tp->irq_sync = 0;
512 wmb();
513
1da177e4
LT
514 tw32(TG3PCI_MISC_HOST_CTRL,
515 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
09ee929c
MC
516 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
517 (tp->last_tag << 24));
1da177e4
LT
518 tg3_cond_int(tp);
519}
520
04237ddd
MC
521static inline unsigned int tg3_has_work(struct tg3 *tp)
522{
523 struct tg3_hw_status *sblk = tp->hw_status;
524 unsigned int work_exists = 0;
525
526 /* check for phy events */
527 if (!(tp->tg3_flags &
528 (TG3_FLAG_USE_LINKCHG_REG |
529 TG3_FLAG_POLL_SERDES))) {
530 if (sblk->status & SD_STATUS_LINK_CHG)
531 work_exists = 1;
532 }
533 /* check for RX/TX work to do */
534 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
535 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
536 work_exists = 1;
537
538 return work_exists;
539}
540
1da177e4 541/* tg3_restart_ints
04237ddd
MC
542 * similar to tg3_enable_ints, but it accurately determines whether there
543 * is new work pending and can return without flushing the PIO write
544 * which reenables interrupts
1da177e4
LT
545 */
546static void tg3_restart_ints(struct tg3 *tp)
547{
fac9b83e
DM
548 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
549 tp->last_tag << 24);
1da177e4
LT
550 mmiowb();
551
fac9b83e
DM
552 /* When doing tagged status, this work check is unnecessary.
553 * The last_tag we write above tells the chip which piece of
554 * work we've completed.
555 */
556 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
557 tg3_has_work(tp))
04237ddd
MC
558 tw32(HOSTCC_MODE, tp->coalesce_mode |
559 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
560}
561
562static inline void tg3_netif_stop(struct tg3 *tp)
563{
bbe832c0 564 tp->dev->trans_start = jiffies; /* prevent tx timeout */
1da177e4
LT
565 netif_poll_disable(tp->dev);
566 netif_tx_disable(tp->dev);
567}
568
569static inline void tg3_netif_start(struct tg3 *tp)
570{
571 netif_wake_queue(tp->dev);
572 /* NOTE: unconditional netif_wake_queue is only appropriate
573 * so long as all callers are assured to have free tx slots
574 * (such as after tg3_init_hw)
575 */
576 netif_poll_enable(tp->dev);
f47c11ee
DM
577 tp->hw_status->status |= SD_STATUS_UPDATED;
578 tg3_enable_ints(tp);
1da177e4
LT
579}
580
581static void tg3_switch_clocks(struct tg3 *tp)
582{
583 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
584 u32 orig_clock_ctrl;
585
a4e2b347 586 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4cf78e4f
MC
587 return;
588
1da177e4
LT
589 orig_clock_ctrl = clock_ctrl;
590 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
591 CLOCK_CTRL_CLKRUN_OENABLE |
592 0x1f);
593 tp->pci_clock_ctrl = clock_ctrl;
594
595 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
596 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
597 tw32_f(TG3PCI_CLOCK_CTRL,
598 clock_ctrl | CLOCK_CTRL_625_CORE);
599 udelay(40);
600 }
601 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
602 tw32_f(TG3PCI_CLOCK_CTRL,
603 clock_ctrl |
604 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
605 udelay(40);
606 tw32_f(TG3PCI_CLOCK_CTRL,
607 clock_ctrl | (CLOCK_CTRL_ALTCLK));
608 udelay(40);
609 }
610 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
611 udelay(40);
612}
613
614#define PHY_BUSY_LOOPS 5000
615
616static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
617{
618 u32 frame_val;
619 unsigned int loops;
620 int ret;
621
622 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
623 tw32_f(MAC_MI_MODE,
624 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
625 udelay(80);
626 }
627
628 *val = 0x0;
629
630 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
631 MI_COM_PHY_ADDR_MASK);
632 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
633 MI_COM_REG_ADDR_MASK);
634 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
635
636 tw32_f(MAC_MI_COM, frame_val);
637
638 loops = PHY_BUSY_LOOPS;
639 while (loops != 0) {
640 udelay(10);
641 frame_val = tr32(MAC_MI_COM);
642
643 if ((frame_val & MI_COM_BUSY) == 0) {
644 udelay(5);
645 frame_val = tr32(MAC_MI_COM);
646 break;
647 }
648 loops -= 1;
649 }
650
651 ret = -EBUSY;
652 if (loops != 0) {
653 *val = frame_val & MI_COM_DATA_MASK;
654 ret = 0;
655 }
656
657 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
658 tw32_f(MAC_MI_MODE, tp->mi_mode);
659 udelay(80);
660 }
661
662 return ret;
663}
664
665static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
666{
667 u32 frame_val;
668 unsigned int loops;
669 int ret;
670
671 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
672 tw32_f(MAC_MI_MODE,
673 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
674 udelay(80);
675 }
676
677 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
678 MI_COM_PHY_ADDR_MASK);
679 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
680 MI_COM_REG_ADDR_MASK);
681 frame_val |= (val & MI_COM_DATA_MASK);
682 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
683
684 tw32_f(MAC_MI_COM, frame_val);
685
686 loops = PHY_BUSY_LOOPS;
687 while (loops != 0) {
688 udelay(10);
689 frame_val = tr32(MAC_MI_COM);
690 if ((frame_val & MI_COM_BUSY) == 0) {
691 udelay(5);
692 frame_val = tr32(MAC_MI_COM);
693 break;
694 }
695 loops -= 1;
696 }
697
698 ret = -EBUSY;
699 if (loops != 0)
700 ret = 0;
701
702 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
703 tw32_f(MAC_MI_MODE, tp->mi_mode);
704 udelay(80);
705 }
706
707 return ret;
708}
709
710static void tg3_phy_set_wirespeed(struct tg3 *tp)
711{
712 u32 val;
713
714 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
715 return;
716
717 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
718 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
719 tg3_writephy(tp, MII_TG3_AUX_CTRL,
720 (val | (1 << 15) | (1 << 4)));
721}
722
723static int tg3_bmcr_reset(struct tg3 *tp)
724{
725 u32 phy_control;
726 int limit, err;
727
728 /* OK, reset it, and poll the BMCR_RESET bit until it
729 * clears or we time out.
730 */
731 phy_control = BMCR_RESET;
732 err = tg3_writephy(tp, MII_BMCR, phy_control);
733 if (err != 0)
734 return -EBUSY;
735
736 limit = 5000;
737 while (limit--) {
738 err = tg3_readphy(tp, MII_BMCR, &phy_control);
739 if (err != 0)
740 return -EBUSY;
741
742 if ((phy_control & BMCR_RESET) == 0) {
743 udelay(40);
744 break;
745 }
746 udelay(10);
747 }
748 if (limit <= 0)
749 return -EBUSY;
750
751 return 0;
752}
753
754static int tg3_wait_macro_done(struct tg3 *tp)
755{
756 int limit = 100;
757
758 while (limit--) {
759 u32 tmp32;
760
761 if (!tg3_readphy(tp, 0x16, &tmp32)) {
762 if ((tmp32 & 0x1000) == 0)
763 break;
764 }
765 }
766 if (limit <= 0)
767 return -EBUSY;
768
769 return 0;
770}
771
772static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
773{
774 static const u32 test_pat[4][6] = {
775 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
776 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
777 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
778 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
779 };
780 int chan;
781
782 for (chan = 0; chan < 4; chan++) {
783 int i;
784
785 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
786 (chan * 0x2000) | 0x0200);
787 tg3_writephy(tp, 0x16, 0x0002);
788
789 for (i = 0; i < 6; i++)
790 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
791 test_pat[chan][i]);
792
793 tg3_writephy(tp, 0x16, 0x0202);
794 if (tg3_wait_macro_done(tp)) {
795 *resetp = 1;
796 return -EBUSY;
797 }
798
799 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
800 (chan * 0x2000) | 0x0200);
801 tg3_writephy(tp, 0x16, 0x0082);
802 if (tg3_wait_macro_done(tp)) {
803 *resetp = 1;
804 return -EBUSY;
805 }
806
807 tg3_writephy(tp, 0x16, 0x0802);
808 if (tg3_wait_macro_done(tp)) {
809 *resetp = 1;
810 return -EBUSY;
811 }
812
813 for (i = 0; i < 6; i += 2) {
814 u32 low, high;
815
816 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
817 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
818 tg3_wait_macro_done(tp)) {
819 *resetp = 1;
820 return -EBUSY;
821 }
822 low &= 0x7fff;
823 high &= 0x000f;
824 if (low != test_pat[chan][i] ||
825 high != test_pat[chan][i+1]) {
826 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
827 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
828 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
829
830 return -EBUSY;
831 }
832 }
833 }
834
835 return 0;
836}
837
838static int tg3_phy_reset_chanpat(struct tg3 *tp)
839{
840 int chan;
841
842 for (chan = 0; chan < 4; chan++) {
843 int i;
844
845 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
846 (chan * 0x2000) | 0x0200);
847 tg3_writephy(tp, 0x16, 0x0002);
848 for (i = 0; i < 6; i++)
849 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
850 tg3_writephy(tp, 0x16, 0x0202);
851 if (tg3_wait_macro_done(tp))
852 return -EBUSY;
853 }
854
855 return 0;
856}
857
858static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
859{
860 u32 reg32, phy9_orig;
861 int retries, do_phy_reset, err;
862
863 retries = 10;
864 do_phy_reset = 1;
865 do {
866 if (do_phy_reset) {
867 err = tg3_bmcr_reset(tp);
868 if (err)
869 return err;
870 do_phy_reset = 0;
871 }
872
873 /* Disable transmitter and interrupt. */
874 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
875 continue;
876
877 reg32 |= 0x3000;
878 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
879
880 /* Set full-duplex, 1000 mbps. */
881 tg3_writephy(tp, MII_BMCR,
882 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
883
884 /* Set to master mode. */
885 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
886 continue;
887
888 tg3_writephy(tp, MII_TG3_CTRL,
889 (MII_TG3_CTRL_AS_MASTER |
890 MII_TG3_CTRL_ENABLE_AS_MASTER));
891
892 /* Enable SM_DSP_CLOCK and 6dB. */
893 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
894
895 /* Block the PHY control access. */
896 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
897 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
898
899 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
900 if (!err)
901 break;
902 } while (--retries);
903
904 err = tg3_phy_reset_chanpat(tp);
905 if (err)
906 return err;
907
908 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
909 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
910
911 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
912 tg3_writephy(tp, 0x16, 0x0000);
913
914 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
915 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
916 /* Set Extended packet length bit for jumbo frames */
917 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
918 }
919 else {
920 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
921 }
922
923 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
924
925 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
926 reg32 &= ~0x3000;
927 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
928 } else if (!err)
929 err = -EBUSY;
930
931 return err;
932}
933
934/* This will reset the tigon3 PHY if there is no valid
935 * link unless the FORCE argument is non-zero.
936 */
937static int tg3_phy_reset(struct tg3 *tp)
938{
939 u32 phy_status;
940 int err;
941
942 err = tg3_readphy(tp, MII_BMSR, &phy_status);
943 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
944 if (err != 0)
945 return -EBUSY;
946
947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
950 err = tg3_phy_reset_5703_4_5(tp);
951 if (err)
952 return err;
953 goto out;
954 }
955
956 err = tg3_bmcr_reset(tp);
957 if (err)
958 return err;
959
960out:
961 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
962 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
963 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
964 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
965 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
966 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
967 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
968 }
969 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
970 tg3_writephy(tp, 0x1c, 0x8d68);
971 tg3_writephy(tp, 0x1c, 0x8d68);
972 }
973 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
974 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
975 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
976 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
977 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
978 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
979 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
980 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
981 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
982 }
983 /* Set Extended packet length bit (bit 14) on all chips that */
984 /* support jumbo frames */
985 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
986 /* Cannot do read-modify-write on 5401 */
987 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
0f893dc6 988 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
989 u32 phy_reg;
990
991 /* Set bit 14 with read-modify-write to preserve other bits */
992 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
993 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
994 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
995 }
996
997 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
998 * jumbo frames transmission.
999 */
0f893dc6 1000 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1da177e4
LT
1001 u32 phy_reg;
1002
1003 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1004 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1005 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1006 }
1007
1008 tg3_phy_set_wirespeed(tp);
1009 return 0;
1010}
1011
/* Steer the NIC's auxiliary (Vaux) power GPIOs so that standby power is
 * kept available when either this port or its peer needs it (e.g. for
 * Wake-on-LAN), and dropped otherwise.  On dual-port 5704 boards the two
 * functions share aux power, so the peer's configuration is consulted too.
 * The tw32_f()/udelay(100) pairs sequence the GPIO transitions; the order
 * of writes is hardware-mandated, so do not reorder them.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* LOM designs keep EEPROM write protect set; aux power is managed
	 * by the system firmware there, so leave the GPIOs alone.
	 */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Dual-port: the sibling function shares the aux rail. */
		tp_peer = pci_get_drvdata(tp->pdev_peer);
		if (!tp_peer)
			BUG();
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
		/* Someone wants WOL: drive the GPIOs to keep Vaux up. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE0 |
				GRC_LCLCTRL_GPIO_OE1 |
				GRC_LCLCTRL_GPIO_OE2 |
				GRC_LCLCTRL_GPIO_OUTPUT0 |
				GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl;

			/* If the peer is already initialized it owns the
			 * GPIO sequencing; don't fight over it.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						grc_local_ctrl);
			udelay(100);

			/* Three-step GPIO dance: assert OUTPUT0, then (if
			 * GPIO2 is usable) deassert OUTPUT2, settling 100us
			 * between each write.
			 */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						grc_local_ctrl);
			udelay(100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				       grc_local_ctrl);
				udelay(100);
			}
		}
	} else {
		/* No WOL anywhere: power the aux rail down (not on
		 * 5700/5701, which don't need this sequence).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE1 |
				GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE1));
			udelay(100);

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			       (GRC_LCLCTRL_GPIO_OE1 |
				GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);
		}
	}
}
1098
/* Forward declaration; the full definition appears later in this file. */
static int tg3_setup_phy(struct tg3 *, int);

/* Reset "kinds" passed to the firmware signature helpers so the on-chip
 * ASF/management firmware knows why the driver is resetting the device.
 */
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
1107
/* Move the chip into PCI power state @state (0 = D0 full power,
 * 1..3 = D1..D3).  For D0 this only restores power and Vaux routing.
 * For low-power states it additionally: masks PCI interrupts, drops the
 * copper link to 10/half to save power, programs WOL magic-packet mode
 * if enabled, gates RX/TX/core clocks per chip family, re-routes aux
 * power, and finally writes the PM control register.
 *
 * Returns 0 on success or -EINVAL for an unknown @state.
 * NOTE(review): @state appears to map directly onto PCI D-states —
 * callers presumably pass pci_choose_state()-style values; confirm.
 */
static int tg3_set_power_state(struct tg3 *tp, int state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear any pending PME and the old D-state bits. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case 0:
		/* D0: restore full power and return immediately. */
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
			udelay(100);
		}

		return 0;

	case 1:
		power_control |= 1;
		break;

	case 2:
		power_control |= 2;
		break;

	case 3:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	/* Arm PME generation for wake-up from the low-power state. */
	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Remember the pre-suspend link settings so resume can restore
	 * them; only on the first transition into low power.
	 */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper PHYs: renegotiate down to 10/half to minimize power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			/* Magic AUX_CTRL value for WOL operation —
			 * from Broadcom; meaning not documented here.
			 */
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detection if the device can
		 * assert PME# from D3cold.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver alive so WOL frames are seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: which clocks may be stopped depends on chip
	 * family and whether ASF firmware still needs the core running.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_f(TG3PCI_CLOCK_CTRL, base_val |
		       CLOCK_CTRL_ALTCLK |
		       CLOCK_CTRL_PWRDOWN_PLL133);
		udelay(40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-stage clock switch with settle time between. */
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
		udelay(40);

		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
		udelay(40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_f(TG3PCI_CLOCK_CTRL,
			       tp->pci_clock_ctrl | newbits3);
			udelay(40);
		}
	}

	/* Route aux power appropriately for this port and its peer. */
	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);	/* undocumented PLL control reg */

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
			tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	/* Tell the firmware the driver is going away. */
	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
1291
1292static void tg3_link_report(struct tg3 *tp)
1293{
1294 if (!netif_carrier_ok(tp->dev)) {
1295 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1296 } else {
1297 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1298 tp->dev->name,
1299 (tp->link_config.active_speed == SPEED_1000 ?
1300 1000 :
1301 (tp->link_config.active_speed == SPEED_100 ?
1302 100 : 10)),
1303 (tp->link_config.active_duplex == DUPLEX_FULL ?
1304 "full" : "half"));
1305
1306 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1307 "%s for RX.\n",
1308 tp->dev->name,
1309 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1310 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1311 }
1312}
1313
1314static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1315{
1316 u32 new_tg3_flags = 0;
1317 u32 old_rx_mode = tp->rx_mode;
1318 u32 old_tx_mode = tp->tx_mode;
1319
1320 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1321
1322 /* Convert 1000BaseX flow control bits to 1000BaseT
1323 * bits before resolving flow control.
1324 */
1325 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1326 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1327 ADVERTISE_PAUSE_ASYM);
1328 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1329
1330 if (local_adv & ADVERTISE_1000XPAUSE)
1331 local_adv |= ADVERTISE_PAUSE_CAP;
1332 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1333 local_adv |= ADVERTISE_PAUSE_ASYM;
1334 if (remote_adv & LPA_1000XPAUSE)
1335 remote_adv |= LPA_PAUSE_CAP;
1336 if (remote_adv & LPA_1000XPAUSE_ASYM)
1337 remote_adv |= LPA_PAUSE_ASYM;
1338 }
1339
1da177e4
LT
1340 if (local_adv & ADVERTISE_PAUSE_CAP) {
1341 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1342 if (remote_adv & LPA_PAUSE_CAP)
1343 new_tg3_flags |=
1344 (TG3_FLAG_RX_PAUSE |
1345 TG3_FLAG_TX_PAUSE);
1346 else if (remote_adv & LPA_PAUSE_ASYM)
1347 new_tg3_flags |=
1348 (TG3_FLAG_RX_PAUSE);
1349 } else {
1350 if (remote_adv & LPA_PAUSE_CAP)
1351 new_tg3_flags |=
1352 (TG3_FLAG_RX_PAUSE |
1353 TG3_FLAG_TX_PAUSE);
1354 }
1355 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1356 if ((remote_adv & LPA_PAUSE_CAP) &&
1357 (remote_adv & LPA_PAUSE_ASYM))
1358 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1359 }
1360
1361 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1362 tp->tg3_flags |= new_tg3_flags;
1363 } else {
1364 new_tg3_flags = tp->tg3_flags;
1365 }
1366
1367 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1368 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1369 else
1370 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1371
1372 if (old_rx_mode != tp->rx_mode) {
1373 tw32_f(MAC_RX_MODE, tp->rx_mode);
1374 }
1375
1376 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1377 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1378 else
1379 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1380
1381 if (old_tx_mode != tp->tx_mode) {
1382 tw32_f(MAC_TX_MODE, tp->tx_mode);
1383 }
1384}
1385
1386static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1387{
1388 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1389 case MII_TG3_AUX_STAT_10HALF:
1390 *speed = SPEED_10;
1391 *duplex = DUPLEX_HALF;
1392 break;
1393
1394 case MII_TG3_AUX_STAT_10FULL:
1395 *speed = SPEED_10;
1396 *duplex = DUPLEX_FULL;
1397 break;
1398
1399 case MII_TG3_AUX_STAT_100HALF:
1400 *speed = SPEED_100;
1401 *duplex = DUPLEX_HALF;
1402 break;
1403
1404 case MII_TG3_AUX_STAT_100FULL:
1405 *speed = SPEED_100;
1406 *duplex = DUPLEX_FULL;
1407 break;
1408
1409 case MII_TG3_AUX_STAT_1000HALF:
1410 *speed = SPEED_1000;
1411 *duplex = DUPLEX_HALF;
1412 break;
1413
1414 case MII_TG3_AUX_STAT_1000FULL:
1415 *speed = SPEED_1000;
1416 *duplex = DUPLEX_FULL;
1417 break;
1418
1419 default:
1420 *speed = SPEED_INVALID;
1421 *duplex = DUPLEX_INVALID;
1422 break;
1423 };
1424}
1425
/* Program the copper PHY's advertisement registers and (re)start link
 * bring-up.  Three regimes:
 *   - low-power mode: advertise only 10bT (plus 100bT if WOL needs it);
 *   - autoneg with no specific speed requested: advertise everything
 *     the board supports;
 *   - forced speed/duplex: advertise just that mode and write BMCR
 *     directly (with a loopback trick to drop the old link first).
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific mode requested: advertise all supported
		 * modes (masking gigabit on 10/100-only boards).
		 */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0: force master mode (chip erratum). */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Briefly put the PHY in loopback so the old link
			 * drops, wait (up to ~15ms) for loss of link, then
			 * write the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR latches; read twice for current state. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1564
1565static int tg3_init_5401phy_dsp(struct tg3 *tp)
1566{
1567 int err;
1568
1569 /* Turn off tap power management. */
1570 /* Set Extended packet length bit */
1571 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1572
1573 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1574 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1575
1576 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1577 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1578
1579 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1580 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1581
1582 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1583 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1584
1585 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1586 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1587
1588 udelay(40);
1589
1590 return err;
1591}
1592
1593static int tg3_copper_is_advertising_all(struct tg3 *tp)
1594{
1595 u32 adv_reg, all_mask;
1596
1597 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1598 return 0;
1599
1600 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1601 ADVERTISE_100HALF | ADVERTISE_100FULL);
1602 if ((adv_reg & all_mask) != all_mask)
1603 return 0;
1604 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1605 u32 tg3_ctrl;
1606
1607 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1608 return 0;
1609
1610 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1611 MII_TG3_CTRL_ADV_1000_FULL);
1612 if ((tg3_ctrl & all_mask) != all_mask)
1613 return 0;
1614 }
1615 return 1;
1616}
1617
/* Bring up / re-evaluate the link on a copper PHY: optionally reset the
 * PHY, apply per-chip workarounds, poll link status, resolve speed,
 * duplex and flow control, program MAC_MODE to match, and report
 * carrier changes to the network stack.
 *
 * @force_reset: nonzero to unconditionally reset the PHY first.
 * Returns 0 (always; PHY read failures are tolerated inline).
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear stale link/config attention bits before polling. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* 5401 needs its DSP reloaded when link is down. */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Wait up to ~10ms for link to come back. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a reset + DSP
			 * reload to recover the link.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			/* Enable the capacitive-coupling workaround bit
			 * and come back later to evaluate the link.
			 */
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for AUX status to become valid (nonzero). */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane BMCR value (0x7fff = bogus read). */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC's port mode/duplex to match the PHY result. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on a fast bus: notify the firmware via the
	 * firmware mailbox (hardware erratum handling).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
1896
/* Software state for the 1000BASE-X (fiber) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine() below.  Times are in
 * units of state-machine ticks (ap->cur_time increments per call).
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	/* MR_* management/status flags, modeled on IEEE 802.3 clause 37
	 * MR_* variables; MR_LP_ADV_* reflect the link partner's config.
	 */
	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;	/* tick counters */

	/* Ability-match tracking: last received config word and how
	 * many consecutive times it has repeated.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* TX/RX config words; ANEG_CFG_* are bits within them. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};

/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must hold before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
1960
/* One step of the software 1000BASE-X autonegotiation state machine
 * (modeled on IEEE 802.3 clause 37).  Samples the received config word
 * from the MAC, updates ability/ack/idle match tracking, then advances
 * @ap->state.  Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB to
 * request timed re-entry, ANEG_DONE on completion, or ANEG_FAILED.
 * Statement order within states mirrors the hardware protocol; do not
 * reorder.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: zero all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* ability_match is set after the same nonzero config
		 * word has been seen more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: link partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start sending an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart for the settle time, then probe. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner acked; its word (ignoring ACK) must
			 * match what ability-detect captured.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner restarted negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertised abilities into
		 * the MR_LP_ADV_* flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange unsupported. */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; send idles instead. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2208
2209static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2210{
2211 int res = 0;
2212 struct tg3_fiber_aneginfo aninfo;
2213 int status = ANEG_FAILED;
2214 unsigned int tick;
2215 u32 tmp;
2216
2217 tw32_f(MAC_TX_AUTO_NEG, 0);
2218
2219 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2220 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2221 udelay(40);
2222
2223 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2224 udelay(40);
2225
2226 memset(&aninfo, 0, sizeof(aninfo));
2227 aninfo.flags |= MR_AN_ENABLE;
2228 aninfo.state = ANEG_STATE_UNKNOWN;
2229 aninfo.cur_time = 0;
2230 tick = 0;
2231 while (++tick < 195000) {
2232 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2233 if (status == ANEG_DONE || status == ANEG_FAILED)
2234 break;
2235
2236 udelay(1);
2237 }
2238
2239 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2240 tw32_f(MAC_MODE, tp->mac_mode);
2241 udelay(40);
2242
2243 *flags = aninfo.flags;
2244
2245 if (status == ANEG_DONE &&
2246 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2247 MR_LP_ADV_FULL_DUPLEX)))
2248 res = 1;
2249
2250 return res;
2251}
2252
/* One-time initialization of the BCM8002 SerDes PHY.
 *
 * This is a vendor register cookbook: PLL setup, soft reset, POR
 * toggle and settle delays.  The exact write order and delays are
 * significant; the raw register numbers/values (0x10, 0x8411, ...)
 * come from the Broadcom programming guide and their bit meanings
 * are not visible here.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2302
/* Bring up a fiber link using the on-chip SG_DIG hardware autoneg
 * engine.  Returns 1 if the link came up, 0 otherwise; flow control
 * is programmed as a side effect on link-up.
 *
 * NOTE(review): the magic MAC_SERDES_CFG / SG_DIG_CTRL constants are
 * vendor cookbook values; individual bit meanings are not visible in
 * this file and are only documented where a comment says so.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the serdes_cfg
	 * workaround writes below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	/* Forced-speed path: disable the hardware autoneg engine if it
	 * is currently enabled, then report link purely on PCS sync.
	 */
	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymmetric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg: pulse bit 30 (soft reset)
		 * around the desired control value.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Give time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: derive flow control from the
			 * partner's pause bits in SG_DIG_STATUS.
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  Skip the fallback the
			 * first time through (PHY_JUST_INITTED); afterwards
			 * disable the engine and try parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2427
/* Bring up a fiber link with the software autoneg state machine
 * (fiber_autoneg) instead of the SG_DIG hardware engine, or force
 * 1000FD when autoneg is disabled.  Returns 1 if link is up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			/* Map the partner's MR_* pause flags onto MII-style
			 * LPA bits for tg3_setup_flow_control().
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack SYNC/CFG change events until they stop arriving
		 * (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we have sync and no incoming config
		 * code words: link is up by parallel detection.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2485
/* (Re)configure a TBI/fiber link: pick hardware or software autoneg,
 * update MAC mode, LEDs, carrier state, and report link changes.
 * Always returns 0.
 *
 * @force_reset is accepted for signature parity with the other
 * tg3_setup_*_phy() variants but is not used here.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot pause/speed/duplex so we can report only real changes
	 * at the bottom of the function.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg link already up and stable
	 * (synced, signal, no pending config words) - just ack the
	 * change bits and keep the current state.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear any stale link-change indication in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack SYNC/CFG change events until they stop (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	/* Lost sync: declare link down; if autonegotiating, pulse
	 * SEND_CONFIGS to restart the exchange.
	 */
	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000FD when up; drive the link LED manually. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier state and log link transitions; also log if
	 * the link stayed up but pause/speed/duplex changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2599
747e8f8b
MC
/* (Re)configure a fiber link whose SerDes is driven through MII
 * registers (1000Base-X advertisement bits in MII_ADVERTISE).
 * Handles three cases: parallel-detect already latched, autoneg,
 * and forced speed/duplex.  Returns the OR of tg3_readphy /
 * tg3_writephy error codes accumulated along the way.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status events before reprogramming. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR latches link-down; read twice to get current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000Base-X advertisement from
		 * link_config; keep unrelated bits from the PHY.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Advertisement changed (or autoneg was off): restart
		 * autoneg and return early - link state is checked on
		 * a later invocation.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg, set requested duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Briefly restart autoneg with an empty
				 * advertisement so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
						  BMCR_ANRESTART |
						  BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Double read: clear the latched link bit. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the common
			 * subset of local and partner abilities.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate carrier state; clear parallel-detect latch on
	 * link loss so detection can run again.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2753
/* Periodic helper for MII-SerDes links: if autoneg never completes
 * but we see signal detect and no incoming config code words, force
 * the link up at 1000FD ("parallel detection").  Conversely, if a
 * parallel-detected link starts receiving config words, re-enable
 * autoneg.  Called from the driver's timer path (no return value).
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			/* (double read - presumably clears a latched value;
			 * TODO confirm against the PHY datasheet) */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2811
1da177e4
LT
2812static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2813{
2814 int err;
2815
2816 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2817 err = tg3_setup_fiber_phy(tp, force_reset);
747e8f8b
MC
2818 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2819 err = tg3_setup_fiber_mii_phy(tp, force_reset);
1da177e4
LT
2820 } else {
2821 err = tg3_setup_copper_phy(tp, force_reset);
2822 }
2823
2824 if (tp->link_config.active_speed == SPEED_1000 &&
2825 tp->link_config.active_duplex == DUPLEX_HALF)
2826 tw32(MAC_TX_LENGTHS,
2827 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2828 (6 << TX_LENGTHS_IPG_SHIFT) |
2829 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2830 else
2831 tw32(MAC_TX_LENGTHS,
2832 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2833 (6 << TX_LENGTHS_IPG_SHIFT) |
2834 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2835
2836 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2837 if (netif_carrier_ok(tp->dev)) {
2838 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 2839 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
2840 } else {
2841 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2842 }
2843 }
2844
2845 return err;
2846}
2847
2848/* Tigon3 never reports partial packet sends. So we do not
2849 * need special logic to handle SKBs that have not had all
2850 * of their frags sent yet, like SunGEM does.
2851 */
2852static void tg3_tx(struct tg3 *tp)
2853{
2854 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2855 u32 sw_idx = tp->tx_cons;
2856
2857 while (sw_idx != hw_idx) {
2858 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2859 struct sk_buff *skb = ri->skb;
2860 int i;
2861
2862 if (unlikely(skb == NULL))
2863 BUG();
2864
2865 pci_unmap_single(tp->pdev,
2866 pci_unmap_addr(ri, mapping),
2867 skb_headlen(skb),
2868 PCI_DMA_TODEVICE);
2869
2870 ri->skb = NULL;
2871
2872 sw_idx = NEXT_TX(sw_idx);
2873
2874 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2875 if (unlikely(sw_idx == hw_idx))
2876 BUG();
2877
2878 ri = &tp->tx_buffers[sw_idx];
2879 if (unlikely(ri->skb != NULL))
2880 BUG();
2881
2882 pci_unmap_page(tp->pdev,
2883 pci_unmap_addr(ri, mapping),
2884 skb_shinfo(skb)->frags[i].size,
2885 PCI_DMA_TODEVICE);
2886
2887 sw_idx = NEXT_TX(sw_idx);
2888 }
2889
f47c11ee 2890 dev_kfree_skb(skb);
1da177e4
LT
2891 }
2892
2893 tp->tx_cons = sw_idx;
2894
51b91468
MC
2895 if (unlikely(netif_queue_stopped(tp->dev))) {
2896 spin_lock(&tp->tx_lock);
2897 if (netif_queue_stopped(tp->dev) &&
2898 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2899 netif_wake_queue(tp->dev);
2900 spin_unlock(&tp->tx_lock);
2901 }
1da177e4
LT
2902}
2903
2904/* Returns size of skb allocated or < 0 on error.
2905 *
2906 * We only need to fill in the address because the other members
2907 * of the RX descriptor are invariant, see tg3_init_rings.
2908 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
2910 * posting buffers we only dirty the first cache line of the RX
2911 * descriptor (containing the address). Whereas for the RX status
2912 * buffers the cpu only reads the last cacheline of the RX descriptor
2913 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2914 */
2915static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2916 int src_idx, u32 dest_idx_unmasked)
2917{
2918 struct tg3_rx_buffer_desc *desc;
2919 struct ring_info *map, *src_map;
2920 struct sk_buff *skb;
2921 dma_addr_t mapping;
2922 int skb_size, dest_idx;
2923
2924 src_map = NULL;
2925 switch (opaque_key) {
2926 case RXD_OPAQUE_RING_STD:
2927 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2928 desc = &tp->rx_std[dest_idx];
2929 map = &tp->rx_std_buffers[dest_idx];
2930 if (src_idx >= 0)
2931 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 2932 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
2933 break;
2934
2935 case RXD_OPAQUE_RING_JUMBO:
2936 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2937 desc = &tp->rx_jumbo[dest_idx];
2938 map = &tp->rx_jumbo_buffers[dest_idx];
2939 if (src_idx >= 0)
2940 src_map = &tp->rx_jumbo_buffers[src_idx];
2941 skb_size = RX_JUMBO_PKT_BUF_SZ;
2942 break;
2943
2944 default:
2945 return -EINVAL;
2946 };
2947
2948 /* Do not overwrite any of the map or rp information
2949 * until we are sure we can commit to a new buffer.
2950 *
2951 * Callers depend upon this behavior and assume that
2952 * we leave everything unchanged if we fail.
2953 */
2954 skb = dev_alloc_skb(skb_size);
2955 if (skb == NULL)
2956 return -ENOMEM;
2957
2958 skb->dev = tp->dev;
2959 skb_reserve(skb, tp->rx_offset);
2960
2961 mapping = pci_map_single(tp->pdev, skb->data,
2962 skb_size - tp->rx_offset,
2963 PCI_DMA_FROMDEVICE);
2964
2965 map->skb = skb;
2966 pci_unmap_addr_set(map, mapping, mapping);
2967
2968 if (src_map != NULL)
2969 src_map->skb = NULL;
2970
2971 desc->addr_hi = ((u64)mapping >> 32);
2972 desc->addr_lo = ((u64)mapping & 0xffffffff);
2973
2974 return skb_size;
2975}
2976
2977/* We only need to move over in the address because the other
2978 * members of the RX descriptor are invariant. See notes above
2979 * tg3_alloc_rx_skb for full details.
2980 */
2981static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2982 int src_idx, u32 dest_idx_unmasked)
2983{
2984 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2985 struct ring_info *src_map, *dest_map;
2986 int dest_idx;
2987
2988 switch (opaque_key) {
2989 case RXD_OPAQUE_RING_STD:
2990 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2991 dest_desc = &tp->rx_std[dest_idx];
2992 dest_map = &tp->rx_std_buffers[dest_idx];
2993 src_desc = &tp->rx_std[src_idx];
2994 src_map = &tp->rx_std_buffers[src_idx];
2995 break;
2996
2997 case RXD_OPAQUE_RING_JUMBO:
2998 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2999 dest_desc = &tp->rx_jumbo[dest_idx];
3000 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3001 src_desc = &tp->rx_jumbo[src_idx];
3002 src_map = &tp->rx_jumbo_buffers[src_idx];
3003 break;
3004
3005 default:
3006 return;
3007 };
3008
3009 dest_map->skb = src_map->skb;
3010 pci_unmap_addr_set(dest_map, mapping,
3011 pci_unmap_addr(src_map, mapping));
3012 dest_desc->addr_hi = src_desc->addr_hi;
3013 dest_desc->addr_lo = src_desc->addr_lo;
3014
3015 src_map->skb = NULL;
3016}
3017
#if TG3_VLAN_TAG_USED
/* Hand a received skb with a hardware-extracted VLAN tag to the
 * stack's VLAN-accelerated receive path.  Compiled only when VLAN
 * support is configured (see TG3_VLAN_TAG_USED at top of file).
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3024
3025/* The RX ring scheme is composed of multiple rings which post fresh
3026 * buffers to the chip, and one special ring the chip uses to report
3027 * status back to the host.
3028 *
3029 * The special ring reports the status of received packets to the
3030 * host. The chip does not write into the original descriptor the
3031 * RX buffer was obtained from. The chip simply takes the original
3032 * descriptor as provided by the host, updates the status and length
3033 * field, then writes this into the next status ring entry.
3034 *
3035 * Each ring the host uses to post buffers to the chip is described
3036 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3037 * it is first placed into the on-chip ram. When the packet's length
3038 * is known, it walks down the TG3_BDINFO entries to select the ring.
3039 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3040 * which is within the range of the new packet's length is chosen.
3041 *
3042 * The "separate ring for rx status" scheme may sound queer, but it makes
3043 * sense from a cache coherency perspective. If only the host writes
3044 * to the buffer post rings, and only the chip writes to the rx status
3045 * rings, then cache lines never move beyond shared-modified state.
3046 * If both the host and chip were to write into the same ring, cache line
3047 * eviction could occur since both entities want it in an exclusive state.
3048 */
3049static int tg3_rx(struct tg3 *tp, int budget)
3050{
3051 u32 work_mask;
483ba50b
MC
3052 u32 sw_idx = tp->rx_rcb_ptr;
3053 u16 hw_idx;
1da177e4
LT
3054 int received;
3055
3056 hw_idx = tp->hw_status->idx[0].rx_producer;
3057 /*
3058 * We need to order the read of hw_idx and the read of
3059 * the opaque cookie.
3060 */
3061 rmb();
1da177e4
LT
3062 work_mask = 0;
3063 received = 0;
3064 while (sw_idx != hw_idx && budget > 0) {
3065 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3066 unsigned int len;
3067 struct sk_buff *skb;
3068 dma_addr_t dma_addr;
3069 u32 opaque_key, desc_idx, *post_ptr;
3070
3071 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3072 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3073 if (opaque_key == RXD_OPAQUE_RING_STD) {
3074 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3075 mapping);
3076 skb = tp->rx_std_buffers[desc_idx].skb;
3077 post_ptr = &tp->rx_std_ptr;
3078 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3079 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3080 mapping);
3081 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3082 post_ptr = &tp->rx_jumbo_ptr;
3083 }
3084 else {
3085 goto next_pkt_nopost;
3086 }
3087
3088 work_mask |= opaque_key;
3089
3090 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3091 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3092 drop_it:
3093 tg3_recycle_rx(tp, opaque_key,
3094 desc_idx, *post_ptr);
3095 drop_it_no_recycle:
3096 /* Other statistics kept track of by card. */
3097 tp->net_stats.rx_dropped++;
3098 goto next_pkt;
3099 }
3100
3101 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3102
3103 if (len > RX_COPY_THRESHOLD
3104 && tp->rx_offset == 2
3105 /* rx_offset != 2 iff this is a 5701 card running
3106 * in PCI-X mode [see tg3_get_invariants()] */
3107 ) {
3108 int skb_size;
3109
3110 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3111 desc_idx, *post_ptr);
3112 if (skb_size < 0)
3113 goto drop_it;
3114
3115 pci_unmap_single(tp->pdev, dma_addr,
3116 skb_size - tp->rx_offset,
3117 PCI_DMA_FROMDEVICE);
3118
3119 skb_put(skb, len);
3120 } else {
3121 struct sk_buff *copy_skb;
3122
3123 tg3_recycle_rx(tp, opaque_key,
3124 desc_idx, *post_ptr);
3125
3126 copy_skb = dev_alloc_skb(len + 2);
3127 if (copy_skb == NULL)
3128 goto drop_it_no_recycle;
3129
3130 copy_skb->dev = tp->dev;
3131 skb_reserve(copy_skb, 2);
3132 skb_put(copy_skb, len);
3133 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3134 memcpy(copy_skb->data, skb->data, len);
3135 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3136
3137 /* We'll reuse the original ring buffer. */
3138 skb = copy_skb;
3139 }
3140
3141 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3142 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3143 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3144 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3145 skb->ip_summed = CHECKSUM_UNNECESSARY;
3146 else
3147 skb->ip_summed = CHECKSUM_NONE;
3148
3149 skb->protocol = eth_type_trans(skb, tp->dev);
3150#if TG3_VLAN_TAG_USED
3151 if (tp->vlgrp != NULL &&
3152 desc->type_flags & RXD_FLAG_VLAN) {
3153 tg3_vlan_rx(tp, skb,
3154 desc->err_vlan & RXD_VLAN_MASK);
3155 } else
3156#endif
3157 netif_receive_skb(skb);
3158
3159 tp->dev->last_rx = jiffies;
3160 received++;
3161 budget--;
3162
3163next_pkt:
3164 (*post_ptr)++;
3165next_pkt_nopost:
483ba50b
MC
3166 sw_idx++;
3167 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
52f6d697
MC
3168
3169 /* Refresh hw_idx to see if there is new work */
3170 if (sw_idx == hw_idx) {
3171 hw_idx = tp->hw_status->idx[0].rx_producer;
3172 rmb();
3173 }
1da177e4
LT
3174 }
3175
3176 /* ACK the status ring. */
483ba50b
MC
3177 tp->rx_rcb_ptr = sw_idx;
3178 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
1da177e4
LT
3179
3180 /* Refill RX ring(s). */
3181 if (work_mask & RXD_OPAQUE_RING_STD) {
3182 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3183 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3184 sw_idx);
3185 }
3186 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3187 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3188 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3189 sw_idx);
3190 }
3191 mmiowb();
3192
3193 return received;
3194}
3195
3196static int tg3_poll(struct net_device *netdev, int *budget)
3197{
3198 struct tg3 *tp = netdev_priv(netdev);
3199 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3200 int done;
3201
1da177e4
LT
3202 /* handle link change and other phy events */
3203 if (!(tp->tg3_flags &
3204 (TG3_FLAG_USE_LINKCHG_REG |
3205 TG3_FLAG_POLL_SERDES))) {
3206 if (sblk->status & SD_STATUS_LINK_CHG) {
3207 sblk->status = SD_STATUS_UPDATED |
3208 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 3209 spin_lock(&tp->lock);
1da177e4 3210 tg3_setup_phy(tp, 0);
f47c11ee 3211 spin_unlock(&tp->lock);
1da177e4
LT
3212 }
3213 }
3214
3215 /* run TX completion thread */
3216 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
1da177e4 3217 tg3_tx(tp);
1da177e4
LT
3218 }
3219
1da177e4
LT
3220 /* run RX thread, within the bounds set by NAPI.
3221 * All RX "locking" is done by ensuring outside
3222 * code synchronizes with dev->poll()
3223 */
1da177e4
LT
3224 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3225 int orig_budget = *budget;
3226 int work_done;
3227
3228 if (orig_budget > netdev->quota)
3229 orig_budget = netdev->quota;
3230
3231 work_done = tg3_rx(tp, orig_budget);
3232
3233 *budget -= work_done;
3234 netdev->quota -= work_done;
1da177e4
LT
3235 }
3236
38f3843e 3237 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
f7383c22 3238 tp->last_tag = sblk->status_tag;
38f3843e
MC
3239 rmb();
3240 } else
3241 sblk->status &= ~SD_STATUS_UPDATED;
f7383c22 3242
1da177e4 3243 /* if no more work, tell net stack and NIC we're done */
f7383c22 3244 done = !tg3_has_work(tp);
1da177e4 3245 if (done) {
f47c11ee 3246 netif_rx_complete(netdev);
1da177e4 3247 tg3_restart_ints(tp);
1da177e4
LT
3248 }
3249
3250 return (done ? 0 : 1);
3251}
3252
f47c11ee
DM
3253static void tg3_irq_quiesce(struct tg3 *tp)
3254{
3255 BUG_ON(tp->irq_sync);
3256
3257 tp->irq_sync = 1;
3258 smp_mb();
3259
3260 synchronize_irq(tp->pdev->irq);
3261}
3262
3263static inline int tg3_irq_sync(struct tg3 *tp)
3264{
3265 return tp->irq_sync;
3266}
3267
3268/* Fully shutdown all tg3 driver activity elsewhere in the system.
3269 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3270 * with as well. Most of the time, this is not necessary except when
3271 * shutting down the device.
3272 */
3273static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3274{
3275 if (irq_sync)
3276 tg3_irq_quiesce(tp);
3277 spin_lock_bh(&tp->lock);
3278 spin_lock(&tp->tx_lock);
3279}
3280
3281static inline void tg3_full_unlock(struct tg3 *tp)
3282{
3283 spin_unlock(&tp->tx_lock);
3284 spin_unlock_bh(&tp->lock);
3285}
3286
88b06bc2
MC
3287/* MSI ISR - No need to check for interrupt sharing and no need to
3288 * flush status block and interrupt mailbox. PCI ordering rules
3289 * guarantee that MSI will arrive after the status block.
3290 */
3291static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3292{
3293 struct net_device *dev = dev_id;
3294 struct tg3 *tp = netdev_priv(dev);
88b06bc2 3295
61487480
MC
3296 prefetch(tp->hw_status);
3297 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
88b06bc2 3298 /*
fac9b83e 3299 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 3300 * chip-internal interrupt pending events.
fac9b83e 3301 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
3302 * NIC to stop sending us irqs, engaging "in-intr-handler"
3303 * event coalescing.
3304 */
3305 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
61487480 3306 if (likely(!tg3_irq_sync(tp)))
88b06bc2 3307 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3308
88b06bc2
MC
3309 return IRQ_RETVAL(1);
3310}
3311
1da177e4
LT
3312static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3313{
3314 struct net_device *dev = dev_id;
3315 struct tg3 *tp = netdev_priv(dev);
3316 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3317 unsigned int handled = 1;
3318
1da177e4
LT
3319 /* In INTx mode, it is possible for the interrupt to arrive at
3320 * the CPU before the status block posted prior to the interrupt.
3321 * Reading the PCI State register will confirm whether the
3322 * interrupt is ours and will flush the status block.
3323 */
3324 if ((sblk->status & SD_STATUS_UPDATED) ||
3325 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3326 /*
fac9b83e 3327 * Writing any value to intr-mbox-0 clears PCI INTA# and
1da177e4 3328 * chip-internal interrupt pending events.
fac9b83e 3329 * Writing non-zero to intr-mbox-0 additional tells the
1da177e4
LT
3330 * NIC to stop sending us irqs, engaging "in-intr-handler"
3331 * event coalescing.
3332 */
3333 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3334 0x00000001);
f47c11ee
DM
3335 if (tg3_irq_sync(tp))
3336 goto out;
fac9b83e 3337 sblk->status &= ~SD_STATUS_UPDATED;
61487480
MC
3338 if (likely(tg3_has_work(tp))) {
3339 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
fac9b83e 3340 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3341 } else {
fac9b83e
DM
3342 /* No work, shared interrupt perhaps? re-enable
3343 * interrupts, and flush that PCI write
3344 */
09ee929c 3345 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
fac9b83e 3346 0x00000000);
fac9b83e
DM
3347 }
3348 } else { /* shared interrupt */
3349 handled = 0;
3350 }
f47c11ee 3351out:
fac9b83e
DM
3352 return IRQ_RETVAL(handled);
3353}
3354
3355static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3356{
3357 struct net_device *dev = dev_id;
3358 struct tg3 *tp = netdev_priv(dev);
3359 struct tg3_hw_status *sblk = tp->hw_status;
fac9b83e
DM
3360 unsigned int handled = 1;
3361
fac9b83e
DM
3362 /* In INTx mode, it is possible for the interrupt to arrive at
3363 * the CPU before the status block posted prior to the interrupt.
3364 * Reading the PCI State register will confirm whether the
3365 * interrupt is ours and will flush the status block.
3366 */
38f3843e 3367 if ((sblk->status_tag != tp->last_tag) ||
fac9b83e 3368 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1da177e4 3369 /*
fac9b83e
DM
3370 * writing any value to intr-mbox-0 clears PCI INTA# and
3371 * chip-internal interrupt pending events.
3372 * writing non-zero to intr-mbox-0 additional tells the
3373 * NIC to stop sending us irqs, engaging "in-intr-handler"
3374 * event coalescing.
1da177e4 3375 */
fac9b83e
DM
3376 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3377 0x00000001);
f47c11ee
DM
3378 if (tg3_irq_sync(tp))
3379 goto out;
38f3843e 3380 if (netif_rx_schedule_prep(dev)) {
61487480 3381 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
38f3843e
MC
3382 /* Update last_tag to mark that this status has been
3383 * seen. Because interrupt may be shared, we may be
3384 * racing with tg3_poll(), so only update last_tag
3385 * if tg3_poll() is not scheduled.
1da177e4 3386 */
38f3843e
MC
3387 tp->last_tag = sblk->status_tag;
3388 __netif_rx_schedule(dev);
1da177e4
LT
3389 }
3390 } else { /* shared interrupt */
3391 handled = 0;
3392 }
f47c11ee 3393out:
1da177e4
LT
3394 return IRQ_RETVAL(handled);
3395}
3396
7938109f
MC
3397/* ISR for interrupt test */
3398static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3399 struct pt_regs *regs)
3400{
3401 struct net_device *dev = dev_id;
3402 struct tg3 *tp = netdev_priv(dev);
3403 struct tg3_hw_status *sblk = tp->hw_status;
3404
f9804ddb
MC
3405 if ((sblk->status & SD_STATUS_UPDATED) ||
3406 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7938109f
MC
3407 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3408 0x00000001);
3409 return IRQ_RETVAL(1);
3410 }
3411 return IRQ_RETVAL(0);
3412}
3413
static int tg3_init_hw(struct tg3 *);
static int tg3_halt(struct tg3 *, int, int);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: drive the normal INTx handler by hand. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3425
3426static void tg3_reset_task(void *_data)
3427{
3428 struct tg3 *tp = _data;
3429 unsigned int restart_timer;
3430
3431 tg3_netif_stop(tp);
3432
f47c11ee 3433 tg3_full_lock(tp, 1);
1da177e4
LT
3434
3435 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3436 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3437
944d980e 3438 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
1da177e4
LT
3439 tg3_init_hw(tp);
3440
3441 tg3_netif_start(tp);
3442
f47c11ee 3443 tg3_full_unlock(tp);
1da177e4
LT
3444
3445 if (restart_timer)
3446 mod_timer(&tp->timer, jiffies + 1);
3447}
3448
3449static void tg3_tx_timeout(struct net_device *dev)
3450{
3451 struct tg3 *tp = netdev_priv(dev);
3452
3453 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3454 dev->name);
3455
3456 schedule_work(&tp->reset_task);
3457}
3458
c58ec932
MC
3459/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3460static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3461{
3462 u32 base = (u32) mapping & 0xffffffff;
3463
3464 return ((base > 0xffffdcc0) &&
3465 (base + len + 8 < base));
3466}
3467
1da177e4
LT
3468static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3469
3470static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
c58ec932
MC
3471 u32 last_plus_one, u32 *start,
3472 u32 base_flags, u32 mss)
1da177e4
LT
3473{
3474 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
c58ec932 3475 dma_addr_t new_addr = 0;
1da177e4 3476 u32 entry = *start;
c58ec932 3477 int i, ret = 0;
1da177e4
LT
3478
3479 if (!new_skb) {
c58ec932
MC
3480 ret = -1;
3481 } else {
3482 /* New SKB is guaranteed to be linear. */
3483 entry = *start;
3484 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3485 PCI_DMA_TODEVICE);
3486 /* Make sure new skb does not cross any 4G boundaries.
3487 * Drop the packet if it does.
3488 */
3489 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3490 ret = -1;
3491 dev_kfree_skb(new_skb);
3492 new_skb = NULL;
3493 } else {
3494 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3495 base_flags, 1 | (mss << 1));
3496 *start = NEXT_TX(entry);
3497 }
1da177e4
LT
3498 }
3499
1da177e4
LT
3500 /* Now clean up the sw ring entries. */
3501 i = 0;
3502 while (entry != last_plus_one) {
3503 int len;
3504
3505 if (i == 0)
3506 len = skb_headlen(skb);
3507 else
3508 len = skb_shinfo(skb)->frags[i-1].size;
3509 pci_unmap_single(tp->pdev,
3510 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3511 len, PCI_DMA_TODEVICE);
3512 if (i == 0) {
3513 tp->tx_buffers[entry].skb = new_skb;
3514 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3515 } else {
3516 tp->tx_buffers[entry].skb = NULL;
3517 }
3518 entry = NEXT_TX(entry);
3519 i++;
3520 }
3521
3522 dev_kfree_skb(skb);
3523
c58ec932 3524 return ret;
1da177e4
LT
3525}
3526
3527static void tg3_set_txd(struct tg3 *tp, int entry,
3528 dma_addr_t mapping, int len, u32 flags,
3529 u32 mss_and_is_end)
3530{
3531 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3532 int is_end = (mss_and_is_end & 0x1);
3533 u32 mss = (mss_and_is_end >> 1);
3534 u32 vlan_tag = 0;
3535
3536 if (is_end)
3537 flags |= TXD_FLAG_END;
3538 if (flags & TXD_FLAG_VLAN) {
3539 vlan_tag = flags >> 16;
3540 flags &= 0xffff;
3541 }
3542 vlan_tag |= (mss << TXD_MSS_SHIFT);
3543
3544 txd->addr_hi = ((u64) mapping >> 32);
3545 txd->addr_lo = ((u64) mapping & 0xffffffff);
3546 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3547 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3548}
3549
1da177e4
LT
3550static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3551{
3552 struct tg3 *tp = netdev_priv(dev);
3553 dma_addr_t mapping;
1da177e4
LT
3554 u32 len, entry, base_flags, mss;
3555 int would_hit_hwbug;
1da177e4
LT
3556
3557 len = skb_headlen(skb);
3558
3559 /* No BH disabling for tx_lock here. We are running in BH disabled
3560 * context and TX reclaim runs via tp->poll inside of a software
f47c11ee
DM
3561 * interrupt. Furthermore, IRQ processing runs lockless so we have
3562 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 3563 */
f47c11ee 3564 if (!spin_trylock(&tp->tx_lock))
1da177e4 3565 return NETDEV_TX_LOCKED;
1da177e4
LT
3566
3567 /* This is a hard error, log it. */
3568 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3569 netif_stop_queue(dev);
f47c11ee 3570 spin_unlock(&tp->tx_lock);
1da177e4
LT
3571 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3572 dev->name);
3573 return NETDEV_TX_BUSY;
3574 }
3575
3576 entry = tp->tx_prod;
3577 base_flags = 0;
3578 if (skb->ip_summed == CHECKSUM_HW)
3579 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3580#if TG3_TSO_SUPPORT != 0
3581 mss = 0;
3582 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3583 (mss = skb_shinfo(skb)->tso_size) != 0) {
3584 int tcp_opt_len, ip_tcp_len;
3585
3586 if (skb_header_cloned(skb) &&
3587 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3588 dev_kfree_skb(skb);
3589 goto out_unlock;
3590 }
3591
3592 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3593 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3594
3595 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3596 TXD_FLAG_CPU_POST_DMA);
3597
3598 skb->nh.iph->check = 0;
3599 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3600 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3601 skb->h.th->check = 0;
3602 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3603 }
3604 else {
3605 skb->h.th->check =
3606 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3607 skb->nh.iph->daddr,
3608 0, IPPROTO_TCP, 0);
3609 }
3610
3611 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3612 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3613 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3614 int tsflags;
3615
3616 tsflags = ((skb->nh.iph->ihl - 5) +
3617 (tcp_opt_len >> 2));
3618 mss |= (tsflags << 11);
3619 }
3620 } else {
3621 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3622 int tsflags;
3623
3624 tsflags = ((skb->nh.iph->ihl - 5) +
3625 (tcp_opt_len >> 2));
3626 base_flags |= tsflags << 12;
3627 }
3628 }
3629 }
3630#else
3631 mss = 0;
3632#endif
3633#if TG3_VLAN_TAG_USED
3634 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3635 base_flags |= (TXD_FLAG_VLAN |
3636 (vlan_tx_tag_get(skb) << 16));
3637#endif
3638
3639 /* Queue skb data, a.k.a. the main skb fragment. */
3640 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3641
3642 tp->tx_buffers[entry].skb = skb;
3643 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3644
3645 would_hit_hwbug = 0;
3646
3647 if (tg3_4g_overflow_test(mapping, len))
c58ec932 3648 would_hit_hwbug = 1;
1da177e4
LT
3649
3650 tg3_set_txd(tp, entry, mapping, len, base_flags,
3651 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3652
3653 entry = NEXT_TX(entry);
3654
3655 /* Now loop through additional data fragments, and queue them. */
3656 if (skb_shinfo(skb)->nr_frags > 0) {
3657 unsigned int i, last;
3658
3659 last = skb_shinfo(skb)->nr_frags - 1;
3660 for (i = 0; i <= last; i++) {
3661 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3662
3663 len = frag->size;
3664 mapping = pci_map_page(tp->pdev,
3665 frag->page,
3666 frag->page_offset,
3667 len, PCI_DMA_TODEVICE);
3668
3669 tp->tx_buffers[entry].skb = NULL;
3670 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3671
c58ec932
MC
3672 if (tg3_4g_overflow_test(mapping, len))
3673 would_hit_hwbug = 1;
1da177e4
LT
3674
3675 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3676 tg3_set_txd(tp, entry, mapping, len,
3677 base_flags, (i == last)|(mss << 1));
3678 else
3679 tg3_set_txd(tp, entry, mapping, len,
3680 base_flags, (i == last));
3681
3682 entry = NEXT_TX(entry);
3683 }
3684 }
3685
3686 if (would_hit_hwbug) {
3687 u32 last_plus_one = entry;
3688 u32 start;
1da177e4 3689
c58ec932
MC
3690 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3691 start &= (TG3_TX_RING_SIZE - 1);
1da177e4
LT
3692
3693 /* If the workaround fails due to memory/mapping
3694 * failure, silently drop this packet.
3695 */
c58ec932
MC
3696 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3697 &start, base_flags, mss))
1da177e4
LT
3698 goto out_unlock;
3699
3700 entry = start;
3701 }
3702
3703 /* Packets are ready, update Tx producer idx local and on card. */
3704 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3705
3706 tp->tx_prod = entry;
51b91468 3707 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
1da177e4 3708 netif_stop_queue(dev);
51b91468
MC
3709 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3710 netif_wake_queue(tp->dev);
3711 }
1da177e4
LT
3712
3713out_unlock:
3714 mmiowb();
f47c11ee 3715 spin_unlock(&tp->tx_lock);
1da177e4
LT
3716
3717 dev->trans_start = jiffies;
3718
3719 return NETDEV_TX_OK;
3720}
3721
3722static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3723 int new_mtu)
3724{
3725 dev->mtu = new_mtu;
3726
ef7f5ec0 3727 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 3728 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
3729 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3730 ethtool_op_set_tso(dev, 0);
3731 }
3732 else
3733 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3734 } else {
a4e2b347 3735 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 3736 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 3737 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 3738 }
1da177e4
LT
3739}
3740
3741static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3742{
3743 struct tg3 *tp = netdev_priv(dev);
3744
3745 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3746 return -EINVAL;
3747
3748 if (!netif_running(dev)) {
3749 /* We'll just catch it later when the
3750 * device is up'd.
3751 */
3752 tg3_set_mtu(dev, tp, new_mtu);
3753 return 0;
3754 }
3755
3756 tg3_netif_stop(tp);
f47c11ee
DM
3757
3758 tg3_full_lock(tp, 1);
1da177e4 3759
944d980e 3760 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
3761
3762 tg3_set_mtu(dev, tp, new_mtu);
3763
3764 tg3_init_hw(tp);
3765
3766 tg3_netif_start(tp);
3767
f47c11ee 3768 tg3_full_unlock(tp);
1da177e4
LT
3769
3770 return 0;
3771}
3772
3773/* Free up pending packets in all rx/tx rings.
3774 *
3775 * The chip has been shut down and the driver detached from
3776 * the networking, so no interrupts or new tx packets will
3777 * end up in the driver. tp->{tx,}lock is not held and we are not
3778 * in an interrupt context and thus may sleep.
3779 */
3780static void tg3_free_rings(struct tg3 *tp)
3781{
3782 struct ring_info *rxp;
3783 int i;
3784
3785 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3786 rxp = &tp->rx_std_buffers[i];
3787
3788 if (rxp->skb == NULL)
3789 continue;
3790 pci_unmap_single(tp->pdev,
3791 pci_unmap_addr(rxp, mapping),
7e72aad4 3792 tp->rx_pkt_buf_sz - tp->rx_offset,
1da177e4
LT
3793 PCI_DMA_FROMDEVICE);
3794 dev_kfree_skb_any(rxp->skb);
3795 rxp->skb = NULL;
3796 }
3797
3798 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3799 rxp = &tp->rx_jumbo_buffers[i];
3800
3801 if (rxp->skb == NULL)
3802 continue;
3803 pci_unmap_single(tp->pdev,
3804 pci_unmap_addr(rxp, mapping),
3805 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3806 PCI_DMA_FROMDEVICE);
3807 dev_kfree_skb_any(rxp->skb);
3808 rxp->skb = NULL;
3809 }
3810
3811 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3812 struct tx_ring_info *txp;
3813 struct sk_buff *skb;
3814 int j;
3815
3816 txp = &tp->tx_buffers[i];
3817 skb = txp->skb;
3818
3819 if (skb == NULL) {
3820 i++;
3821 continue;
3822 }
3823
3824 pci_unmap_single(tp->pdev,
3825 pci_unmap_addr(txp, mapping),
3826 skb_headlen(skb),
3827 PCI_DMA_TODEVICE);
3828 txp->skb = NULL;
3829
3830 i++;
3831
3832 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3833 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3834 pci_unmap_page(tp->pdev,
3835 pci_unmap_addr(txp, mapping),
3836 skb_shinfo(skb)->frags[j].size,
3837 PCI_DMA_TODEVICE);
3838 i++;
3839 }
3840
3841 dev_kfree_skb_any(skb);
3842 }
3843}
3844
3845/* Initialize tx/rx rings for packet processing.
3846 *
3847 * The chip has been shut down and the driver detached from
3848 * the networking, so no interrupts or new tx packets will
3849 * end up in the driver. tp->{tx,}lock are held and thus
3850 * we may not sleep.
3851 */
3852static void tg3_init_rings(struct tg3 *tp)
3853{
3854 u32 i;
3855
3856 /* Free up all the SKBs. */
3857 tg3_free_rings(tp);
3858
3859 /* Zero out all descriptors. */
3860 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3861 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3862 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3863 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3864
7e72aad4 3865 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
a4e2b347 3866 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
7e72aad4
MC
3867 (tp->dev->mtu > ETH_DATA_LEN))
3868 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3869
1da177e4
LT
3870 /* Initialize invariants of the rings, we only set this
3871 * stuff once. This works because the card does not
3872 * write into the rx buffer posting rings.
3873 */
3874 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3875 struct tg3_rx_buffer_desc *rxd;
3876
3877 rxd = &tp->rx_std[i];
7e72aad4 3878 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
1da177e4
LT
3879 << RXD_LEN_SHIFT;
3880 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3881 rxd->opaque = (RXD_OPAQUE_RING_STD |
3882 (i << RXD_OPAQUE_INDEX_SHIFT));
3883 }
3884
0f893dc6 3885 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
3886 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3887 struct tg3_rx_buffer_desc *rxd;
3888
3889 rxd = &tp->rx_jumbo[i];
3890 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3891 << RXD_LEN_SHIFT;
3892 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3893 RXD_FLAG_JUMBO;
3894 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3895 (i << RXD_OPAQUE_INDEX_SHIFT));
3896 }
3897 }
3898
3899 /* Now allocate fresh SKBs for each rx ring. */
3900 for (i = 0; i < tp->rx_pending; i++) {
3901 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3902 -1, i) < 0)
3903 break;
3904 }
3905
0f893dc6 3906 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
3907 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3908 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3909 -1, i) < 0)
3910 break;
3911 }
3912 }
3913}
3914
3915/*
3916 * Must not be invoked with interrupt sources disabled and
3917 * the hardware shutdown down.
3918 */
3919static void tg3_free_consistent(struct tg3 *tp)
3920{
3921 if (tp->rx_std_buffers) {
3922 kfree(tp->rx_std_buffers);
3923 tp->rx_std_buffers = NULL;
3924 }
3925 if (tp->rx_std) {
3926 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3927 tp->rx_std, tp->rx_std_mapping);
3928 tp->rx_std = NULL;
3929 }
3930 if (tp->rx_jumbo) {
3931 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3932 tp->rx_jumbo, tp->rx_jumbo_mapping);
3933 tp->rx_jumbo = NULL;
3934 }
3935 if (tp->rx_rcb) {
3936 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3937 tp->rx_rcb, tp->rx_rcb_mapping);
3938 tp->rx_rcb = NULL;
3939 }
3940 if (tp->tx_ring) {
3941 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3942 tp->tx_ring, tp->tx_desc_mapping);
3943 tp->tx_ring = NULL;
3944 }
3945 if (tp->hw_status) {
3946 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3947 tp->hw_status, tp->status_mapping);
3948 tp->hw_status = NULL;
3949 }
3950 if (tp->hw_stats) {
3951 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3952 tp->hw_stats, tp->stats_mapping);
3953 tp->hw_stats = NULL;
3954 }
3955}
3956
3957/*
3958 * Must not be invoked with interrupt sources disabled and
3959 * the hardware shutdown down. Can sleep.
3960 */
3961static int tg3_alloc_consistent(struct tg3 *tp)
3962{
3963 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3964 (TG3_RX_RING_SIZE +
3965 TG3_RX_JUMBO_RING_SIZE)) +
3966 (sizeof(struct tx_ring_info) *
3967 TG3_TX_RING_SIZE),
3968 GFP_KERNEL);
3969 if (!tp->rx_std_buffers)
3970 return -ENOMEM;
3971
3972 memset(tp->rx_std_buffers, 0,
3973 (sizeof(struct ring_info) *
3974 (TG3_RX_RING_SIZE +
3975 TG3_RX_JUMBO_RING_SIZE)) +
3976 (sizeof(struct tx_ring_info) *
3977 TG3_TX_RING_SIZE));
3978
3979 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3980 tp->tx_buffers = (struct tx_ring_info *)
3981 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3982
3983 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3984 &tp->rx_std_mapping);
3985 if (!tp->rx_std)
3986 goto err_out;
3987
3988 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3989 &tp->rx_jumbo_mapping);
3990
3991 if (!tp->rx_jumbo)
3992 goto err_out;
3993
3994 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3995 &tp->rx_rcb_mapping);
3996 if (!tp->rx_rcb)
3997 goto err_out;
3998
3999 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4000 &tp->tx_desc_mapping);
4001 if (!tp->tx_ring)
4002 goto err_out;
4003
4004 tp->hw_status = pci_alloc_consistent(tp->pdev,
4005 TG3_HW_STATUS_SIZE,
4006 &tp->status_mapping);
4007 if (!tp->hw_status)
4008 goto err_out;
4009
4010 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4011 sizeof(struct tg3_hw_stats),
4012 &tp->stats_mapping);
4013 if (!tp->hw_stats)
4014 goto err_out;
4015
4016 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4017 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4018
4019 return 0;
4020
4021err_out:
4022 tg3_free_consistent(tp);
4023 return -ENOMEM;
4024}
4025
4026#define MAX_WAIT_CNT 1000
4027
4028/* To stop a block, clear the enable bit and poll till it
4029 * clears. tp->lock is held.
4030 */
b3b7d6be 4031static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4032{
4033 unsigned int i;
4034 u32 val;
4035
4036 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4037 switch (ofs) {
4038 case RCVLSC_MODE:
4039 case DMAC_MODE:
4040 case MBFREE_MODE:
4041 case BUFMGR_MODE:
4042 case MEMARB_MODE:
4043 /* We can't enable/disable these bits of the
4044 * 5705/5750, just say success.
4045 */
4046 return 0;
4047
4048 default:
4049 break;
4050 };
4051 }
4052
4053 val = tr32(ofs);
4054 val &= ~enable_bit;
4055 tw32_f(ofs, val);
4056
4057 for (i = 0; i < MAX_WAIT_CNT; i++) {
4058 udelay(100);
4059 val = tr32(ofs);
4060 if ((val & enable_bit) == 0)
4061 break;
4062 }
4063
b3b7d6be 4064 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4065 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4066 "ofs=%lx enable_bit=%x\n",
4067 ofs, enable_bit);
4068 return -ENODEV;
4069 }
4070
4071 return 0;
4072}
4073
4074/* tp->lock is held. */
b3b7d6be 4075static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
4076{
4077 int i, err;
4078
4079 tg3_disable_ints(tp);
4080
4081 tp->rx_mode &= ~RX_MODE_ENABLE;
4082 tw32_f(MAC_RX_MODE, tp->rx_mode);
4083 udelay(10);
4084
b3b7d6be
DM
4085 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4086 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4087 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4088 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4089 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4090 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4091
4092 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4093 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4094 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4095 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4096 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4097 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4098 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
4099
4100 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4101 tw32_f(MAC_MODE, tp->mac_mode);
4102 udelay(40);
4103
4104 tp->tx_mode &= ~TX_MODE_ENABLE;
4105 tw32_f(MAC_TX_MODE, tp->tx_mode);
4106
4107 for (i = 0; i < MAX_WAIT_CNT; i++) {
4108 udelay(100);
4109 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4110 break;
4111 }
4112 if (i >= MAX_WAIT_CNT) {
4113 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4114 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4115 tp->dev->name, tr32(MAC_TX_MODE));
e6de8ad1 4116 err |= -ENODEV;
1da177e4
LT
4117 }
4118
e6de8ad1 4119 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
4120 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4121 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
4122
4123 tw32(FTQ_RESET, 0xffffffff);
4124 tw32(FTQ_RESET, 0x00000000);
4125
b3b7d6be
DM
4126 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4127 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4
LT
4128
4129 if (tp->hw_status)
4130 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4131 if (tp->hw_stats)
4132 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4133
1da177e4
LT
4134 return err;
4135}
4136
4137/* tp->lock is held. */
4138static int tg3_nvram_lock(struct tg3 *tp)
4139{
4140 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4141 int i;
4142
4143 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4144 for (i = 0; i < 8000; i++) {
4145 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4146 break;
4147 udelay(20);
4148 }
4149 if (i == 8000)
4150 return -ENODEV;
4151 }
4152 return 0;
4153}
4154
4155/* tp->lock is held. */
4156static void tg3_nvram_unlock(struct tg3 *tp)
4157{
4158 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4160}
4161
e6af301b
MC
4162/* tp->lock is held. */
4163static void tg3_enable_nvram_access(struct tg3 *tp)
4164{
4165 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4166 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4167 u32 nvaccess = tr32(NVRAM_ACCESS);
4168
4169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4170 }
4171}
4172
4173/* tp->lock is held. */
4174static void tg3_disable_nvram_access(struct tg3 *tp)
4175{
4176 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4177 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4178 u32 nvaccess = tr32(NVRAM_ACCESS);
4179
4180 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4181 }
4182}
4183
1da177e4
LT
4184/* tp->lock is held. */
4185static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4186{
4187 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4188 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4189 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4190
4191 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4192 switch (kind) {
4193 case RESET_KIND_INIT:
4194 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4195 DRV_STATE_START);
4196 break;
4197
4198 case RESET_KIND_SHUTDOWN:
4199 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4200 DRV_STATE_UNLOAD);
4201 break;
4202
4203 case RESET_KIND_SUSPEND:
4204 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4205 DRV_STATE_SUSPEND);
4206 break;
4207
4208 default:
4209 break;
4210 };
4211 }
4212}
4213
4214/* tp->lock is held. */
4215static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4216{
4217 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4218 switch (kind) {
4219 case RESET_KIND_INIT:
4220 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4221 DRV_STATE_START_DONE);
4222 break;
4223
4224 case RESET_KIND_SHUTDOWN:
4225 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4226 DRV_STATE_UNLOAD_DONE);
4227 break;
4228
4229 default:
4230 break;
4231 };
4232 }
4233}
4234
4235/* tp->lock is held. */
4236static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4237{
4238 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4239 switch (kind) {
4240 case RESET_KIND_INIT:
4241 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4242 DRV_STATE_START);
4243 break;
4244
4245 case RESET_KIND_SHUTDOWN:
4246 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4247 DRV_STATE_UNLOAD);
4248 break;
4249
4250 case RESET_KIND_SUSPEND:
4251 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4252 DRV_STATE_SUSPEND);
4253 break;
4254
4255 default:
4256 break;
4257 };
4258 }
4259}
4260
static void tg3_stop_fw(struct tg3 *);

/* tp->lock is held.
 *
 * Perform a full core-clock reset of the chip, then restore enough
 * PCI/MSI/indirect-access state for the device to be reachable again,
 * wait for the on-chip bootcode to come back up, and re-probe the
 * ASF (management firmware) configuration from NIC SRAM.
 *
 * Returns 0 on success, or -ENODEV if the bootcode never signals
 * completion in the firmware mailbox.
 *
 * NOTE(review): the exact ordering of register writes, config-space
 * pokes, and delays below is chip-errata driven — do not reorder.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i;

	/* Bootcode may still be using NVRAM; take the lock before reset. */
	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
		tg3_nvram_lock(tp);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* 0x7e2c is an undocumented PCIe bridge register —
		 * presumably a link tuning knob; value checked/forced
		 * per Broadcom reference code.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* 0xc4: PCIe config register, bit 15 set per
			 * 5750 A0 errata — TODO confirm symbolic name.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Config space was clobbered by the reset; restore the saved copy. */
	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;	/* shadows the outer val intentionally */

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		/* 0x5000/0x400: 5750 A3 errata poke — TODO confirm. */
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode to match the PHY attachment. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
		/* Wait for firmware initialization to complete
		 * (up to ~1s: 100000 polls x 10us).
		 */
		for (i = 0; i < 100000; i++) {
			tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			udelay(10);
		}
		if (i >= 100000) {
			printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
			       "firmware will not restart magic=%08x\n",
			       tp->dev->name, val);
			return -ENODEV;
		}
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state. */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4459
4460/* tp->lock is held. */
4461static void tg3_stop_fw(struct tg3 *tp)
4462{
4463 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4464 u32 val;
4465 int i;
4466
4467 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4468 val = tr32(GRC_RX_CPU_EVENT);
4469 val |= (1 << 14);
4470 tw32(GRC_RX_CPU_EVENT, val);
4471
4472 /* Wait for RX cpu to ACK the event. */
4473 for (i = 0; i < 100; i++) {
4474 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4475 break;
4476 udelay(1);
4477 }
4478 }
4479}
4480
/* tp->lock is held.
 *
 * Bring the chip fully down: pause the ASF firmware, write the
 * pre-reset signature for @kind, abort all in-flight hardware
 * activity (@silent suppresses warnings), reset the chip, and write
 * the legacy and post-reset signatures.  Returns the result of
 * tg3_chip_reset() (0 on success).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Signatures are written even when the reset itself failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4501
/* 5701 A0 RX-CPU workaround firmware: release id and MIPS memory layout.
 * (The "RELASE" spelling is a historical typo; the macro name is kept
 * as-is since it is part of the existing naming.)
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000	/* entry point */
#define TG3_FW_TEXT_ADDR	0x08000000	/* .text load address */
#define TG3_FW_TEXT_LEN		0x9c0		/* .text length in bytes */
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
4516
/* .text image of the 5701 A0 workaround firmware (MIPS machine code),
 * loaded into CPU scratch memory by tg3_load_5701_a0_firmware_fix().
 * Opaque binary data — do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
4610
/* .rodata section of the 5701 A0 workaround firmware — appears to
 * contain ASCII message strings used by the firmware.  Opaque binary
 * data; do not edit by hand.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
4618
#if 0 /* All zeros, don't eat up space with it. */
/* .data section of the 5701 A0 firmware.  It is entirely zero, so it
 * is synthesized at load time (data_data == NULL) instead of being
 * stored in the driver image.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
4625
/* On-chip scratch memory windows into which RX/TX CPU firmware images
 * are loaded (16KB each).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
4630
4631/* tp->lock is held. */
4632static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4633{
4634 int i;
4635
4636 if (offset == TX_CPU_BASE &&
4637 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4638 BUG();
4639
4640 if (offset == RX_CPU_BASE) {
4641 for (i = 0; i < 10000; i++) {
4642 tw32(offset + CPU_STATE, 0xffffffff);
4643 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4644 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4645 break;
4646 }
4647
4648 tw32(offset + CPU_STATE, 0xffffffff);
4649 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4650 udelay(10);
4651 } else {
4652 for (i = 0; i < 10000; i++) {
4653 tw32(offset + CPU_STATE, 0xffffffff);
4654 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4655 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4656 break;
4657 }
4658 }
4659
4660 if (i >= 10000) {
4661 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4662 "and %s CPU\n",
4663 tp->dev->name,
4664 (offset == RX_CPU_BASE ? "RX" : "TX"));
4665 return -ENODEV;
4666 }
4667 return 0;
4668}
4669
/* Describes one firmware image for tg3_load_firmware_cpu(): a
 * (base, len, data) triple per section.  A NULL data pointer means the
 * section is all zeros and is synthesized at load time rather than
 * stored in the driver.
 */
struct fw_info {
	unsigned int text_base;		/* .text load address */
	unsigned int text_len;		/* .text length in bytes */
	u32 *text_data;			/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* .rodata load address */
	unsigned int rodata_len;	/* .rodata length in bytes */
	u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* .data load address */
	unsigned int data_len;		/* .data length in bytes */
	u32 *data_data;			/* .data words, or NULL for zeros */
};
4681
4682/* tp->lock is held. */
4683static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4684 int cpu_scratch_size, struct fw_info *info)
4685{
4686 int err, i;
1da177e4
LT
4687 void (*write_op)(struct tg3 *, u32, u32);
4688
4689 if (cpu_base == TX_CPU_BASE &&
4690 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4691 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4692 "TX cpu firmware on %s which is 5705.\n",
4693 tp->dev->name);
4694 return -EINVAL;
4695 }
4696
4697 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4698 write_op = tg3_write_mem;
4699 else
4700 write_op = tg3_write_indirect_reg32;
4701
1b628151
MC
4702 /* It is possible that bootcode is still loading at this point.
4703 * Get the nvram lock first before halting the cpu.
4704 */
4705 tg3_nvram_lock(tp);
1da177e4 4706 err = tg3_halt_cpu(tp, cpu_base);
1b628151 4707 tg3_nvram_unlock(tp);
1da177e4
LT
4708 if (err)
4709 goto out;
4710
4711 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4712 write_op(tp, cpu_scratch_base + i, 0);
4713 tw32(cpu_base + CPU_STATE, 0xffffffff);
4714 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4715 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4716 write_op(tp, (cpu_scratch_base +
4717 (info->text_base & 0xffff) +
4718 (i * sizeof(u32))),
4719 (info->text_data ?
4720 info->text_data[i] : 0));
4721 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4722 write_op(tp, (cpu_scratch_base +
4723 (info->rodata_base & 0xffff) +
4724 (i * sizeof(u32))),
4725 (info->rodata_data ?
4726 info->rodata_data[i] : 0));
4727 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4728 write_op(tp, (cpu_scratch_base +
4729 (info->data_base & 0xffff) +
4730 (i * sizeof(u32))),
4731 (info->data_data ?
4732 info->data_data[i] : 0));
4733
4734 err = 0;
4735
4736out:
1da177e4
LT
4737 return err;
4738}
4739
4740/* tp->lock is held. */
4741static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4742{
4743 struct fw_info info;
4744 int err, i;
4745
4746 info.text_base = TG3_FW_TEXT_ADDR;
4747 info.text_len = TG3_FW_TEXT_LEN;
4748 info.text_data = &tg3FwText[0];
4749 info.rodata_base = TG3_FW_RODATA_ADDR;
4750 info.rodata_len = TG3_FW_RODATA_LEN;
4751 info.rodata_data = &tg3FwRodata[0];
4752 info.data_base = TG3_FW_DATA_ADDR;
4753 info.data_len = TG3_FW_DATA_LEN;
4754 info.data_data = NULL;
4755
4756 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4757 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4758 &info);
4759 if (err)
4760 return err;
4761
4762 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4763 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4764 &info);
4765 if (err)
4766 return err;
4767
4768 /* Now startup only the RX cpu. */
4769 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4770 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4771
4772 for (i = 0; i < 5; i++) {
4773 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4774 break;
4775 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4776 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4777 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4778 udelay(1000);
4779 }
4780 if (i >= 5) {
4781 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4782 "to set RX CPU PC, is %08x should be %08x\n",
4783 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4784 TG3_FW_TEXT_ADDR);
4785 return -ENODEV;
4786 }
4787 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4788 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4789
4790 return 0;
4791}
4792
#if TG3_TSO_SUPPORT != 0

/* TSO offload firmware: release id and MIPS memory layout.
 * (The "RELASE" spelling is a historical typo; the macro name is kept
 * as-is since it is part of the existing naming.)
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000	/* entry point */
#define TG3_TSO_FW_TEXT_ADDR		0x08000000	/* .text load address */
#define TG3_TSO_FW_TEXT_LEN		0x1aa0		/* .text length in bytes */
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
4809
4810static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4811 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4812 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4813 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4814 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4815 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4816 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4817 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4818 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4819 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4820 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4821 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4822 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4823 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4824 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4825 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4826 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4827 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4828 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4829 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4830 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4831 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4832 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4833 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4834 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4835 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4836 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4837 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4838 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4839 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4840 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4841 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4842 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4843 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4844 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4845 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4846 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4847 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4848 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4849 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4850 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4851 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4852 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4853 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4854 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4855 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4856 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4857 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4858 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4859 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4860 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4861 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4862 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4863 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4864 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4865 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4866 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4867 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4868 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4869 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4870 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4871 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4872 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4873 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4874 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4875 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4876 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4877 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4878 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4879 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4880 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4881 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4882 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4883 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4884 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4885 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4886 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4887 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4888 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4889 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4890 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4891 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4892 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4893 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4894 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4895 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4896 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4897 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4898 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4899 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4900 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4901 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4902 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4903 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4904 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4905 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4906 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4907 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4908 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4909 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4910 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4911 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4912 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4913 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4914 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4915 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4916 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4917 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4918 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4919 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4920 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4921 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4922 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4923 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4924 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4925 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4926 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4927 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4928 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4929 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4930 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4931 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4932 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4933 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4934 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4935 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4936 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4937 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4938 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4939 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4940 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4941 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4942 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4943 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4944 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4945 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4946 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4947 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4948 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4949 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4950 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4951 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4952 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4953 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4954 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4955 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4956 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4957 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4958 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4959 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4960 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4961 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4962 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4963 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4964 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4965 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4966 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4967 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4968 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4969 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4970 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4971 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4972 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4973 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4974 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4975 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4976 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4977 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4978 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4979 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4980 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4981 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4982 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4983 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4984 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4985 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4986 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4987 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4988 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4989 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4990 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4991 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4992 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4993 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4994 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4995 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4996 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4997 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4998 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4999 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5000 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5001 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5002 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5003 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5004 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5005 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5006 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5007 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5008 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5009 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5010 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5011 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5012 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5013 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5014 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5015 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5016 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5017 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5018 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5019 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5020 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5021 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5022 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5023 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5024 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5025 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5026 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5027 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5028 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5029 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5030 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5031 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5032 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5033 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5034 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5035 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5036 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5037 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5038 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5039 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5040 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5041 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5042 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5043 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5044 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5045 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5046 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5047 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5048 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5049 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5050 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5051 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5052 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5053 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5054 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5055 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5056 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5057 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5058 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5059 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5060 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5061 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5062 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5063 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5064 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5065 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5066 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5067 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5068 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5069 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5070 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5071 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5072 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5073 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5074 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5075 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5076 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5077 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5078 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5079 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5080 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5081 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5082 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5083 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5084 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5085 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5086 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5087 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5088 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5089 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5090 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5091 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5092 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5093 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5094 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5095};
5096
/* Read-only data segment of the standard TSO firmware.  The words are
 * ASCII tags used by the firmware itself (e.g. 0x4d61696e = "Main",
 * 0x43707542 = "CpuB", 0x73746b6f = "stko").  Opaque firmware data --
 * do not edit by hand.
 */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5104
/* Initialized data segment of the standard TSO firmware.  Contains the
 * firmware's version string in ASCII ("stkoffld_v1.6.0").  Opaque
 * firmware data -- do not edit by hand.
 */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5110
/* 5705 needs a special version of the TSO firmware. */
/*
 * Link map of the 5705-specific TSO firmware image: load addresses and
 * lengths of the text/rodata/data segments, plus the sbss/bss regions
 * that must be reserved after them in NIC SRAM (these sizes are summed
 * below when carving scratch space out of the MBUF pool).
 *
 * NOTE(review): "RELASE" is a historical misspelling of "RELEASE".
 * The macro may be referenced elsewhere in the driver, so the name is
 * deliberately left unchanged here.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5126
/* Text (code) segment of the 5705-specific TSO firmware: raw MIPS
 * machine code, loaded into NIC SRAM at TG3_TSO5_FW_TEXT_ADDR by
 * tg3_load_tso_firmware() and executed by the on-chip RX CPU.
 * Opaque firmware data -- do not edit by hand.
 */
static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
5285
/* Read-only data segment of the 5705-specific TSO firmware (ASCII
 * tags, e.g. "MainCpuB", "stkoffld", "fatalErr").  Opaque firmware
 * data -- do not edit by hand.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5292
/* Initialized data segment of the 5705-specific TSO firmware; holds
 * the version string "stkoffld_v1.2.0" in ASCII.  Opaque firmware
 * data -- do not edit by hand.
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5297
5298/* tp->lock is held. */
5299static int tg3_load_tso_firmware(struct tg3 *tp)
5300{
5301 struct fw_info info;
5302 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5303 int err, i;
5304
5305 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5306 return 0;
5307
5308 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5309 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5310 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5311 info.text_data = &tg3Tso5FwText[0];
5312 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5313 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5314 info.rodata_data = &tg3Tso5FwRodata[0];
5315 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5316 info.data_len = TG3_TSO5_FW_DATA_LEN;
5317 info.data_data = &tg3Tso5FwData[0];
5318 cpu_base = RX_CPU_BASE;
5319 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5320 cpu_scratch_size = (info.text_len +
5321 info.rodata_len +
5322 info.data_len +
5323 TG3_TSO5_FW_SBSS_LEN +
5324 TG3_TSO5_FW_BSS_LEN);
5325 } else {
5326 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5327 info.text_len = TG3_TSO_FW_TEXT_LEN;
5328 info.text_data = &tg3TsoFwText[0];
5329 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5330 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5331 info.rodata_data = &tg3TsoFwRodata[0];
5332 info.data_base = TG3_TSO_FW_DATA_ADDR;
5333 info.data_len = TG3_TSO_FW_DATA_LEN;
5334 info.data_data = &tg3TsoFwData[0];
5335 cpu_base = TX_CPU_BASE;
5336 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5337 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5338 }
5339
5340 err = tg3_load_firmware_cpu(tp, cpu_base,
5341 cpu_scratch_base, cpu_scratch_size,
5342 &info);
5343 if (err)
5344 return err;
5345
5346 /* Now startup the cpu. */
5347 tw32(cpu_base + CPU_STATE, 0xffffffff);
5348 tw32_f(cpu_base + CPU_PC, info.text_base);
5349
5350 for (i = 0; i < 5; i++) {
5351 if (tr32(cpu_base + CPU_PC) == info.text_base)
5352 break;
5353 tw32(cpu_base + CPU_STATE, 0xffffffff);
5354 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5355 tw32_f(cpu_base + CPU_PC, info.text_base);
5356 udelay(1000);
5357 }
5358 if (i >= 5) {
5359 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5360 "to set CPU PC, is %08x should be %08x\n",
5361 tp->dev->name, tr32(cpu_base + CPU_PC),
5362 info.text_base);
5363 return -ENODEV;
5364 }
5365 tw32(cpu_base + CPU_STATE, 0xffffffff);
5366 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5367 return 0;
5368}
5369
5370#endif /* TG3_TSO_SUPPORT != 0 */
5371
5372/* tp->lock is held. */
5373static void __tg3_set_mac_addr(struct tg3 *tp)
5374{
5375 u32 addr_high, addr_low;
5376 int i;
5377
5378 addr_high = ((tp->dev->dev_addr[0] << 8) |
5379 tp->dev->dev_addr[1]);
5380 addr_low = ((tp->dev->dev_addr[2] << 24) |
5381 (tp->dev->dev_addr[3] << 16) |
5382 (tp->dev->dev_addr[4] << 8) |
5383 (tp->dev->dev_addr[5] << 0));
5384 for (i = 0; i < 4; i++) {
5385 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5386 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5387 }
5388
5389 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5390 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5391 for (i = 0; i < 12; i++) {
5392 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5393 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5394 }
5395 }
5396
5397 addr_high = (tp->dev->dev_addr[0] +
5398 tp->dev->dev_addr[1] +
5399 tp->dev->dev_addr[2] +
5400 tp->dev->dev_addr[3] +
5401 tp->dev->dev_addr[4] +
5402 tp->dev->dev_addr[5]) &
5403 TX_BACKOFF_SEED_MASK;
5404 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5405}
5406
5407static int tg3_set_mac_addr(struct net_device *dev, void *p)
5408{
5409 struct tg3 *tp = netdev_priv(dev);
5410 struct sockaddr *addr = p;
5411
f9804ddb
MC
5412 if (!is_valid_ether_addr(addr->sa_data))
5413 return -EINVAL;
5414
1da177e4
LT
5415 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5416
f47c11ee 5417 spin_lock_bh(&tp->lock);
1da177e4 5418 __tg3_set_mac_addr(tp);
f47c11ee 5419 spin_unlock_bh(&tp->lock);
1da177e4
LT
5420
5421 return 0;
5422}
5423
5424/* tp->lock is held. */
5425static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5426 dma_addr_t mapping, u32 maxlen_flags,
5427 u32 nic_addr)
5428{
5429 tg3_write_mem(tp,
5430 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5431 ((u64) mapping >> 32));
5432 tg3_write_mem(tp,
5433 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5434 ((u64) mapping & 0xffffffff));
5435 tg3_write_mem(tp,
5436 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5437 maxlen_flags);
5438
5439 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5440 tg3_write_mem(tp,
5441 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5442 nic_addr);
5443}
5444
5445static void __tg3_set_rx_mode(struct net_device *);
d244c892 5446static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
5447{
5448 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5449 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5450 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5451 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5452 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5453 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5454 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5455 }
5456 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5457 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5458 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5459 u32 val = ec->stats_block_coalesce_usecs;
5460
5461 if (!netif_carrier_ok(tp->dev))
5462 val = 0;
5463
5464 tw32(HOSTCC_STAT_COAL_TICKS, val);
5465 }
5466}
1da177e4
LT
5467
5468/* tp->lock is held. */
5469static int tg3_reset_hw(struct tg3 *tp)
5470{
5471 u32 val, rdmac_mode;
5472 int i, err, limit;
5473
5474 tg3_disable_ints(tp);
5475
5476 tg3_stop_fw(tp);
5477
5478 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5479
5480 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 5481 tg3_abort_hw(tp, 1);
1da177e4
LT
5482 }
5483
5484 err = tg3_chip_reset(tp);
5485 if (err)
5486 return err;
5487
5488 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5489
5490 /* This works around an issue with Athlon chipsets on
5491 * B3 tigon3 silicon. This bit has no effect on any
5492 * other revision. But do not set this on PCI Express
5493 * chips.
5494 */
5495 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5496 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5497 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5498
5499 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5500 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5501 val = tr32(TG3PCI_PCISTATE);
5502 val |= PCISTATE_RETRY_SAME_DMA;
5503 tw32(TG3PCI_PCISTATE, val);
5504 }
5505
5506 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5507 /* Enable some hw fixes. */
5508 val = tr32(TG3PCI_MSI_DATA);
5509 val |= (1 << 26) | (1 << 28) | (1 << 29);
5510 tw32(TG3PCI_MSI_DATA, val);
5511 }
5512
5513 /* Descriptor ring init may make accesses to the
5514 * NIC SRAM area to setup the TX descriptors, so we
5515 * can only do this after the hardware has been
5516 * successfully reset.
5517 */
5518 tg3_init_rings(tp);
5519
5520 /* This value is determined during the probe time DMA
5521 * engine test, tg3_test_dma.
5522 */
5523 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5524
5525 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5526 GRC_MODE_4X_NIC_SEND_RINGS |
5527 GRC_MODE_NO_TX_PHDR_CSUM |
5528 GRC_MODE_NO_RX_PHDR_CSUM);
5529 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5530 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5531 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5532 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5533 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5534
5535 tw32(GRC_MODE,
5536 tp->grc_mode |
5537 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5538
5539 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5540 val = tr32(GRC_MISC_CFG);
5541 val &= ~0xff;
5542 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5543 tw32(GRC_MISC_CFG, val);
5544
5545 /* Initialize MBUF/DESC pool. */
cbf46853 5546 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
5547 /* Do nothing. */
5548 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5549 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5551 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5552 else
5553 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5554 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5555 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5556 }
5557#if TG3_TSO_SUPPORT != 0
5558 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5559 int fw_len;
5560
5561 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5562 TG3_TSO5_FW_RODATA_LEN +
5563 TG3_TSO5_FW_DATA_LEN +
5564 TG3_TSO5_FW_SBSS_LEN +
5565 TG3_TSO5_FW_BSS_LEN);
5566 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5567 tw32(BUFMGR_MB_POOL_ADDR,
5568 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5569 tw32(BUFMGR_MB_POOL_SIZE,
5570 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5571 }
5572#endif
5573
0f893dc6 5574 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
5575 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5576 tp->bufmgr_config.mbuf_read_dma_low_water);
5577 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5578 tp->bufmgr_config.mbuf_mac_rx_low_water);
5579 tw32(BUFMGR_MB_HIGH_WATER,
5580 tp->bufmgr_config.mbuf_high_water);
5581 } else {
5582 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5583 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5584 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5585 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5586 tw32(BUFMGR_MB_HIGH_WATER,
5587 tp->bufmgr_config.mbuf_high_water_jumbo);
5588 }
5589 tw32(BUFMGR_DMA_LOW_WATER,
5590 tp->bufmgr_config.dma_low_water);
5591 tw32(BUFMGR_DMA_HIGH_WATER,
5592 tp->bufmgr_config.dma_high_water);
5593
5594 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5595 for (i = 0; i < 2000; i++) {
5596 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5597 break;
5598 udelay(10);
5599 }
5600 if (i >= 2000) {
5601 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5602 tp->dev->name);
5603 return -ENODEV;
5604 }
5605
5606 /* Setup replenish threshold. */
5607 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5608
5609 /* Initialize TG3_BDINFO's at:
5610 * RCVDBDI_STD_BD: standard eth size rx ring
5611 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5612 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5613 *
5614 * like so:
5615 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5616 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5617 * ring attribute flags
5618 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5619 *
5620 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5621 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5622 *
5623 * The size of each ring is fixed in the firmware, but the location is
5624 * configurable.
5625 */
5626 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5627 ((u64) tp->rx_std_mapping >> 32));
5628 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5629 ((u64) tp->rx_std_mapping & 0xffffffff));
5630 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5631 NIC_SRAM_RX_BUFFER_DESC);
5632
5633 /* Don't even try to program the JUMBO/MINI buffer descriptor
5634 * configs on 5705.
5635 */
5636 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5637 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5638 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5639 } else {
5640 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5641 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5642
5643 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5644 BDINFO_FLAGS_DISABLED);
5645
5646 /* Setup replenish threshold. */
5647 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5648
0f893dc6 5649 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
5650 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5651 ((u64) tp->rx_jumbo_mapping >> 32));
5652 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5653 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5654 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5655 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5656 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5657 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5658 } else {
5659 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5660 BDINFO_FLAGS_DISABLED);
5661 }
5662
5663 }
5664
5665 /* There is only one send ring on 5705/5750, no need to explicitly
5666 * disable the others.
5667 */
5668 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5669 /* Clear out send RCB ring in SRAM. */
5670 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5671 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5672 BDINFO_FLAGS_DISABLED);
5673 }
5674
5675 tp->tx_prod = 0;
5676 tp->tx_cons = 0;
5677 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5678 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5679
5680 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5681 tp->tx_desc_mapping,
5682 (TG3_TX_RING_SIZE <<
5683 BDINFO_FLAGS_MAXLEN_SHIFT),
5684 NIC_SRAM_TX_BUFFER_DESC);
5685
5686 /* There is only one receive return ring on 5705/5750, no need
5687 * to explicitly disable the others.
5688 */
5689 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5690 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5691 i += TG3_BDINFO_SIZE) {
5692 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5693 BDINFO_FLAGS_DISABLED);
5694 }
5695 }
5696
5697 tp->rx_rcb_ptr = 0;
5698 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5699
5700 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5701 tp->rx_rcb_mapping,
5702 (TG3_RX_RCB_RING_SIZE(tp) <<
5703 BDINFO_FLAGS_MAXLEN_SHIFT),
5704 0);
5705
5706 tp->rx_std_ptr = tp->rx_pending;
5707 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5708 tp->rx_std_ptr);
5709
0f893dc6 5710 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
5711 tp->rx_jumbo_pending : 0;
5712 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5713 tp->rx_jumbo_ptr);
5714
5715 /* Initialize MAC address and backoff seed. */
5716 __tg3_set_mac_addr(tp);
5717
5718 /* MTU + ethernet header + FCS + optional VLAN tag */
5719 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5720
5721 /* The slot time is changed by tg3_setup_phy if we
5722 * run at gigabit with half duplex.
5723 */
5724 tw32(MAC_TX_LENGTHS,
5725 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5726 (6 << TX_LENGTHS_IPG_SHIFT) |
5727 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5728
5729 /* Receive rules. */
5730 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5731 tw32(RCVLPC_CONFIG, 0x0181);
5732
5733 /* Calculate RDMAC_MODE setting early, we need it to determine
5734 * the RCVLPC_STATE_ENABLE mask.
5735 */
5736 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5737 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5738 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5739 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5740 RDMAC_MODE_LNGREAD_ENAB);
5741 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5742 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
5743
5744 /* If statement applies to 5705 and 5750 PCI devices only */
5745 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5746 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5747 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
5748 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5749 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5750 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5751 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5752 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5753 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5754 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5755 }
5756 }
5757
85e94ced
MC
5758 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5759 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5760
1da177e4
LT
5761#if TG3_TSO_SUPPORT != 0
5762 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5763 rdmac_mode |= (1 << 27);
5764#endif
5765
5766 /* Receive/send statistics. */
5767 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5768 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5769 val = tr32(RCVLPC_STATS_ENABLE);
5770 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5771 tw32(RCVLPC_STATS_ENABLE, val);
5772 } else {
5773 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5774 }
5775 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5776 tw32(SNDDATAI_STATSENAB, 0xffffff);
5777 tw32(SNDDATAI_STATSCTRL,
5778 (SNDDATAI_SCTRL_ENABLE |
5779 SNDDATAI_SCTRL_FASTUPD));
5780
5781 /* Setup host coalescing engine. */
5782 tw32(HOSTCC_MODE, 0);
5783 for (i = 0; i < 2000; i++) {
5784 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5785 break;
5786 udelay(10);
5787 }
5788
d244c892 5789 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
5790
5791 /* set status block DMA address */
5792 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5793 ((u64) tp->status_mapping >> 32));
5794 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5795 ((u64) tp->status_mapping & 0xffffffff));
5796
5797 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5798 /* Status/statistics block address. See tg3_timer,
5799 * the tg3_periodic_fetch_stats call there, and
5800 * tg3_get_stats to see how this works for 5705/5750 chips.
5801 */
1da177e4
LT
5802 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5803 ((u64) tp->stats_mapping >> 32));
5804 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5805 ((u64) tp->stats_mapping & 0xffffffff));
5806 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5807 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5808 }
5809
5810 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5811
5812 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5813 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5814 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5815 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5816
5817 /* Clear statistics/status block in chip, and status block in ram. */
5818 for (i = NIC_SRAM_STATS_BLK;
5819 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5820 i += sizeof(u32)) {
5821 tg3_write_mem(tp, i, 0);
5822 udelay(40);
5823 }
5824 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5825
c94e3941
MC
5826 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5827 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5828 /* reset to prevent losing 1st rx packet intermittently */
5829 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5830 udelay(10);
5831 }
5832
1da177e4
LT
5833 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5834 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5835 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5836 udelay(40);
5837
314fba34
MC
5838 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5839 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5840 * register to preserve the GPIO settings for LOMs. The GPIOs,
5841 * whether used as inputs or outputs, are set by boot code after
5842 * reset.
5843 */
5844 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5845 u32 gpio_mask;
5846
5847 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5848 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
5849
5850 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5851 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5852 GRC_LCLCTRL_GPIO_OUTPUT3;
5853
314fba34
MC
5854 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5855
5856 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
5857 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5858 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 5859 }
1da177e4
LT
5860 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5861 udelay(100);
5862
09ee929c 5863 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 5864 tp->last_tag = 0;
1da177e4
LT
5865
5866 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5867 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5868 udelay(40);
5869 }
5870
5871 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5872 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5873 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5874 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5875 WDMAC_MODE_LNGREAD_ENAB);
5876
85e94ced
MC
5877 /* If statement applies to 5705 and 5750 PCI devices only */
5878 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5879 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5880 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
5881 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5882 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5883 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5884 /* nothing */
5885 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5886 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5887 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5888 val |= WDMAC_MODE_RX_ACCEL;
5889 }
5890 }
5891
5892 tw32_f(WDMAC_MODE, val);
5893 udelay(40);
5894
5895 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5896 val = tr32(TG3PCI_X_CAPS);
5897 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5898 val &= ~PCIX_CAPS_BURST_MASK;
5899 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5900 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5901 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5902 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5903 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5904 val |= (tp->split_mode_max_reqs <<
5905 PCIX_CAPS_SPLIT_SHIFT);
5906 }
5907 tw32(TG3PCI_X_CAPS, val);
5908 }
5909
5910 tw32_f(RDMAC_MODE, rdmac_mode);
5911 udelay(40);
5912
5913 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5914 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5915 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5916 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5917 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5918 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5919 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5920 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5921#if TG3_TSO_SUPPORT != 0
5922 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5923 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5924#endif
5925 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5926 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5927
5928 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5929 err = tg3_load_5701_a0_firmware_fix(tp);
5930 if (err)
5931 return err;
5932 }
5933
5934#if TG3_TSO_SUPPORT != 0
5935 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5936 err = tg3_load_tso_firmware(tp);
5937 if (err)
5938 return err;
5939 }
5940#endif
5941
5942 tp->tx_mode = TX_MODE_ENABLE;
5943 tw32_f(MAC_TX_MODE, tp->tx_mode);
5944 udelay(100);
5945
5946 tp->rx_mode = RX_MODE_ENABLE;
5947 tw32_f(MAC_RX_MODE, tp->rx_mode);
5948 udelay(10);
5949
5950 if (tp->link_config.phy_is_low_power) {
5951 tp->link_config.phy_is_low_power = 0;
5952 tp->link_config.speed = tp->link_config.orig_speed;
5953 tp->link_config.duplex = tp->link_config.orig_duplex;
5954 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5955 }
5956
5957 tp->mi_mode = MAC_MI_MODE_BASE;
5958 tw32_f(MAC_MI_MODE, tp->mi_mode);
5959 udelay(80);
5960
5961 tw32(MAC_LED_CTRL, tp->led_ctrl);
5962
5963 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 5964 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
5965 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5966 udelay(10);
5967 }
5968 tw32_f(MAC_RX_MODE, tp->rx_mode);
5969 udelay(10);
5970
5971 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5972 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5973 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5974 /* Set drive transmission level to 1.2V */
5975 /* only if the signal pre-emphasis bit is not set */
5976 val = tr32(MAC_SERDES_CFG);
5977 val &= 0xfffff000;
5978 val |= 0x880;
5979 tw32(MAC_SERDES_CFG, val);
5980 }
5981 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5982 tw32(MAC_SERDES_CFG, 0x616000);
5983 }
5984
5985 /* Prevent chip from dropping frames when flow control
5986 * is enabled.
5987 */
5988 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5989
5990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5991 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5992 /* Use hardware link auto-negotiation */
5993 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5994 }
5995
5996 err = tg3_setup_phy(tp, 1);
5997 if (err)
5998 return err;
5999
6000 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6001 u32 tmp;
6002
6003 /* Clear CRC stats. */
6004 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6005 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6006 tg3_readphy(tp, 0x14, &tmp);
6007 }
6008 }
6009
6010 __tg3_set_rx_mode(tp->dev);
6011
6012 /* Initialize receive rules. */
6013 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6014 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6015 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6016 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6017
4cf78e4f 6018 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 6019 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
6020 limit = 8;
6021 else
6022 limit = 16;
6023 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6024 limit -= 4;
6025 switch (limit) {
6026 case 16:
6027 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6028 case 15:
6029 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6030 case 14:
6031 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6032 case 13:
6033 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6034 case 12:
6035 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6036 case 11:
6037 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6038 case 10:
6039 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6040 case 9:
6041 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6042 case 8:
6043 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6044 case 7:
6045 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6046 case 6:
6047 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6048 case 5:
6049 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6050 case 4:
6051 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6052 case 3:
6053 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6054 case 2:
6055 case 1:
6056
6057 default:
6058 break;
6059 };
6060
6061 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6062
1da177e4
LT
6063 return 0;
6064}
6065
6066/* Called at device open time to get the chip ready for
6067 * packet processing. Invoked with tp->lock held.
6068 */
6069static int tg3_init_hw(struct tg3 *tp)
6070{
6071 int err;
6072
6073 /* Force the chip into D0. */
6074 err = tg3_set_power_state(tp, 0);
6075 if (err)
6076 goto out;
6077
6078 tg3_switch_clocks(tp);
6079
6080 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6081
6082 err = tg3_reset_hw(tp);
6083
6084out:
6085 return err;
6086}
6087
/* Accumulate a 32-bit clear-on-read hardware counter REG into the
 * 64-bit statistic PSTAT (split into ->low/->high halves), carrying
 * into the high word when the low word wraps.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __cur = tr32(REG); \
	(PSTAT)->low += __cur; \
	if ((PSTAT)->low < __cur) \
		(PSTAT)->high++; \
} while (0)
6094
/* Periodically fold the chip's 32-bit MAC TX/RX statistics registers
 * into the 64-bit software counters in tp->hw_stats.  Used on 5705+
 * chips, which lack the on-chip statistics DMA block (see tg3_timer
 * and tg3_get_stats).  Caller holds tp->lock.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* No link: the counters are static, nothing to accumulate. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
}
6131
/* Driver watchdog timer, re-armed at the end of every run (period is
 * tp->timer_offset jiffies).  Handles: (a) the racy non-tagged IRQ
 * status protocol, (b) once-per-second link polling / stats fetch, and
 * (c) the once-per-two-seconds ASF firmware heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block pending: force an interrupt so it
			 * gets serviced.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Nothing pending: kick the coalescing engine to
			 * push out a fresh status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine died: schedule a full chip
			 * reset from process context and bail out without
			 * re-arming the timer (reset_task restarts it).
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		/* 5705+ chips have no stats DMA; poll the registers. */
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link up but state changed, or link down with a
			 * signal present: renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode to reset the SERDES
				 * state machine before redoing PHY setup.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds. */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is alive. */
			tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
					   FWCMD_NICDRV_ALIVE2);
			tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Raise the RX CPU event to deliver the command. */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

	/* Re-arm for the next tick. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6229
7938109f
MC
/* Verify that the chip can actually deliver an interrupt (used mainly
 * to validate MSI delivery).  Temporarily installs tg3_test_isr, forces
 * a coalescing-engine interrupt, and polls the interrupt mailbox for up
 * to ~50ms.  Restores the normal ISR before returning.
 *
 * Returns 0 on success, -EIO if no interrupt arrived, or the
 * request_irq() error code.  Caller must not hold tp->lock.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap in the test ISR for the duration of the check. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to fire an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* The test ISR leaves its mark in the interrupt mailbox; poll
	 * for it (up to 5 x 10ms).
	 */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Put the real ISR back (MSI vs INTx, tagged vs non-tagged). */
	free_irq(tp->pdev->irq, dev);

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else {
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6285
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * On MSI failure the device is switched back to a (possibly tagged)
 * INTx handler and the chip is fully reset, since the failed MSI cycle
 * may have terminated with a Master Abort.  Caller must not hold
 * tp->lock (tg3_full_lock is taken internally).
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	{
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;

		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6353
1da177e4
LT
/* net_device open handler: allocate DMA-consistent rings, optionally
 * enable MSI (5750+ non-AX/BX only, and only with tagged status),
 * request the IRQ, bring up the hardware, verify MSI delivery, and
 * start the watchdog timer and TX queue.
 *
 * Returns 0 on success or a negative errno; on any failure every
 * resource acquired so far is released in reverse order.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	tg3_full_lock(tp, 0);

	/* Quiesce interrupts while we rebuild state. */
	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	/* MSI is only attempted on 5750+ chips that are not the
	 * problematic 5750 AX/BX revisions.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	/* MSI needs no sharing; INTx may be shared, and the handler
	 * depends on whether tagged status is in use.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else {
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;

		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}

	if (err) {
		/* Unwind: MSI enable + ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status allows a slow 1Hz watchdog; the racy
		 * non-tagged protocol needs a 10Hz tick.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat fires every 2 seconds (see tg3_timer). */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		/* Unwind: IRQ, MSI enable, ring allocation. */
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	/* Prove MSI actually delivers before committing to it;
	 * tg3_test_msi falls back to INTx itself when possible.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			return err;
		}
	}

	tg3_full_lock(tp, 0);

	/* Everything is up: start watchdog, mark init complete, and
	 * let interrupts flow.
	 */
	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
6479
6480#if 0
6481/*static*/ void tg3_dump_state(struct tg3 *tp)
6482{
6483 u32 val32, val32_2, val32_3, val32_4, val32_5;
6484 u16 val16;
6485 int i;
6486
6487 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6488 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6489 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6490 val16, val32);
6491
6492 /* MAC block */
6493 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6494 tr32(MAC_MODE), tr32(MAC_STATUS));
6495 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6496 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6497 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6498 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6499 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6500 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6501
6502 /* Send data initiator control block */
6503 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6504 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6505 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6506 tr32(SNDDATAI_STATSCTRL));
6507
6508 /* Send data completion control block */
6509 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6510
6511 /* Send BD ring selector block */
6512 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6513 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6514
6515 /* Send BD initiator control block */
6516 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6517 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6518
6519 /* Send BD completion control block */
6520 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6521
6522 /* Receive list placement control block */
6523 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6524 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6525 printk(" RCVLPC_STATSCTRL[%08x]\n",
6526 tr32(RCVLPC_STATSCTRL));
6527
6528 /* Receive data and receive BD initiator control block */
6529 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6530 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6531
6532 /* Receive data completion control block */
6533 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6534 tr32(RCVDCC_MODE));
6535
6536 /* Receive BD initiator control block */
6537 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6538 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6539
6540 /* Receive BD completion control block */
6541 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6542 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6543
6544 /* Receive list selector control block */
6545 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6546 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6547
6548 /* Mbuf cluster free block */
6549 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6550 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6551
6552 /* Host coalescing control block */
6553 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6554 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6555 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6556 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6557 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6558 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6559 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6560 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6561 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6562 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6563 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6564 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6565
6566 /* Memory arbiter control block */
6567 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6568 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6569
6570 /* Buffer manager control block */
6571 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6572 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6573 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6574 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6575 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6576 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6577 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6578 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6579
6580 /* Read DMA control block */
6581 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6582 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6583
6584 /* Write DMA control block */
6585 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6586 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6587
6588 /* DMA completion block */
6589 printk("DEBUG: DMAC_MODE[%08x]\n",
6590 tr32(DMAC_MODE));
6591
6592 /* GRC block */
6593 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6594 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6595 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6596 tr32(GRC_LOCAL_CTRL));
6597
6598 /* TG3_BDINFOs */
6599 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6600 tr32(RCVDBDI_JUMBO_BD + 0x0),
6601 tr32(RCVDBDI_JUMBO_BD + 0x4),
6602 tr32(RCVDBDI_JUMBO_BD + 0x8),
6603 tr32(RCVDBDI_JUMBO_BD + 0xc));
6604 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6605 tr32(RCVDBDI_STD_BD + 0x0),
6606 tr32(RCVDBDI_STD_BD + 0x4),
6607 tr32(RCVDBDI_STD_BD + 0x8),
6608 tr32(RCVDBDI_STD_BD + 0xc));
6609 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6610 tr32(RCVDBDI_MINI_BD + 0x0),
6611 tr32(RCVDBDI_MINI_BD + 0x4),
6612 tr32(RCVDBDI_MINI_BD + 0x8),
6613 tr32(RCVDBDI_MINI_BD + 0xc));
6614
6615 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6616 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6617 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6618 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6619 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6620 val32, val32_2, val32_3, val32_4);
6621
6622 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6623 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6624 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6625 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6626 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6627 val32, val32_2, val32_3, val32_4);
6628
6629 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6630 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6631 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6632 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6633 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6634 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6635 val32, val32_2, val32_3, val32_4, val32_5);
6636
6637 /* SW status block */
6638 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6639 tp->hw_status->status,
6640 tp->hw_status->status_tag,
6641 tp->hw_status->rx_jumbo_consumer,
6642 tp->hw_status->rx_consumer,
6643 tp->hw_status->rx_mini_consumer,
6644 tp->hw_status->idx[0].rx_producer,
6645 tp->hw_status->idx[0].tx_consumer);
6646
6647 /* SW statistics block */
6648 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6649 ((u32 *)tp->hw_stats)[0],
6650 ((u32 *)tp->hw_stats)[1],
6651 ((u32 *)tp->hw_stats)[2],
6652 ((u32 *)tp->hw_stats)[3]);
6653
6654 /* Mailboxes */
6655 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
09ee929c
MC
6656 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6657 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6658 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6659 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
1da177e4
LT
6660
6661 /* NIC side send descriptors. */
6662 for (i = 0; i < 6; i++) {
6663 unsigned long txd;
6664
6665 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6666 + (i * sizeof(struct tg3_tx_buffer_desc));
6667 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6668 i,
6669 readl(txd + 0x0), readl(txd + 0x4),
6670 readl(txd + 0x8), readl(txd + 0xc));
6671 }
6672
6673 /* NIC side RX descriptors. */
6674 for (i = 0; i < 6; i++) {
6675 unsigned long rxd;
6676
6677 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6678 + (i * sizeof(struct tg3_rx_buffer_desc));
6679 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6680 i,
6681 readl(rxd + 0x0), readl(rxd + 0x4),
6682 readl(rxd + 0x8), readl(rxd + 0xc));
6683 rxd += (4 * sizeof(u32));
6684 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6685 i,
6686 readl(rxd + 0x0), readl(rxd + 0x4),
6687 readl(rxd + 0x8), readl(rxd + 0xc));
6688 }
6689
6690 for (i = 0; i < 6; i++) {
6691 unsigned long rxd;
6692
6693 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6694 + (i * sizeof(struct tg3_rx_buffer_desc));
6695 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6696 i,
6697 readl(rxd + 0x0), readl(rxd + 0x4),
6698 readl(rxd + 0x8), readl(rxd + 0xc));
6699 rxd += (4 * sizeof(u32));
6700 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6701 i,
6702 readl(rxd + 0x0), readl(rxd + 0x4),
6703 readl(rxd + 0x8), readl(rxd + 0xc));
6704 }
6705}
6706#endif
6707
/* Forward declarations: tg3_close() folds the final hardware counters into
 * the saved snapshots via these before freeing the stats memory.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6710
6711static int tg3_close(struct net_device *dev)
6712{
6713 struct tg3 *tp = netdev_priv(dev);
6714
6715 netif_stop_queue(dev);
6716
6717 del_timer_sync(&tp->timer);
6718
f47c11ee 6719 tg3_full_lock(tp, 1);
1da177e4
LT
6720#if 0
6721 tg3_dump_state(tp);
6722#endif
6723
6724 tg3_disable_ints(tp);
6725
944d980e 6726 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
6727 tg3_free_rings(tp);
6728 tp->tg3_flags &=
6729 ~(TG3_FLAG_INIT_COMPLETE |
6730 TG3_FLAG_GOT_SERDES_FLOWCTL);
6731 netif_carrier_off(tp->dev);
6732
f47c11ee 6733 tg3_full_unlock(tp);
1da177e4 6734
88b06bc2
MC
6735 free_irq(tp->pdev->irq, dev);
6736 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6737 pci_disable_msi(tp->pdev);
6738 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6739 }
1da177e4
LT
6740
6741 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6742 sizeof(tp->net_stats_prev));
6743 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6744 sizeof(tp->estats_prev));
6745
6746 tg3_free_consistent(tp);
6747
6748 return 0;
6749}
6750
6751static inline unsigned long get_stat64(tg3_stat64_t *val)
6752{
6753 unsigned long ret;
6754
6755#if (BITS_PER_LONG == 32)
6756 ret = val->low;
6757#else
6758 ret = ((u64)val->high << 32) | ((u64)val->low);
6759#endif
6760 return ret;
6761}
6762
6763static unsigned long calc_crc_errors(struct tg3 *tp)
6764{
6765 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6766
6767 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6768 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6769 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1da177e4
LT
6770 u32 val;
6771
f47c11ee 6772 spin_lock_bh(&tp->lock);
1da177e4
LT
6773 if (!tg3_readphy(tp, 0x1e, &val)) {
6774 tg3_writephy(tp, 0x1e, val | 0x8000);
6775 tg3_readphy(tp, 0x14, &val);
6776 } else
6777 val = 0;
f47c11ee 6778 spin_unlock_bh(&tp->lock);
1da177e4
LT
6779
6780 tp->phy_crc_errors += val;
6781
6782 return tp->phy_crc_errors;
6783 }
6784
6785 return get_stat64(&hw_stats->rx_fcs_errors);
6786}
6787
6788#define ESTAT_ADD(member) \
6789 estats->member = old_estats->member + \
6790 get_stat64(&hw_stats->member)
6791
6792static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6793{
6794 struct tg3_ethtool_stats *estats = &tp->estats;
6795 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6796 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6797
6798 if (!hw_stats)
6799 return old_estats;
6800
6801 ESTAT_ADD(rx_octets);
6802 ESTAT_ADD(rx_fragments);
6803 ESTAT_ADD(rx_ucast_packets);
6804 ESTAT_ADD(rx_mcast_packets);
6805 ESTAT_ADD(rx_bcast_packets);
6806 ESTAT_ADD(rx_fcs_errors);
6807 ESTAT_ADD(rx_align_errors);
6808 ESTAT_ADD(rx_xon_pause_rcvd);
6809 ESTAT_ADD(rx_xoff_pause_rcvd);
6810 ESTAT_ADD(rx_mac_ctrl_rcvd);
6811 ESTAT_ADD(rx_xoff_entered);
6812 ESTAT_ADD(rx_frame_too_long_errors);
6813 ESTAT_ADD(rx_jabbers);
6814 ESTAT_ADD(rx_undersize_packets);
6815 ESTAT_ADD(rx_in_length_errors);
6816 ESTAT_ADD(rx_out_length_errors);
6817 ESTAT_ADD(rx_64_or_less_octet_packets);
6818 ESTAT_ADD(rx_65_to_127_octet_packets);
6819 ESTAT_ADD(rx_128_to_255_octet_packets);
6820 ESTAT_ADD(rx_256_to_511_octet_packets);
6821 ESTAT_ADD(rx_512_to_1023_octet_packets);
6822 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6823 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6824 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6825 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6826 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6827
6828 ESTAT_ADD(tx_octets);
6829 ESTAT_ADD(tx_collisions);
6830 ESTAT_ADD(tx_xon_sent);
6831 ESTAT_ADD(tx_xoff_sent);
6832 ESTAT_ADD(tx_flow_control);
6833 ESTAT_ADD(tx_mac_errors);
6834 ESTAT_ADD(tx_single_collisions);
6835 ESTAT_ADD(tx_mult_collisions);
6836 ESTAT_ADD(tx_deferred);
6837 ESTAT_ADD(tx_excessive_collisions);
6838 ESTAT_ADD(tx_late_collisions);
6839 ESTAT_ADD(tx_collide_2times);
6840 ESTAT_ADD(tx_collide_3times);
6841 ESTAT_ADD(tx_collide_4times);
6842 ESTAT_ADD(tx_collide_5times);
6843 ESTAT_ADD(tx_collide_6times);
6844 ESTAT_ADD(tx_collide_7times);
6845 ESTAT_ADD(tx_collide_8times);
6846 ESTAT_ADD(tx_collide_9times);
6847 ESTAT_ADD(tx_collide_10times);
6848 ESTAT_ADD(tx_collide_11times);
6849 ESTAT_ADD(tx_collide_12times);
6850 ESTAT_ADD(tx_collide_13times);
6851 ESTAT_ADD(tx_collide_14times);
6852 ESTAT_ADD(tx_collide_15times);
6853 ESTAT_ADD(tx_ucast_packets);
6854 ESTAT_ADD(tx_mcast_packets);
6855 ESTAT_ADD(tx_bcast_packets);
6856 ESTAT_ADD(tx_carrier_sense_errors);
6857 ESTAT_ADD(tx_discards);
6858 ESTAT_ADD(tx_errors);
6859
6860 ESTAT_ADD(dma_writeq_full);
6861 ESTAT_ADD(dma_write_prioq_full);
6862 ESTAT_ADD(rxbds_empty);
6863 ESTAT_ADD(rx_discards);
6864 ESTAT_ADD(rx_errors);
6865 ESTAT_ADD(rx_threshold_hit);
6866
6867 ESTAT_ADD(dma_readq_full);
6868 ESTAT_ADD(dma_read_prioq_full);
6869 ESTAT_ADD(tx_comp_queue_full);
6870
6871 ESTAT_ADD(ring_set_send_prod_index);
6872 ESTAT_ADD(ring_status_update);
6873 ESTAT_ADD(nic_irqs);
6874 ESTAT_ADD(nic_avoided_irqs);
6875 ESTAT_ADD(nic_tx_threshold_hit);
6876
6877 return estats;
6878}
6879
6880static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6881{
6882 struct tg3 *tp = netdev_priv(dev);
6883 struct net_device_stats *stats = &tp->net_stats;
6884 struct net_device_stats *old_stats = &tp->net_stats_prev;
6885 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6886
6887 if (!hw_stats)
6888 return old_stats;
6889
6890 stats->rx_packets = old_stats->rx_packets +
6891 get_stat64(&hw_stats->rx_ucast_packets) +
6892 get_stat64(&hw_stats->rx_mcast_packets) +
6893 get_stat64(&hw_stats->rx_bcast_packets);
6894
6895 stats->tx_packets = old_stats->tx_packets +
6896 get_stat64(&hw_stats->tx_ucast_packets) +
6897 get_stat64(&hw_stats->tx_mcast_packets) +
6898 get_stat64(&hw_stats->tx_bcast_packets);
6899
6900 stats->rx_bytes = old_stats->rx_bytes +
6901 get_stat64(&hw_stats->rx_octets);
6902 stats->tx_bytes = old_stats->tx_bytes +
6903 get_stat64(&hw_stats->tx_octets);
6904
6905 stats->rx_errors = old_stats->rx_errors +
4f63b877 6906 get_stat64(&hw_stats->rx_errors);
1da177e4
LT
6907 stats->tx_errors = old_stats->tx_errors +
6908 get_stat64(&hw_stats->tx_errors) +
6909 get_stat64(&hw_stats->tx_mac_errors) +
6910 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6911 get_stat64(&hw_stats->tx_discards);
6912
6913 stats->multicast = old_stats->multicast +
6914 get_stat64(&hw_stats->rx_mcast_packets);
6915 stats->collisions = old_stats->collisions +
6916 get_stat64(&hw_stats->tx_collisions);
6917
6918 stats->rx_length_errors = old_stats->rx_length_errors +
6919 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6920 get_stat64(&hw_stats->rx_undersize_packets);
6921
6922 stats->rx_over_errors = old_stats->rx_over_errors +
6923 get_stat64(&hw_stats->rxbds_empty);
6924 stats->rx_frame_errors = old_stats->rx_frame_errors +
6925 get_stat64(&hw_stats->rx_align_errors);
6926 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6927 get_stat64(&hw_stats->tx_discards);
6928 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6929 get_stat64(&hw_stats->tx_carrier_sense_errors);
6930
6931 stats->rx_crc_errors = old_stats->rx_crc_errors +
6932 calc_crc_errors(tp);
6933
4f63b877
JL
6934 stats->rx_missed_errors = old_stats->rx_missed_errors +
6935 get_stat64(&hw_stats->rx_discards);
6936
1da177e4
LT
6937 return stats;
6938}
6939
6940static inline u32 calc_crc(unsigned char *buf, int len)
6941{
6942 u32 reg;
6943 u32 tmp;
6944 int j, k;
6945
6946 reg = 0xffffffff;
6947
6948 for (j = 0; j < len; j++) {
6949 reg ^= buf[j];
6950
6951 for (k = 0; k < 8; k++) {
6952 tmp = reg & 0x01;
6953
6954 reg >>= 1;
6955
6956 if (tmp) {
6957 reg ^= 0xedb88320;
6958 }
6959 }
6960 }
6961
6962 return ~reg;
6963}
6964
6965static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6966{
6967 /* accept or reject all multicast frames */
6968 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6969 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6970 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6971 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6972}
6973
6974static void __tg3_set_rx_mode(struct net_device *dev)
6975{
6976 struct tg3 *tp = netdev_priv(dev);
6977 u32 rx_mode;
6978
6979 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6980 RX_MODE_KEEP_VLAN_TAG);
6981
6982 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6983 * flag clear.
6984 */
6985#if TG3_VLAN_TAG_USED
6986 if (!tp->vlgrp &&
6987 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6988 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6989#else
6990 /* By definition, VLAN is disabled always in this
6991 * case.
6992 */
6993 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6994 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6995#endif
6996
6997 if (dev->flags & IFF_PROMISC) {
6998 /* Promiscuous mode. */
6999 rx_mode |= RX_MODE_PROMISC;
7000 } else if (dev->flags & IFF_ALLMULTI) {
7001 /* Accept all multicast. */
7002 tg3_set_multi (tp, 1);
7003 } else if (dev->mc_count < 1) {
7004 /* Reject all multicast. */
7005 tg3_set_multi (tp, 0);
7006 } else {
7007 /* Accept one or more multicast(s). */
7008 struct dev_mc_list *mclist;
7009 unsigned int i;
7010 u32 mc_filter[4] = { 0, };
7011 u32 regidx;
7012 u32 bit;
7013 u32 crc;
7014
7015 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7016 i++, mclist = mclist->next) {
7017
7018 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7019 bit = ~crc & 0x7f;
7020 regidx = (bit & 0x60) >> 5;
7021 bit &= 0x1f;
7022 mc_filter[regidx] |= (1 << bit);
7023 }
7024
7025 tw32(MAC_HASH_REG_0, mc_filter[0]);
7026 tw32(MAC_HASH_REG_1, mc_filter[1]);
7027 tw32(MAC_HASH_REG_2, mc_filter[2]);
7028 tw32(MAC_HASH_REG_3, mc_filter[3]);
7029 }
7030
7031 if (rx_mode != tp->rx_mode) {
7032 tp->rx_mode = rx_mode;
7033 tw32_f(MAC_RX_MODE, rx_mode);
7034 udelay(10);
7035 }
7036}
7037
/* net_device set_rx_mode hook: locked wrapper around __tg3_set_rx_mode. */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
7046
/* Size of the ethtool register dump buffer (fixed 32KB window). */
#define TG3_REGDUMP_LEN		(32 * 1024)

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7053
7054static void tg3_get_regs(struct net_device *dev,
7055 struct ethtool_regs *regs, void *_p)
7056{
7057 u32 *p = _p;
7058 struct tg3 *tp = netdev_priv(dev);
7059 u8 *orig_p = _p;
7060 int i;
7061
7062 regs->version = 0;
7063
7064 memset(p, 0, TG3_REGDUMP_LEN);
7065
f47c11ee 7066 tg3_full_lock(tp, 0);
1da177e4
LT
7067
7068#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7069#define GET_REG32_LOOP(base,len) \
7070do { p = (u32 *)(orig_p + (base)); \
7071 for (i = 0; i < len; i += 4) \
7072 __GET_REG32((base) + i); \
7073} while (0)
7074#define GET_REG32_1(reg) \
7075do { p = (u32 *)(orig_p + (reg)); \
7076 __GET_REG32((reg)); \
7077} while (0)
7078
7079 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7080 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7081 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7082 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7083 GET_REG32_1(SNDDATAC_MODE);
7084 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7085 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7086 GET_REG32_1(SNDBDC_MODE);
7087 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7088 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7089 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7090 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7091 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7092 GET_REG32_1(RCVDCC_MODE);
7093 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7094 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7095 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7096 GET_REG32_1(MBFREE_MODE);
7097 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7098 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7099 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7100 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7101 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7102 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7103 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7104 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7105 GET_REG32_LOOP(FTQ_RESET, 0x120);
7106 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7107 GET_REG32_1(DMAC_MODE);
7108 GET_REG32_LOOP(GRC_MODE, 0x4c);
7109 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7110 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7111
7112#undef __GET_REG32
7113#undef GET_REG32_LOOP
7114#undef GET_REG32_1
7115
f47c11ee 7116 tg3_full_unlock(tp);
1da177e4
LT
7117}
7118
7119static int tg3_get_eeprom_len(struct net_device *dev)
7120{
7121 struct tg3 *tp = netdev_priv(dev);
7122
7123 return tp->nvram_size;
7124}
7125
7126static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7127
7128static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7129{
7130 struct tg3 *tp = netdev_priv(dev);
7131 int ret;
7132 u8 *pd;
7133 u32 i, offset, len, val, b_offset, b_count;
7134
7135 offset = eeprom->offset;
7136 len = eeprom->len;
7137 eeprom->len = 0;
7138
7139 eeprom->magic = TG3_EEPROM_MAGIC;
7140
7141 if (offset & 3) {
7142 /* adjustments to start on required 4 byte boundary */
7143 b_offset = offset & 3;
7144 b_count = 4 - b_offset;
7145 if (b_count > len) {
7146 /* i.e. offset=1 len=2 */
7147 b_count = len;
7148 }
7149 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7150 if (ret)
7151 return ret;
7152 val = cpu_to_le32(val);
7153 memcpy(data, ((char*)&val) + b_offset, b_count);
7154 len -= b_count;
7155 offset += b_count;
7156 eeprom->len += b_count;
7157 }
7158
7159 /* read bytes upto the last 4 byte boundary */
7160 pd = &data[eeprom->len];
7161 for (i = 0; i < (len - (len & 3)); i += 4) {
7162 ret = tg3_nvram_read(tp, offset + i, &val);
7163 if (ret) {
7164 eeprom->len += i;
7165 return ret;
7166 }
7167 val = cpu_to_le32(val);
7168 memcpy(pd + i, &val, 4);
7169 }
7170 eeprom->len += i;
7171
7172 if (len & 3) {
7173 /* read last bytes not ending on 4 byte boundary */
7174 pd = &data[eeprom->len];
7175 b_count = len & 3;
7176 b_offset = offset + len - b_count;
7177 ret = tg3_nvram_read(tp, b_offset, &val);
7178 if (ret)
7179 return ret;
7180 val = cpu_to_le32(val);
7181 memcpy(pd, ((char*)&val), b_count);
7182 eeprom->len += b_count;
7183 }
7184 return 0;
7185}
7186
7187static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7188
7189static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7190{
7191 struct tg3 *tp = netdev_priv(dev);
7192 int ret;
7193 u32 offset, len, b_offset, odd_len, start, end;
7194 u8 *buf;
7195
7196 if (eeprom->magic != TG3_EEPROM_MAGIC)
7197 return -EINVAL;
7198
7199 offset = eeprom->offset;
7200 len = eeprom->len;
7201
7202 if ((b_offset = (offset & 3))) {
7203 /* adjustments to start on required 4 byte boundary */
7204 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7205 if (ret)
7206 return ret;
7207 start = cpu_to_le32(start);
7208 len += b_offset;
7209 offset &= ~3;
1c8594b4
MC
7210 if (len < 4)
7211 len = 4;
1da177e4
LT
7212 }
7213
7214 odd_len = 0;
1c8594b4 7215 if (len & 3) {
1da177e4
LT
7216 /* adjustments to end on required 4 byte boundary */
7217 odd_len = 1;
7218 len = (len + 3) & ~3;
7219 ret = tg3_nvram_read(tp, offset+len-4, &end);
7220 if (ret)
7221 return ret;
7222 end = cpu_to_le32(end);
7223 }
7224
7225 buf = data;
7226 if (b_offset || odd_len) {
7227 buf = kmalloc(len, GFP_KERNEL);
7228 if (buf == 0)
7229 return -ENOMEM;
7230 if (b_offset)
7231 memcpy(buf, &start, 4);
7232 if (odd_len)
7233 memcpy(buf+len-4, &end, 4);
7234 memcpy(buf + b_offset, data, eeprom->len);
7235 }
7236
7237 ret = tg3_nvram_write_block(tp, offset, len, buf);
7238
7239 if (buf != data)
7240 kfree(buf);
7241
7242 return ret;
7243}
7244
7245static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7246{
7247 struct tg3 *tp = netdev_priv(dev);
7248
7249 cmd->supported = (SUPPORTED_Autoneg);
7250
7251 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7252 cmd->supported |= (SUPPORTED_1000baseT_Half |
7253 SUPPORTED_1000baseT_Full);
7254
a4e2b347 7255 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
7256 cmd->supported |= (SUPPORTED_100baseT_Half |
7257 SUPPORTED_100baseT_Full |
7258 SUPPORTED_10baseT_Half |
7259 SUPPORTED_10baseT_Full |
7260 SUPPORTED_MII);
7261 else
7262 cmd->supported |= SUPPORTED_FIBRE;
7263
7264 cmd->advertising = tp->link_config.advertising;
7265 if (netif_running(dev)) {
7266 cmd->speed = tp->link_config.active_speed;
7267 cmd->duplex = tp->link_config.active_duplex;
7268 }
7269 cmd->port = 0;
7270 cmd->phy_address = PHY_ADDR;
7271 cmd->transceiver = 0;
7272 cmd->autoneg = tp->link_config.autoneg;
7273 cmd->maxtxpkt = 0;
7274 cmd->maxrxpkt = 0;
7275 return 0;
7276}
7277
7278static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7279{
7280 struct tg3 *tp = netdev_priv(dev);
7281
7282 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7283 /* These are the only valid advertisement bits allowed. */
7284 if (cmd->autoneg == AUTONEG_ENABLE &&
7285 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7286 ADVERTISED_1000baseT_Full |
7287 ADVERTISED_Autoneg |
7288 ADVERTISED_FIBRE)))
7289 return -EINVAL;
7290 }
7291
f47c11ee 7292 tg3_full_lock(tp, 0);
1da177e4
LT
7293
7294 tp->link_config.autoneg = cmd->autoneg;
7295 if (cmd->autoneg == AUTONEG_ENABLE) {
7296 tp->link_config.advertising = cmd->advertising;
7297 tp->link_config.speed = SPEED_INVALID;
7298 tp->link_config.duplex = DUPLEX_INVALID;
7299 } else {
7300 tp->link_config.advertising = 0;
7301 tp->link_config.speed = cmd->speed;
7302 tp->link_config.duplex = cmd->duplex;
7303 }
7304
7305 if (netif_running(dev))
7306 tg3_setup_phy(tp, 1);
7307
f47c11ee 7308 tg3_full_unlock(tp);
1da177e4
LT
7309
7310 return 0;
7311}
7312
7313static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7314{
7315 struct tg3 *tp = netdev_priv(dev);
7316
7317 strcpy(info->driver, DRV_MODULE_NAME);
7318 strcpy(info->version, DRV_MODULE_VERSION);
7319 strcpy(info->bus_info, pci_name(tp->pdev));
7320}
7321
7322static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7323{
7324 struct tg3 *tp = netdev_priv(dev);
7325
7326 wol->supported = WAKE_MAGIC;
7327 wol->wolopts = 0;
7328 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7329 wol->wolopts = WAKE_MAGIC;
7330 memset(&wol->sopass, 0, sizeof(wol->sopass));
7331}
7332
7333static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7334{
7335 struct tg3 *tp = netdev_priv(dev);
7336
7337 if (wol->wolopts & ~WAKE_MAGIC)
7338 return -EINVAL;
7339 if ((wol->wolopts & WAKE_MAGIC) &&
7340 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7341 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7342 return -EINVAL;
7343
f47c11ee 7344 spin_lock_bh(&tp->lock);
1da177e4
LT
7345 if (wol->wolopts & WAKE_MAGIC)
7346 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7347 else
7348 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 7349 spin_unlock_bh(&tp->lock);
1da177e4
LT
7350
7351 return 0;
7352}
7353
7354static u32 tg3_get_msglevel(struct net_device *dev)
7355{
7356 struct tg3 *tp = netdev_priv(dev);
7357 return tp->msg_enable;
7358}
7359
7360static void tg3_set_msglevel(struct net_device *dev, u32 value)
7361{
7362 struct tg3 *tp = netdev_priv(dev);
7363 tp->msg_enable = value;
7364}
7365
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso hook: allow toggling TSO only on TSO-capable chips;
 * on others, enabling is rejected and disabling is a no-op.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	return ethtool_op_set_tso(dev, value);
}
#endif
7379
7380static int tg3_nway_reset(struct net_device *dev)
7381{
7382 struct tg3 *tp = netdev_priv(dev);
7383 u32 bmcr;
7384 int r;
7385
7386 if (!netif_running(dev))
7387 return -EAGAIN;
7388
c94e3941
MC
7389 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7390 return -EINVAL;
7391
f47c11ee 7392 spin_lock_bh(&tp->lock);
1da177e4
LT
7393 r = -EINVAL;
7394 tg3_readphy(tp, MII_BMCR, &bmcr);
7395 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
c94e3941
MC
7396 ((bmcr & BMCR_ANENABLE) ||
7397 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7398 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7399 BMCR_ANENABLE);
1da177e4
LT
7400 r = 0;
7401 }
f47c11ee 7402 spin_unlock_bh(&tp->lock);
1da177e4
LT
7403
7404 return r;
7405}
7406
7407static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7408{
7409 struct tg3 *tp = netdev_priv(dev);
7410
7411 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7412 ering->rx_mini_max_pending = 0;
7413 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7414
7415 ering->rx_pending = tp->rx_pending;
7416 ering->rx_mini_pending = 0;
7417 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7418 ering->tx_pending = tp->tx_pending;
7419}
7420
7421static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7422{
7423 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7424 int irq_sync = 0;
1da177e4
LT
7425
7426 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7427 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7428 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7429 return -EINVAL;
7430
bbe832c0 7431 if (netif_running(dev)) {
1da177e4 7432 tg3_netif_stop(tp);
bbe832c0
MC
7433 irq_sync = 1;
7434 }
1da177e4 7435
bbe832c0 7436 tg3_full_lock(tp, irq_sync);
1da177e4
LT
7437
7438 tp->rx_pending = ering->rx_pending;
7439
7440 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7441 tp->rx_pending > 63)
7442 tp->rx_pending = 63;
7443 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7444 tp->tx_pending = ering->tx_pending;
7445
7446 if (netif_running(dev)) {
944d980e 7447 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7448 tg3_init_hw(tp);
7449 tg3_netif_start(tp);
7450 }
7451
f47c11ee 7452 tg3_full_unlock(tp);
1da177e4
LT
7453
7454 return 0;
7455}
7456
7457static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7458{
7459 struct tg3 *tp = netdev_priv(dev);
7460
7461 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7462 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7463 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7464}
7465
7466static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7467{
7468 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7469 int irq_sync = 0;
1da177e4 7470
bbe832c0 7471 if (netif_running(dev)) {
1da177e4 7472 tg3_netif_stop(tp);
bbe832c0
MC
7473 irq_sync = 1;
7474 }
1da177e4 7475
bbe832c0 7476 tg3_full_lock(tp, irq_sync);
f47c11ee 7477
1da177e4
LT
7478 if (epause->autoneg)
7479 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7480 else
7481 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7482 if (epause->rx_pause)
7483 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7484 else
7485 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7486 if (epause->tx_pause)
7487 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7488 else
7489 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7490
7491 if (netif_running(dev)) {
944d980e 7492 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7493 tg3_init_hw(tp);
7494 tg3_netif_start(tp);
7495 }
f47c11ee
DM
7496
7497 tg3_full_unlock(tp);
1da177e4
LT
7498
7499 return 0;
7500}
7501
7502static u32 tg3_get_rx_csum(struct net_device *dev)
7503{
7504 struct tg3 *tp = netdev_priv(dev);
7505 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7506}
7507
7508static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7509{
7510 struct tg3 *tp = netdev_priv(dev);
7511
7512 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7513 if (data != 0)
7514 return -EINVAL;
7515 return 0;
7516 }
7517
f47c11ee 7518 spin_lock_bh(&tp->lock);
1da177e4
LT
7519 if (data)
7520 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7521 else
7522 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 7523 spin_unlock_bh(&tp->lock);
1da177e4
LT
7524
7525 return 0;
7526}
7527
7528static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7529{
7530 struct tg3 *tp = netdev_priv(dev);
7531
7532 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7533 if (data != 0)
7534 return -EINVAL;
7535 return 0;
7536 }
7537
7538 if (data)
7539 dev->features |= NETIF_F_IP_CSUM;
7540 else
7541 dev->features &= ~NETIF_F_IP_CSUM;
7542
7543 return 0;
7544}
7545
7546static int tg3_get_stats_count (struct net_device *dev)
7547{
7548 return TG3_NUM_STATS;
7549}
7550
4cafd3f5
MC
7551static int tg3_get_test_count (struct net_device *dev)
7552{
7553 return TG3_NUM_TEST;
7554}
7555
1da177e4
LT
7556static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7557{
7558 switch (stringset) {
7559 case ETH_SS_STATS:
7560 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7561 break;
4cafd3f5
MC
7562 case ETH_SS_TEST:
7563 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7564 break;
1da177e4
LT
7565 default:
7566 WARN_ON(1); /* we need a WARN() */
7567 break;
7568 }
7569}
7570
4009a93d
MC
7571static int tg3_phys_id(struct net_device *dev, u32 data)
7572{
7573 struct tg3 *tp = netdev_priv(dev);
7574 int i;
7575
7576 if (!netif_running(tp->dev))
7577 return -EAGAIN;
7578
7579 if (data == 0)
7580 data = 2;
7581
7582 for (i = 0; i < (data * 2); i++) {
7583 if ((i % 2) == 0)
7584 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7585 LED_CTRL_1000MBPS_ON |
7586 LED_CTRL_100MBPS_ON |
7587 LED_CTRL_10MBPS_ON |
7588 LED_CTRL_TRAFFIC_OVERRIDE |
7589 LED_CTRL_TRAFFIC_BLINK |
7590 LED_CTRL_TRAFFIC_LED);
7591
7592 else
7593 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7594 LED_CTRL_TRAFFIC_OVERRIDE);
7595
7596 if (msleep_interruptible(500))
7597 break;
7598 }
7599 tw32(MAC_LED_CTRL, tp->led_ctrl);
7600 return 0;
7601}
7602
1da177e4
LT
7603static void tg3_get_ethtool_stats (struct net_device *dev,
7604 struct ethtool_stats *estats, u64 *tmp_stats)
7605{
7606 struct tg3 *tp = netdev_priv(dev);
7607 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7608}
7609
566f86ad
MC
7610#define NVRAM_TEST_SIZE 0x100
7611
7612static int tg3_test_nvram(struct tg3 *tp)
7613{
7614 u32 *buf, csum;
7615 int i, j, err = 0;
7616
7617 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7618 if (buf == NULL)
7619 return -ENOMEM;
7620
7621 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7622 u32 val;
7623
7624 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7625 break;
7626 buf[j] = cpu_to_le32(val);
7627 }
7628 if (i < NVRAM_TEST_SIZE)
7629 goto out;
7630
7631 err = -EIO;
7632 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7633 goto out;
7634
7635 /* Bootstrap checksum at offset 0x10 */
7636 csum = calc_crc((unsigned char *) buf, 0x10);
7637 if(csum != cpu_to_le32(buf[0x10/4]))
7638 goto out;
7639
7640 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7641 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7642 if (csum != cpu_to_le32(buf[0xfc/4]))
7643 goto out;
7644
7645 err = 0;
7646
7647out:
7648 kfree(buf);
7649 return err;
7650}
7651
ca43007a
MC
7652#define TG3_SERDES_TIMEOUT_SEC 2
7653#define TG3_COPPER_TIMEOUT_SEC 6
7654
7655static int tg3_test_link(struct tg3 *tp)
7656{
7657 int i, max;
7658
7659 if (!netif_running(tp->dev))
7660 return -ENODEV;
7661
4c987487 7662 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
7663 max = TG3_SERDES_TIMEOUT_SEC;
7664 else
7665 max = TG3_COPPER_TIMEOUT_SEC;
7666
7667 for (i = 0; i < max; i++) {
7668 if (netif_carrier_ok(tp->dev))
7669 return 0;
7670
7671 if (msleep_interruptible(1000))
7672 break;
7673 }
7674
7675 return -EIO;
7676}
7677
a71116d1
MC
7678/* Only test the commonly used registers */
7679static int tg3_test_registers(struct tg3 *tp)
7680{
7681 int i, is_5705;
7682 u32 offset, read_mask, write_mask, val, save_val, read_val;
7683 static struct {
7684 u16 offset;
7685 u16 flags;
7686#define TG3_FL_5705 0x1
7687#define TG3_FL_NOT_5705 0x2
7688#define TG3_FL_NOT_5788 0x4
7689 u32 read_mask;
7690 u32 write_mask;
7691 } reg_tbl[] = {
7692 /* MAC Control Registers */
7693 { MAC_MODE, TG3_FL_NOT_5705,
7694 0x00000000, 0x00ef6f8c },
7695 { MAC_MODE, TG3_FL_5705,
7696 0x00000000, 0x01ef6b8c },
7697 { MAC_STATUS, TG3_FL_NOT_5705,
7698 0x03800107, 0x00000000 },
7699 { MAC_STATUS, TG3_FL_5705,
7700 0x03800100, 0x00000000 },
7701 { MAC_ADDR_0_HIGH, 0x0000,
7702 0x00000000, 0x0000ffff },
7703 { MAC_ADDR_0_LOW, 0x0000,
7704 0x00000000, 0xffffffff },
7705 { MAC_RX_MTU_SIZE, 0x0000,
7706 0x00000000, 0x0000ffff },
7707 { MAC_TX_MODE, 0x0000,
7708 0x00000000, 0x00000070 },
7709 { MAC_TX_LENGTHS, 0x0000,
7710 0x00000000, 0x00003fff },
7711 { MAC_RX_MODE, TG3_FL_NOT_5705,
7712 0x00000000, 0x000007fc },
7713 { MAC_RX_MODE, TG3_FL_5705,
7714 0x00000000, 0x000007dc },
7715 { MAC_HASH_REG_0, 0x0000,
7716 0x00000000, 0xffffffff },
7717 { MAC_HASH_REG_1, 0x0000,
7718 0x00000000, 0xffffffff },
7719 { MAC_HASH_REG_2, 0x0000,
7720 0x00000000, 0xffffffff },
7721 { MAC_HASH_REG_3, 0x0000,
7722 0x00000000, 0xffffffff },
7723
7724 /* Receive Data and Receive BD Initiator Control Registers. */
7725 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7726 0x00000000, 0xffffffff },
7727 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7728 0x00000000, 0xffffffff },
7729 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7730 0x00000000, 0x00000003 },
7731 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7732 0x00000000, 0xffffffff },
7733 { RCVDBDI_STD_BD+0, 0x0000,
7734 0x00000000, 0xffffffff },
7735 { RCVDBDI_STD_BD+4, 0x0000,
7736 0x00000000, 0xffffffff },
7737 { RCVDBDI_STD_BD+8, 0x0000,
7738 0x00000000, 0xffff0002 },
7739 { RCVDBDI_STD_BD+0xc, 0x0000,
7740 0x00000000, 0xffffffff },
7741
7742 /* Receive BD Initiator Control Registers. */
7743 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7744 0x00000000, 0xffffffff },
7745 { RCVBDI_STD_THRESH, TG3_FL_5705,
7746 0x00000000, 0x000003ff },
7747 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7748 0x00000000, 0xffffffff },
7749
7750 /* Host Coalescing Control Registers. */
7751 { HOSTCC_MODE, TG3_FL_NOT_5705,
7752 0x00000000, 0x00000004 },
7753 { HOSTCC_MODE, TG3_FL_5705,
7754 0x00000000, 0x000000f6 },
7755 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7756 0x00000000, 0xffffffff },
7757 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7758 0x00000000, 0x000003ff },
7759 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7760 0x00000000, 0xffffffff },
7761 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7762 0x00000000, 0x000003ff },
7763 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7764 0x00000000, 0xffffffff },
7765 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7766 0x00000000, 0x000000ff },
7767 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7768 0x00000000, 0xffffffff },
7769 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7770 0x00000000, 0x000000ff },
7771 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7772 0x00000000, 0xffffffff },
7773 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7774 0x00000000, 0xffffffff },
7775 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7776 0x00000000, 0xffffffff },
7777 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7778 0x00000000, 0x000000ff },
7779 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7780 0x00000000, 0xffffffff },
7781 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7782 0x00000000, 0x000000ff },
7783 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7784 0x00000000, 0xffffffff },
7785 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7786 0x00000000, 0xffffffff },
7787 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7788 0x00000000, 0xffffffff },
7789 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7790 0x00000000, 0xffffffff },
7791 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7792 0x00000000, 0xffffffff },
7793 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7794 0xffffffff, 0x00000000 },
7795 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7796 0xffffffff, 0x00000000 },
7797
7798 /* Buffer Manager Control Registers. */
7799 { BUFMGR_MB_POOL_ADDR, 0x0000,
7800 0x00000000, 0x007fff80 },
7801 { BUFMGR_MB_POOL_SIZE, 0x0000,
7802 0x00000000, 0x007fffff },
7803 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7804 0x00000000, 0x0000003f },
7805 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7806 0x00000000, 0x000001ff },
7807 { BUFMGR_MB_HIGH_WATER, 0x0000,
7808 0x00000000, 0x000001ff },
7809 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7810 0xffffffff, 0x00000000 },
7811 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7812 0xffffffff, 0x00000000 },
7813
7814 /* Mailbox Registers */
7815 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7816 0x00000000, 0x000001ff },
7817 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7818 0x00000000, 0x000001ff },
7819 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7820 0x00000000, 0x000007ff },
7821 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7822 0x00000000, 0x000001ff },
7823
7824 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7825 };
7826
7827 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7828 is_5705 = 1;
7829 else
7830 is_5705 = 0;
7831
7832 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7833 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7834 continue;
7835
7836 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7837 continue;
7838
7839 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7840 (reg_tbl[i].flags & TG3_FL_NOT_5788))
7841 continue;
7842
7843 offset = (u32) reg_tbl[i].offset;
7844 read_mask = reg_tbl[i].read_mask;
7845 write_mask = reg_tbl[i].write_mask;
7846
7847 /* Save the original register content */
7848 save_val = tr32(offset);
7849
7850 /* Determine the read-only value. */
7851 read_val = save_val & read_mask;
7852
7853 /* Write zero to the register, then make sure the read-only bits
7854 * are not changed and the read/write bits are all zeros.
7855 */
7856 tw32(offset, 0);
7857
7858 val = tr32(offset);
7859
7860 /* Test the read-only and read/write bits. */
7861 if (((val & read_mask) != read_val) || (val & write_mask))
7862 goto out;
7863
7864 /* Write ones to all the bits defined by RdMask and WrMask, then
7865 * make sure the read-only bits are not changed and the
7866 * read/write bits are all ones.
7867 */
7868 tw32(offset, read_mask | write_mask);
7869
7870 val = tr32(offset);
7871
7872 /* Test the read-only bits. */
7873 if ((val & read_mask) != read_val)
7874 goto out;
7875
7876 /* Test the read/write bits. */
7877 if ((val & write_mask) != write_mask)
7878 goto out;
7879
7880 tw32(offset, save_val);
7881 }
7882
7883 return 0;
7884
7885out:
7886 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7887 tw32(offset, save_val);
7888 return -EIO;
7889}
7890
7942e1db
MC
7891static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7892{
7893 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7894 int i;
7895 u32 j;
7896
7897 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7898 for (j = 0; j < len; j += 4) {
7899 u32 val;
7900
7901 tg3_write_mem(tp, offset + j, test_pattern[i]);
7902 tg3_read_mem(tp, offset + j, &val);
7903 if (val != test_pattern[i])
7904 return -EIO;
7905 }
7906 }
7907 return 0;
7908}
7909
7910static int tg3_test_memory(struct tg3 *tp)
7911{
7912 static struct mem_entry {
7913 u32 offset;
7914 u32 len;
7915 } mem_tbl_570x[] = {
7916 { 0x00000000, 0x01000},
7917 { 0x00002000, 0x1c000},
7918 { 0xffffffff, 0x00000}
7919 }, mem_tbl_5705[] = {
7920 { 0x00000100, 0x0000c},
7921 { 0x00000200, 0x00008},
7922 { 0x00000b50, 0x00400},
7923 { 0x00004000, 0x00800},
7924 { 0x00006000, 0x01000},
7925 { 0x00008000, 0x02000},
7926 { 0x00010000, 0x0e000},
7927 { 0xffffffff, 0x00000}
7928 };
7929 struct mem_entry *mem_tbl;
7930 int err = 0;
7931 int i;
7932
7933 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7934 mem_tbl = mem_tbl_5705;
7935 else
7936 mem_tbl = mem_tbl_570x;
7937
7938 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7939 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7940 mem_tbl[i].len)) != 0)
7941 break;
7942 }
7943
7944 return err;
7945}
7946
9f40dead
MC
7947#define TG3_MAC_LOOPBACK 0
7948#define TG3_PHY_LOOPBACK 1
7949
7950static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
c76949a6 7951{
9f40dead 7952 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
c76949a6
MC
7953 u32 desc_idx;
7954 struct sk_buff *skb, *rx_skb;
7955 u8 *tx_data;
7956 dma_addr_t map;
7957 int num_pkts, tx_len, rx_len, i, err;
7958 struct tg3_rx_buffer_desc *desc;
7959
9f40dead 7960 if (loopback_mode == TG3_MAC_LOOPBACK) {
c94e3941
MC
7961 /* HW errata - mac loopback fails in some cases on 5780.
7962 * Normal traffic and PHY loopback are not affected by
7963 * errata.
7964 */
7965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
7966 return 0;
7967
9f40dead
MC
7968 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7969 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7970 MAC_MODE_PORT_MODE_GMII;
7971 tw32(MAC_MODE, mac_mode);
7972 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
c94e3941
MC
7973 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7974 BMCR_SPEED1000);
7975 udelay(40);
7976 /* reset to prevent losing 1st rx packet intermittently */
7977 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7978 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7979 udelay(10);
7980 tw32_f(MAC_RX_MODE, tp->rx_mode);
7981 }
9f40dead
MC
7982 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7983 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7984 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7985 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7986 tw32(MAC_MODE, mac_mode);
9f40dead
MC
7987 }
7988 else
7989 return -EINVAL;
c76949a6
MC
7990
7991 err = -EIO;
7992
c76949a6
MC
7993 tx_len = 1514;
7994 skb = dev_alloc_skb(tx_len);
7995 tx_data = skb_put(skb, tx_len);
7996 memcpy(tx_data, tp->dev->dev_addr, 6);
7997 memset(tx_data + 6, 0x0, 8);
7998
7999 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8000
8001 for (i = 14; i < tx_len; i++)
8002 tx_data[i] = (u8) (i & 0xff);
8003
8004 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8005
8006 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8007 HOSTCC_MODE_NOW);
8008
8009 udelay(10);
8010
8011 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8012
c76949a6
MC
8013 num_pkts = 0;
8014
9f40dead 8015 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
c76949a6 8016
9f40dead 8017 tp->tx_prod++;
c76949a6
MC
8018 num_pkts++;
8019
9f40dead
MC
8020 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8021 tp->tx_prod);
09ee929c 8022 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
c76949a6
MC
8023
8024 udelay(10);
8025
8026 for (i = 0; i < 10; i++) {
8027 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8028 HOSTCC_MODE_NOW);
8029
8030 udelay(10);
8031
8032 tx_idx = tp->hw_status->idx[0].tx_consumer;
8033 rx_idx = tp->hw_status->idx[0].rx_producer;
9f40dead 8034 if ((tx_idx == tp->tx_prod) &&
c76949a6
MC
8035 (rx_idx == (rx_start_idx + num_pkts)))
8036 break;
8037 }
8038
8039 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8040 dev_kfree_skb(skb);
8041
9f40dead 8042 if (tx_idx != tp->tx_prod)
c76949a6
MC
8043 goto out;
8044
8045 if (rx_idx != rx_start_idx + num_pkts)
8046 goto out;
8047
8048 desc = &tp->rx_rcb[rx_start_idx];
8049 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8050 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8051 if (opaque_key != RXD_OPAQUE_RING_STD)
8052 goto out;
8053
8054 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8055 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8056 goto out;
8057
8058 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8059 if (rx_len != tx_len)
8060 goto out;
8061
8062 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8063
8064 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8065 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8066
8067 for (i = 14; i < tx_len; i++) {
8068 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8069 goto out;
8070 }
8071 err = 0;
8072
8073 /* tg3_free_rings will unmap and free the rx_skb */
8074out:
8075 return err;
8076}
8077
9f40dead
MC
8078#define TG3_MAC_LOOPBACK_FAILED 1
8079#define TG3_PHY_LOOPBACK_FAILED 2
8080#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8081 TG3_PHY_LOOPBACK_FAILED)
8082
8083static int tg3_test_loopback(struct tg3 *tp)
8084{
8085 int err = 0;
8086
8087 if (!netif_running(tp->dev))
8088 return TG3_LOOPBACK_FAILED;
8089
8090 tg3_reset_hw(tp);
8091
8092 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8093 err |= TG3_MAC_LOOPBACK_FAILED;
8094 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8095 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8096 err |= TG3_PHY_LOOPBACK_FAILED;
8097 }
8098
8099 return err;
8100}
8101
4cafd3f5
MC
8102static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8103 u64 *data)
8104{
566f86ad
MC
8105 struct tg3 *tp = netdev_priv(dev);
8106
8107 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8108
8109 if (tg3_test_nvram(tp) != 0) {
8110 etest->flags |= ETH_TEST_FL_FAILED;
8111 data[0] = 1;
8112 }
ca43007a
MC
8113 if (tg3_test_link(tp) != 0) {
8114 etest->flags |= ETH_TEST_FL_FAILED;
8115 data[1] = 1;
8116 }
a71116d1 8117 if (etest->flags & ETH_TEST_FL_OFFLINE) {
bbe832c0
MC
8118 int irq_sync = 0;
8119
8120 if (netif_running(dev)) {
a71116d1 8121 tg3_netif_stop(tp);
bbe832c0
MC
8122 irq_sync = 1;
8123 }
a71116d1 8124
bbe832c0 8125 tg3_full_lock(tp, irq_sync);
a71116d1
MC
8126
8127 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8128 tg3_nvram_lock(tp);
8129 tg3_halt_cpu(tp, RX_CPU_BASE);
8130 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8131 tg3_halt_cpu(tp, TX_CPU_BASE);
8132 tg3_nvram_unlock(tp);
8133
8134 if (tg3_test_registers(tp) != 0) {
8135 etest->flags |= ETH_TEST_FL_FAILED;
8136 data[2] = 1;
8137 }
7942e1db
MC
8138 if (tg3_test_memory(tp) != 0) {
8139 etest->flags |= ETH_TEST_FL_FAILED;
8140 data[3] = 1;
8141 }
9f40dead 8142 if ((data[4] = tg3_test_loopback(tp)) != 0)
c76949a6 8143 etest->flags |= ETH_TEST_FL_FAILED;
a71116d1 8144
f47c11ee
DM
8145 tg3_full_unlock(tp);
8146
d4bc3927
MC
8147 if (tg3_test_interrupt(tp) != 0) {
8148 etest->flags |= ETH_TEST_FL_FAILED;
8149 data[5] = 1;
8150 }
f47c11ee
DM
8151
8152 tg3_full_lock(tp, 0);
d4bc3927 8153
a71116d1
MC
8154 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8155 if (netif_running(dev)) {
8156 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8157 tg3_init_hw(tp);
8158 tg3_netif_start(tp);
8159 }
f47c11ee
DM
8160
8161 tg3_full_unlock(tp);
a71116d1 8162 }
4cafd3f5
MC
8163}
8164
1da177e4
LT
8165static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8166{
8167 struct mii_ioctl_data *data = if_mii(ifr);
8168 struct tg3 *tp = netdev_priv(dev);
8169 int err;
8170
8171 switch(cmd) {
8172 case SIOCGMIIPHY:
8173 data->phy_id = PHY_ADDR;
8174
8175 /* fallthru */
8176 case SIOCGMIIREG: {
8177 u32 mii_regval;
8178
8179 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8180 break; /* We have no PHY */
8181
f47c11ee 8182 spin_lock_bh(&tp->lock);
1da177e4 8183 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 8184 spin_unlock_bh(&tp->lock);
1da177e4
LT
8185
8186 data->val_out = mii_regval;
8187
8188 return err;
8189 }
8190
8191 case SIOCSMIIREG:
8192 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8193 break; /* We have no PHY */
8194
8195 if (!capable(CAP_NET_ADMIN))
8196 return -EPERM;
8197
f47c11ee 8198 spin_lock_bh(&tp->lock);
1da177e4 8199 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 8200 spin_unlock_bh(&tp->lock);
1da177e4
LT
8201
8202 return err;
8203
8204 default:
8205 /* do nothing */
8206 break;
8207 }
8208 return -EOPNOTSUPP;
8209}
8210
8211#if TG3_VLAN_TAG_USED
8212static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8213{
8214 struct tg3 *tp = netdev_priv(dev);
8215
f47c11ee 8216 tg3_full_lock(tp, 0);
1da177e4
LT
8217
8218 tp->vlgrp = grp;
8219
8220 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8221 __tg3_set_rx_mode(dev);
8222
f47c11ee 8223 tg3_full_unlock(tp);
1da177e4
LT
8224}
8225
8226static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8227{
8228 struct tg3 *tp = netdev_priv(dev);
8229
f47c11ee 8230 tg3_full_lock(tp, 0);
1da177e4
LT
8231 if (tp->vlgrp)
8232 tp->vlgrp->vlan_devices[vid] = NULL;
f47c11ee 8233 tg3_full_unlock(tp);
1da177e4
LT
8234}
8235#endif
8236
15f9850d
DM
8237static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8238{
8239 struct tg3 *tp = netdev_priv(dev);
8240
8241 memcpy(ec, &tp->coal, sizeof(*ec));
8242 return 0;
8243}
8244
d244c892
MC
8245static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8246{
8247 struct tg3 *tp = netdev_priv(dev);
8248 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8249 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8250
8251 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8252 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8253 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8254 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8255 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8256 }
8257
8258 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8259 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8260 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8261 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8262 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8263 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8264 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8265 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8266 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8267 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8268 return -EINVAL;
8269
8270 /* No rx interrupts will be generated if both are zero */
8271 if ((ec->rx_coalesce_usecs == 0) &&
8272 (ec->rx_max_coalesced_frames == 0))
8273 return -EINVAL;
8274
8275 /* No tx interrupts will be generated if both are zero */
8276 if ((ec->tx_coalesce_usecs == 0) &&
8277 (ec->tx_max_coalesced_frames == 0))
8278 return -EINVAL;
8279
8280 /* Only copy relevant parameters, ignore all others. */
8281 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8282 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8283 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8284 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8285 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8286 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8287 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8288 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8289 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8290
8291 if (netif_running(dev)) {
8292 tg3_full_lock(tp, 0);
8293 __tg3_set_coalesce(tp, &tp->coal);
8294 tg3_full_unlock(tp);
8295 }
8296 return 0;
8297}
8298
1da177e4
LT
8299static struct ethtool_ops tg3_ethtool_ops = {
8300 .get_settings = tg3_get_settings,
8301 .set_settings = tg3_set_settings,
8302 .get_drvinfo = tg3_get_drvinfo,
8303 .get_regs_len = tg3_get_regs_len,
8304 .get_regs = tg3_get_regs,
8305 .get_wol = tg3_get_wol,
8306 .set_wol = tg3_set_wol,
8307 .get_msglevel = tg3_get_msglevel,
8308 .set_msglevel = tg3_set_msglevel,
8309 .nway_reset = tg3_nway_reset,
8310 .get_link = ethtool_op_get_link,
8311 .get_eeprom_len = tg3_get_eeprom_len,
8312 .get_eeprom = tg3_get_eeprom,
8313 .set_eeprom = tg3_set_eeprom,
8314 .get_ringparam = tg3_get_ringparam,
8315 .set_ringparam = tg3_set_ringparam,
8316 .get_pauseparam = tg3_get_pauseparam,
8317 .set_pauseparam = tg3_set_pauseparam,
8318 .get_rx_csum = tg3_get_rx_csum,
8319 .set_rx_csum = tg3_set_rx_csum,
8320 .get_tx_csum = ethtool_op_get_tx_csum,
8321 .set_tx_csum = tg3_set_tx_csum,
8322 .get_sg = ethtool_op_get_sg,
8323 .set_sg = ethtool_op_set_sg,
8324#if TG3_TSO_SUPPORT != 0
8325 .get_tso = ethtool_op_get_tso,
8326 .set_tso = tg3_set_tso,
8327#endif
4cafd3f5
MC
8328 .self_test_count = tg3_get_test_count,
8329 .self_test = tg3_self_test,
1da177e4 8330 .get_strings = tg3_get_strings,
4009a93d 8331 .phys_id = tg3_phys_id,
1da177e4
LT
8332 .get_stats_count = tg3_get_stats_count,
8333 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 8334 .get_coalesce = tg3_get_coalesce,
d244c892 8335 .set_coalesce = tg3_set_coalesce,
2ff43697 8336 .get_perm_addr = ethtool_op_get_perm_addr,
1da177e4
LT
8337};
8338
8339static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8340{
8341 u32 cursize, val;
8342
8343 tp->nvram_size = EEPROM_CHIP_SIZE;
8344
8345 if (tg3_nvram_read(tp, 0, &val) != 0)
8346 return;
8347
8348 if (swab32(val) != TG3_EEPROM_MAGIC)
8349 return;
8350
8351 /*
8352 * Size the chip by reading offsets at increasing powers of two.
8353 * When we encounter our validation signature, we know the addressing
8354 * has wrapped around, and thus have our chip size.
8355 */
8356 cursize = 0x800;
8357
8358 while (cursize < tp->nvram_size) {
8359 if (tg3_nvram_read(tp, cursize, &val) != 0)
8360 return;
8361
8362 if (swab32(val) == TG3_EEPROM_MAGIC)
8363 break;
8364
8365 cursize <<= 1;
8366 }
8367
8368 tp->nvram_size = cursize;
8369}
8370
8371static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8372{
8373 u32 val;
8374
8375 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8376 if (val != 0) {
8377 tp->nvram_size = (val >> 16) * 1024;
8378 return;
8379 }
8380 }
8381 tp->nvram_size = 0x20000;
8382}
8383
8384static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8385{
8386 u32 nvcfg1;
8387
8388 nvcfg1 = tr32(NVRAM_CFG1);
8389 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8390 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8391 }
8392 else {
8393 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8394 tw32(NVRAM_CFG1, nvcfg1);
8395 }
8396
4c987487 8397 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
a4e2b347 8398 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
1da177e4
LT
8399 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8400 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8401 tp->nvram_jedecnum = JEDEC_ATMEL;
8402 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8403 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8404 break;
8405 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8406 tp->nvram_jedecnum = JEDEC_ATMEL;
8407 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8408 break;
8409 case FLASH_VENDOR_ATMEL_EEPROM:
8410 tp->nvram_jedecnum = JEDEC_ATMEL;
8411 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8412 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8413 break;
8414 case FLASH_VENDOR_ST:
8415 tp->nvram_jedecnum = JEDEC_ST;
8416 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8417 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8418 break;
8419 case FLASH_VENDOR_SAIFUN:
8420 tp->nvram_jedecnum = JEDEC_SAIFUN;
8421 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8422 break;
8423 case FLASH_VENDOR_SST_SMALL:
8424 case FLASH_VENDOR_SST_LARGE:
8425 tp->nvram_jedecnum = JEDEC_SST;
8426 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8427 break;
8428 }
8429 }
8430 else {
8431 tp->nvram_jedecnum = JEDEC_ATMEL;
8432 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8433 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8434 }
8435}
8436
361b4ac2
MC
8437static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8438{
8439 u32 nvcfg1;
8440
8441 nvcfg1 = tr32(NVRAM_CFG1);
8442
e6af301b
MC
8443 /* NVRAM protection for TPM */
8444 if (nvcfg1 & (1 << 27))
8445 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8446
361b4ac2
MC
8447 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8448 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8449 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8450 tp->nvram_jedecnum = JEDEC_ATMEL;
8451 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8452 break;
8453 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8454 tp->nvram_jedecnum = JEDEC_ATMEL;
8455 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8456 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8457 break;
8458 case FLASH_5752VENDOR_ST_M45PE10:
8459 case FLASH_5752VENDOR_ST_M45PE20:
8460 case FLASH_5752VENDOR_ST_M45PE40:
8461 tp->nvram_jedecnum = JEDEC_ST;
8462 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8463 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8464 break;
8465 }
8466
8467 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8468 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8469 case FLASH_5752PAGE_SIZE_256:
8470 tp->nvram_pagesize = 256;
8471 break;
8472 case FLASH_5752PAGE_SIZE_512:
8473 tp->nvram_pagesize = 512;
8474 break;
8475 case FLASH_5752PAGE_SIZE_1K:
8476 tp->nvram_pagesize = 1024;
8477 break;
8478 case FLASH_5752PAGE_SIZE_2K:
8479 tp->nvram_pagesize = 2048;
8480 break;
8481 case FLASH_5752PAGE_SIZE_4K:
8482 tp->nvram_pagesize = 4096;
8483 break;
8484 case FLASH_5752PAGE_SIZE_264:
8485 tp->nvram_pagesize = 264;
8486 break;
8487 }
8488 }
8489 else {
8490 /* For eeprom, set pagesize to maximum eeprom size */
8491 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8492
8493 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8494 tw32(NVRAM_CFG1, nvcfg1);
8495 }
8496}
8497
1da177e4
LT
8498/* Chips other than 5700/5701 use the NVRAM for fetching info. */
8499static void __devinit tg3_nvram_init(struct tg3 *tp)
8500{
8501 int j;
8502
8503 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8504 return;
8505
8506 tw32_f(GRC_EEPROM_ADDR,
8507 (EEPROM_ADDR_FSM_RESET |
8508 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8509 EEPROM_ADDR_CLKPERD_SHIFT)));
8510
8511 /* XXX schedule_timeout() ... */
8512 for (j = 0; j < 100; j++)
8513 udelay(10);
8514
8515 /* Enable seeprom accesses. */
8516 tw32_f(GRC_LOCAL_CTRL,
8517 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8518 udelay(100);
8519
8520 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8521 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8522 tp->tg3_flags |= TG3_FLAG_NVRAM;
8523
e6af301b 8524 tg3_enable_nvram_access(tp);
1da177e4 8525
361b4ac2
MC
8526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8527 tg3_get_5752_nvram_info(tp);
8528 else
8529 tg3_get_nvram_info(tp);
8530
1da177e4
LT
8531 tg3_get_nvram_size(tp);
8532
e6af301b 8533 tg3_disable_nvram_access(tp);
1da177e4
LT
8534
8535 } else {
8536 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8537
8538 tg3_get_eeprom_size(tp);
8539 }
8540}
8541
8542static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8543 u32 offset, u32 *val)
8544{
8545 u32 tmp;
8546 int i;
8547
8548 if (offset > EEPROM_ADDR_ADDR_MASK ||
8549 (offset % 4) != 0)
8550 return -EINVAL;
8551
8552 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8553 EEPROM_ADDR_DEVID_MASK |
8554 EEPROM_ADDR_READ);
8555 tw32(GRC_EEPROM_ADDR,
8556 tmp |
8557 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8558 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8559 EEPROM_ADDR_ADDR_MASK) |
8560 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8561
8562 for (i = 0; i < 10000; i++) {
8563 tmp = tr32(GRC_EEPROM_ADDR);
8564
8565 if (tmp & EEPROM_ADDR_COMPLETE)
8566 break;
8567 udelay(100);
8568 }
8569 if (!(tmp & EEPROM_ADDR_COMPLETE))
8570 return -EBUSY;
8571
8572 *val = tr32(GRC_EEPROM_DATA);
8573 return 0;
8574}
8575
8576#define NVRAM_CMD_TIMEOUT 10000
8577
8578static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8579{
8580 int i;
8581
8582 tw32(NVRAM_CMD, nvram_cmd);
8583 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8584 udelay(10);
8585 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8586 udelay(10);
8587 break;
8588 }
8589 }
8590 if (i == NVRAM_CMD_TIMEOUT) {
8591 return -EBUSY;
8592 }
8593 return 0;
8594}
8595
8596static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8597{
8598 int ret;
8599
8600 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8601 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8602 return -EINVAL;
8603 }
8604
8605 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8606 return tg3_nvram_read_using_eeprom(tp, offset, val);
8607
8608 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8609 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8610 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8611
8612 offset = ((offset / tp->nvram_pagesize) <<
8613 ATMEL_AT45DB0X1B_PAGE_POS) +
8614 (offset % tp->nvram_pagesize);
8615 }
8616
8617 if (offset > NVRAM_ADDR_MSK)
8618 return -EINVAL;
8619
8620 tg3_nvram_lock(tp);
8621
e6af301b 8622 tg3_enable_nvram_access(tp);
1da177e4
LT
8623
8624 tw32(NVRAM_ADDR, offset);
8625 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8626 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8627
8628 if (ret == 0)
8629 *val = swab32(tr32(NVRAM_RDDATA));
8630
8631 tg3_nvram_unlock(tp);
8632
e6af301b 8633 tg3_disable_nvram_access(tp);
1da177e4
LT
8634
8635 return ret;
8636}
8637
8638static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8639 u32 offset, u32 len, u8 *buf)
8640{
8641 int i, j, rc = 0;
8642 u32 val;
8643
8644 for (i = 0; i < len; i += 4) {
8645 u32 addr, data;
8646
8647 addr = offset + i;
8648
8649 memcpy(&data, buf + i, 4);
8650
8651 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8652
8653 val = tr32(GRC_EEPROM_ADDR);
8654 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8655
8656 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8657 EEPROM_ADDR_READ);
8658 tw32(GRC_EEPROM_ADDR, val |
8659 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8660 (addr & EEPROM_ADDR_ADDR_MASK) |
8661 EEPROM_ADDR_START |
8662 EEPROM_ADDR_WRITE);
8663
8664 for (j = 0; j < 10000; j++) {
8665 val = tr32(GRC_EEPROM_ADDR);
8666
8667 if (val & EEPROM_ADDR_COMPLETE)
8668 break;
8669 udelay(100);
8670 }
8671 if (!(val & EEPROM_ADDR_COMPLETE)) {
8672 rc = -EBUSY;
8673 break;
8674 }
8675 }
8676
8677 return rc;
8678}
8679
8680/* offset and length are dword aligned */
8681static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8682 u8 *buf)
8683{
8684 int ret = 0;
8685 u32 pagesize = tp->nvram_pagesize;
8686 u32 pagemask = pagesize - 1;
8687 u32 nvram_cmd;
8688 u8 *tmp;
8689
8690 tmp = kmalloc(pagesize, GFP_KERNEL);
8691 if (tmp == NULL)
8692 return -ENOMEM;
8693
8694 while (len) {
8695 int j;
e6af301b 8696 u32 phy_addr, page_off, size;
1da177e4
LT
8697
8698 phy_addr = offset & ~pagemask;
8699
8700 for (j = 0; j < pagesize; j += 4) {
8701 if ((ret = tg3_nvram_read(tp, phy_addr + j,
8702 (u32 *) (tmp + j))))
8703 break;
8704 }
8705 if (ret)
8706 break;
8707
8708 page_off = offset & pagemask;
8709 size = pagesize;
8710 if (len < size)
8711 size = len;
8712
8713 len -= size;
8714
8715 memcpy(tmp + page_off, buf, size);
8716
8717 offset = offset + (pagesize - page_off);
8718
e6af301b 8719 tg3_enable_nvram_access(tp);
1da177e4
LT
8720
8721 /*
8722 * Before we can erase the flash page, we need
8723 * to issue a special "write enable" command.
8724 */
8725 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8726
8727 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8728 break;
8729
8730 /* Erase the target page */
8731 tw32(NVRAM_ADDR, phy_addr);
8732
8733 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8734 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8735
8736 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8737 break;
8738
8739 /* Issue another write enable to start the write. */
8740 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8741
8742 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8743 break;
8744
8745 for (j = 0; j < pagesize; j += 4) {
8746 u32 data;
8747
8748 data = *((u32 *) (tmp + j));
8749 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8750
8751 tw32(NVRAM_ADDR, phy_addr + j);
8752
8753 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8754 NVRAM_CMD_WR;
8755
8756 if (j == 0)
8757 nvram_cmd |= NVRAM_CMD_FIRST;
8758 else if (j == (pagesize - 4))
8759 nvram_cmd |= NVRAM_CMD_LAST;
8760
8761 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8762 break;
8763 }
8764 if (ret)
8765 break;
8766 }
8767
8768 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8769 tg3_nvram_exec_cmd(tp, nvram_cmd);
8770
8771 kfree(tmp);
8772
8773 return ret;
8774}
8775
8776/* offset and length are dword aligned */
8777static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8778 u8 *buf)
8779{
8780 int i, ret = 0;
8781
8782 for (i = 0; i < len; i += 4, offset += 4) {
8783 u32 data, page_off, phy_addr, nvram_cmd;
8784
8785 memcpy(&data, buf + i, 4);
8786 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8787
8788 page_off = offset % tp->nvram_pagesize;
8789
8790 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8791 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8792
8793 phy_addr = ((offset / tp->nvram_pagesize) <<
8794 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8795 }
8796 else {
8797 phy_addr = offset;
8798 }
8799
8800 tw32(NVRAM_ADDR, phy_addr);
8801
8802 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8803
8804 if ((page_off == 0) || (i == 0))
8805 nvram_cmd |= NVRAM_CMD_FIRST;
8806 else if (page_off == (tp->nvram_pagesize - 4))
8807 nvram_cmd |= NVRAM_CMD_LAST;
8808
8809 if (i == (len - 4))
8810 nvram_cmd |= NVRAM_CMD_LAST;
8811
4c987487
MC
8812 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8813 (tp->nvram_jedecnum == JEDEC_ST) &&
8814 (nvram_cmd & NVRAM_CMD_FIRST)) {
1da177e4
LT
8815
8816 if ((ret = tg3_nvram_exec_cmd(tp,
8817 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8818 NVRAM_CMD_DONE)))
8819
8820 break;
8821 }
8822 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8823 /* We always do complete word writes to eeprom. */
8824 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8825 }
8826
8827 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8828 break;
8829 }
8830 return ret;
8831}
8832
8833/* offset and length are dword aligned */
8834static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8835{
8836 int ret;
8837
8838 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8839 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8840 return -EINVAL;
8841 }
8842
8843 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
8844 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8845 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
8846 udelay(40);
8847 }
8848
8849 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8850 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8851 }
8852 else {
8853 u32 grc_mode;
8854
8855 tg3_nvram_lock(tp);
8856
e6af301b
MC
8857 tg3_enable_nvram_access(tp);
8858 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8859 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 8860 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
8861
8862 grc_mode = tr32(GRC_MODE);
8863 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8864
8865 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8866 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8867
8868 ret = tg3_nvram_write_block_buffered(tp, offset, len,
8869 buf);
8870 }
8871 else {
8872 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8873 buf);
8874 }
8875
8876 grc_mode = tr32(GRC_MODE);
8877 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8878
e6af301b 8879 tg3_disable_nvram_access(tp);
1da177e4
LT
8880 tg3_nvram_unlock(tp);
8881 }
8882
8883 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 8884 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
8885 udelay(40);
8886 }
8887
8888 return ret;
8889}
8890
8891struct subsys_tbl_ent {
8892 u16 subsys_vendor, subsys_devid;
8893 u32 phy_id;
8894};
8895
8896static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8897 /* Broadcom boards. */
8898 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8899 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8900 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8901 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
8902 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8903 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8904 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
8905 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8906 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8907 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8908 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8909
8910 /* 3com boards. */
8911 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8912 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8913 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
8914 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8915 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8916
8917 /* DELL boards. */
8918 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8919 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8920 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8921 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8922
8923 /* Compaq boards. */
8924 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8925 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8926 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
8927 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8928 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8929
8930 /* IBM boards. */
8931 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8932};
8933
8934static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8935{
8936 int i;
8937
8938 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8939 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8940 tp->pdev->subsystem_vendor) &&
8941 (subsys_id_to_phy_id[i].subsys_devid ==
8942 tp->pdev->subsystem_device))
8943 return &subsys_id_to_phy_id[i];
8944 }
8945 return NULL;
8946}
8947
7d0c41ef
MC
8948/* Since this function may be called in D3-hot power state during
8949 * tg3_init_one(), only config cycles are allowed.
8950 */
8951static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 8952{
1da177e4 8953 u32 val;
7d0c41ef
MC
8954
8955 /* Make sure register accesses (indirect or otherwise)
8956 * will function correctly.
8957 */
8958 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8959 tp->misc_host_ctrl);
1da177e4
LT
8960
8961 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
8962 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8963
1da177e4
LT
8964 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8965 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8966 u32 nic_cfg, led_cfg;
7d0c41ef
MC
8967 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8968 int eeprom_phy_serdes = 0;
1da177e4
LT
8969
8970 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8971 tp->nic_sram_data_cfg = nic_cfg;
8972
8973 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8974 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8975 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8976 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8977 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8978 (ver > 0) && (ver < 0x100))
8979 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8980
1da177e4
LT
8981 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8982 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8983 eeprom_phy_serdes = 1;
8984
8985 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8986 if (nic_phy_id != 0) {
8987 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8988 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8989
8990 eeprom_phy_id = (id1 >> 16) << 10;
8991 eeprom_phy_id |= (id2 & 0xfc00) << 16;
8992 eeprom_phy_id |= (id2 & 0x03ff) << 0;
8993 } else
8994 eeprom_phy_id = 0;
8995
7d0c41ef 8996 tp->phy_id = eeprom_phy_id;
747e8f8b 8997 if (eeprom_phy_serdes) {
a4e2b347 8998 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
747e8f8b
MC
8999 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9000 else
9001 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9002 }
7d0c41ef 9003
cbf46853 9004 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9005 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9006 SHASTA_EXT_LED_MODE_MASK);
cbf46853 9007 else
1da177e4
LT
9008 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9009
9010 switch (led_cfg) {
9011 default:
9012 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9013 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9014 break;
9015
9016 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9017 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9018 break;
9019
9020 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9021 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
9022
9023 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9024 * read on some older 5700/5701 bootcode.
9025 */
9026 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9027 ASIC_REV_5700 ||
9028 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9029 ASIC_REV_5701)
9030 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9031
1da177e4
LT
9032 break;
9033
9034 case SHASTA_EXT_LED_SHARED:
9035 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9036 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9037 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9038 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9039 LED_CTRL_MODE_PHY_2);
9040 break;
9041
9042 case SHASTA_EXT_LED_MAC:
9043 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9044 break;
9045
9046 case SHASTA_EXT_LED_COMBO:
9047 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9048 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9049 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9050 LED_CTRL_MODE_PHY_2);
9051 break;
9052
9053 };
9054
9055 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9057 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9058 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9059
9060 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9061 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9062 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9063 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9064
9065 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9066 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 9067 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9068 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9069 }
9070 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9071 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9072
9073 if (cfg2 & (1 << 17))
9074 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9075
9076 /* serdes signal pre-emphasis in register 0x590 set by */
9077 /* bootcode if bit 18 is set */
9078 if (cfg2 & (1 << 18))
9079 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9080 }
7d0c41ef
MC
9081}
9082
9083static int __devinit tg3_phy_probe(struct tg3 *tp)
9084{
9085 u32 hw_phy_id_1, hw_phy_id_2;
9086 u32 hw_phy_id, hw_phy_id_masked;
9087 int err;
1da177e4
LT
9088
9089 /* Reading the PHY ID register can conflict with ASF
9090 * firwmare access to the PHY hardware.
9091 */
9092 err = 0;
9093 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9094 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9095 } else {
9096 /* Now read the physical PHY_ID from the chip and verify
9097 * that it is sane. If it doesn't look good, we fall back
9098 * to either the hard-coded table based PHY_ID and failing
9099 * that the value found in the eeprom area.
9100 */
9101 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9102 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9103
9104 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9105 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9106 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9107
9108 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9109 }
9110
9111 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9112 tp->phy_id = hw_phy_id;
9113 if (hw_phy_id_masked == PHY_ID_BCM8002)
9114 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
da6b2d01
MC
9115 else
9116 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
1da177e4 9117 } else {
7d0c41ef
MC
9118 if (tp->phy_id != PHY_ID_INVALID) {
9119 /* Do nothing, phy ID already set up in
9120 * tg3_get_eeprom_hw_cfg().
9121 */
1da177e4
LT
9122 } else {
9123 struct subsys_tbl_ent *p;
9124
9125 /* No eeprom signature? Try the hardcoded
9126 * subsys device table.
9127 */
9128 p = lookup_by_subsys(tp);
9129 if (!p)
9130 return -ENODEV;
9131
9132 tp->phy_id = p->phy_id;
9133 if (!tp->phy_id ||
9134 tp->phy_id == PHY_ID_BCM8002)
9135 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9136 }
9137 }
9138
747e8f8b 9139 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
1da177e4
LT
9140 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9141 u32 bmsr, adv_reg, tg3_ctrl;
9142
9143 tg3_readphy(tp, MII_BMSR, &bmsr);
9144 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9145 (bmsr & BMSR_LSTATUS))
9146 goto skip_phy_reset;
9147
9148 err = tg3_phy_reset(tp);
9149 if (err)
9150 return err;
9151
9152 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9153 ADVERTISE_100HALF | ADVERTISE_100FULL |
9154 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9155 tg3_ctrl = 0;
9156 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9157 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9158 MII_TG3_CTRL_ADV_1000_FULL);
9159 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9160 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9161 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9162 MII_TG3_CTRL_ENABLE_AS_MASTER);
9163 }
9164
9165 if (!tg3_copper_is_advertising_all(tp)) {
9166 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9167
9168 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9169 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9170
9171 tg3_writephy(tp, MII_BMCR,
9172 BMCR_ANENABLE | BMCR_ANRESTART);
9173 }
9174 tg3_phy_set_wirespeed(tp);
9175
9176 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9177 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9178 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9179 }
9180
9181skip_phy_reset:
9182 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9183 err = tg3_init_5401phy_dsp(tp);
9184 if (err)
9185 return err;
9186 }
9187
9188 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9189 err = tg3_init_5401phy_dsp(tp);
9190 }
9191
747e8f8b 9192 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1da177e4
LT
9193 tp->link_config.advertising =
9194 (ADVERTISED_1000baseT_Half |
9195 ADVERTISED_1000baseT_Full |
9196 ADVERTISED_Autoneg |
9197 ADVERTISED_FIBRE);
9198 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9199 tp->link_config.advertising &=
9200 ~(ADVERTISED_1000baseT_Half |
9201 ADVERTISED_1000baseT_Full);
9202
9203 return err;
9204}
9205
9206static void __devinit tg3_read_partno(struct tg3 *tp)
9207{
9208 unsigned char vpd_data[256];
9209 int i;
9210
9211 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9212 /* Sun decided not to put the necessary bits in the
9213 * NVRAM of their onboard tg3 parts :(
9214 */
9215 strcpy(tp->board_part_number, "Sun 570X");
9216 return;
9217 }
9218
9219 for (i = 0; i < 256; i += 4) {
9220 u32 tmp;
9221
9222 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9223 goto out_not_found;
9224
9225 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9226 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9227 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9228 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9229 }
9230
9231 /* Now parse and find the part number. */
9232 for (i = 0; i < 256; ) {
9233 unsigned char val = vpd_data[i];
9234 int block_end;
9235
9236 if (val == 0x82 || val == 0x91) {
9237 i = (i + 3 +
9238 (vpd_data[i + 1] +
9239 (vpd_data[i + 2] << 8)));
9240 continue;
9241 }
9242
9243 if (val != 0x90)
9244 goto out_not_found;
9245
9246 block_end = (i + 3 +
9247 (vpd_data[i + 1] +
9248 (vpd_data[i + 2] << 8)));
9249 i += 3;
9250 while (i < block_end) {
9251 if (vpd_data[i + 0] == 'P' &&
9252 vpd_data[i + 1] == 'N') {
9253 int partno_len = vpd_data[i + 2];
9254
9255 if (partno_len > 24)
9256 goto out_not_found;
9257
9258 memcpy(tp->board_part_number,
9259 &vpd_data[i + 3],
9260 partno_len);
9261
9262 /* Success. */
9263 return;
9264 }
9265 }
9266
9267 /* Part number not found. */
9268 goto out_not_found;
9269 }
9270
9271out_not_found:
9272 strcpy(tp->board_part_number, "none");
9273}
9274
9275#ifdef CONFIG_SPARC64
9276static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9277{
9278 struct pci_dev *pdev = tp->pdev;
9279 struct pcidev_cookie *pcp = pdev->sysdata;
9280
9281 if (pcp != NULL) {
9282 int node = pcp->prom_node;
9283 u32 venid;
9284 int err;
9285
9286 err = prom_getproperty(node, "subsystem-vendor-id",
9287 (char *) &venid, sizeof(venid));
9288 if (err == 0 || err == -1)
9289 return 0;
9290 if (venid == PCI_VENDOR_ID_SUN)
9291 return 1;
9292 }
9293 return 0;
9294}
9295#endif
9296
9297static int __devinit tg3_get_invariants(struct tg3 *tp)
9298{
9299 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
9300 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9301 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
399de50b
MC
9302 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9303 PCI_DEVICE_ID_VIA_8385_0) },
1da177e4
LT
9304 { },
9305 };
9306 u32 misc_ctrl_reg;
9307 u32 cacheline_sz_reg;
9308 u32 pci_state_reg, grc_misc_cfg;
9309 u32 val;
9310 u16 pci_cmd;
9311 int err;
9312
9313#ifdef CONFIG_SPARC64
9314 if (tg3_is_sun_570X(tp))
9315 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9316#endif
9317
1da177e4
LT
9318 /* Force memory write invalidate off. If we leave it on,
9319 * then on 5700_BX chips we have to enable a workaround.
9320 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9321 * to match the cacheline size. The Broadcom driver have this
9322 * workaround but turns MWI off all the times so never uses
9323 * it. This seems to suggest that the workaround is insufficient.
9324 */
9325 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9326 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9327 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9328
9329 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9330 * has the register indirect write enable bit set before
9331 * we try to access any of the MMIO registers. It is also
9332 * critical that the PCI-X hw workaround situation is decided
9333 * before that as well.
9334 */
9335 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9336 &misc_ctrl_reg);
9337
9338 tp->pci_chip_rev_id = (misc_ctrl_reg >>
9339 MISC_HOST_CTRL_CHIPREV_SHIFT);
9340
ff645bec
MC
9341 /* Wrong chip ID in 5752 A0. This code can be removed later
9342 * as A0 is not in production.
9343 */
9344 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9345 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9346
6892914f
MC
9347 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9348 * we need to disable memory and use config. cycles
9349 * only to access all registers. The 5702/03 chips
9350 * can mistakenly decode the special cycles from the
9351 * ICH chipsets as memory write cycles, causing corruption
9352 * of register and memory space. Only certain ICH bridges
9353 * will drive special cycles with non-zero data during the
9354 * address phase which can fall within the 5703's address
9355 * range. This is not an ICH bug as the PCI spec allows
9356 * non-zero address during special cycles. However, only
9357 * these ICH bridges are known to drive non-zero addresses
9358 * during special cycles.
9359 *
9360 * Since special cycles do not cross PCI bridges, we only
9361 * enable this workaround if the 5703 is on the secondary
9362 * bus of these ICH bridges.
9363 */
9364 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9365 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9366 static struct tg3_dev_id {
9367 u32 vendor;
9368 u32 device;
9369 u32 rev;
9370 } ich_chipsets[] = {
9371 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9372 PCI_ANY_ID },
9373 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9374 PCI_ANY_ID },
9375 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9376 0xa },
9377 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9378 PCI_ANY_ID },
9379 { },
9380 };
9381 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9382 struct pci_dev *bridge = NULL;
9383
9384 while (pci_id->vendor != 0) {
9385 bridge = pci_get_device(pci_id->vendor, pci_id->device,
9386 bridge);
9387 if (!bridge) {
9388 pci_id++;
9389 continue;
9390 }
9391 if (pci_id->rev != PCI_ANY_ID) {
9392 u8 rev;
9393
9394 pci_read_config_byte(bridge, PCI_REVISION_ID,
9395 &rev);
9396 if (rev > pci_id->rev)
9397 continue;
9398 }
9399 if (bridge->subordinate &&
9400 (bridge->subordinate->number ==
9401 tp->pdev->bus->number)) {
9402
9403 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9404 pci_dev_put(bridge);
9405 break;
9406 }
9407 }
9408 }
9409
4cf78e4f 9410 /* Find msi capability. */
a4e2b347
MC
9411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9412 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9413 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
4cf78e4f 9414 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
a4e2b347 9415 }
4cf78e4f 9416
1da177e4
LT
9417 /* Initialize misc host control in PCI block. */
9418 tp->misc_host_ctrl |= (misc_ctrl_reg &
9419 MISC_HOST_CTRL_CHIPREV);
9420 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9421 tp->misc_host_ctrl);
9422
9423 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9424 &cacheline_sz_reg);
9425
9426 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
9427 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
9428 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
9429 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
9430
6708e5cc 9431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f 9432 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
a4e2b347 9433 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6708e5cc
JL
9434 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9435
1b440c56
JL
9436 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9437 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9438 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9439
bb7064dc 9440 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9441 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9442
0f893dc6
MC
9443 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9444 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9445 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9446 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9447
1da177e4
LT
9448 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9449 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9450
399de50b
MC
9451 /* If we have an AMD 762 or VIA K8T800 chipset, write
9452 * reordering to the mailbox registers done by the host
9453 * controller can cause major troubles. We read back from
9454 * every mailbox register write to force the writes to be
9455 * posted to the chip in order.
9456 */
9457 if (pci_dev_present(write_reorder_chipsets) &&
9458 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9459 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9460
1da177e4
LT
9461 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9462 tp->pci_lat_timer < 64) {
9463 tp->pci_lat_timer = 64;
9464
9465 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
9466 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
9467 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
9468 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
9469
9470 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9471 cacheline_sz_reg);
9472 }
9473
9474 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9475 &pci_state_reg);
9476
9477 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9478 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9479
9480 /* If this is a 5700 BX chipset, and we are in PCI-X
9481 * mode, enable register write workaround.
9482 *
9483 * The workaround is to use indirect register accesses
9484 * for all chip writes not to mailbox registers.
9485 */
9486 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9487 u32 pm_reg;
9488 u16 pci_cmd;
9489
9490 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9491
9492 /* The chip can have it's power management PCI config
9493 * space registers clobbered due to this bug.
9494 * So explicitly force the chip into D0 here.
9495 */
9496 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9497 &pm_reg);
9498 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9499 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9500 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9501 pm_reg);
9502
9503 /* Also, force SERR#/PERR# in PCI command. */
9504 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9505 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9506 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9507 }
9508 }
9509
087fe256
MC
9510 /* 5700 BX chips need to have their TX producer index mailboxes
9511 * written twice to workaround a bug.
9512 */
9513 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9514 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9515
1da177e4
LT
9516 /* Back to back register writes can cause problems on this chip,
9517 * the workaround is to read back all reg writes except those to
9518 * mailbox regs. See tg3_write_indirect_reg32().
9519 *
9520 * PCI Express 5750_A0 rev chips need this workaround too.
9521 */
9522 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9523 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9524 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9525 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9526
9527 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9528 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9529 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9530 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9531
9532 /* Chip-specific fixup from Broadcom driver */
9533 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9534 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9535 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9536 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9537 }
9538
1ee582d8 9539 /* Default fast path register access methods */
20094930 9540 tp->read32 = tg3_read32;
1ee582d8 9541 tp->write32 = tg3_write32;
09ee929c 9542 tp->read32_mbox = tg3_read32;
20094930 9543 tp->write32_mbox = tg3_write32;
1ee582d8
MC
9544 tp->write32_tx_mbox = tg3_write32;
9545 tp->write32_rx_mbox = tg3_write32;
9546
9547 /* Various workaround register access methods */
9548 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9549 tp->write32 = tg3_write_indirect_reg32;
9550 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9551 tp->write32 = tg3_write_flush_reg32;
9552
9553 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9554 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9555 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9556 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9557 tp->write32_rx_mbox = tg3_write_flush_reg32;
9558 }
20094930 9559
6892914f
MC
9560 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9561 tp->read32 = tg3_read_indirect_reg32;
9562 tp->write32 = tg3_write_indirect_reg32;
9563 tp->read32_mbox = tg3_read_indirect_mbox;
9564 tp->write32_mbox = tg3_write_indirect_mbox;
9565 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9566 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9567
9568 iounmap(tp->regs);
22abe310 9569 tp->regs = NULL;
6892914f
MC
9570
9571 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9572 pci_cmd &= ~PCI_COMMAND_MEMORY;
9573 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9574 }
9575
7d0c41ef
MC
9576 /* Get eeprom hw config before calling tg3_set_power_state().
9577 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9578 * determined before calling tg3_set_power_state() so that
9579 * we know whether or not to switch out of Vaux power.
9580 * When the flag is set, it means that GPIO1 is used for eeprom
9581 * write protect and also implies that it is a LOM where GPIOs
9582 * are not used to switch power.
9583 */
9584 tg3_get_eeprom_hw_cfg(tp);
9585
314fba34
MC
9586 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9587 * GPIO1 driven high will bring 5700's external PHY out of reset.
9588 * It is also used as eeprom write protect on LOMs.
9589 */
9590 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9591 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9592 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9593 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9594 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
9595 /* Unused GPIO3 must be driven as output on 5752 because there
9596 * are no pull-up resistors on unused GPIO pins.
9597 */
9598 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9599 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 9600
1da177e4
LT
9601 /* Force the chip into D0. */
9602 err = tg3_set_power_state(tp, 0);
9603 if (err) {
9604 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9605 pci_name(tp->pdev));
9606 return err;
9607 }
9608
9609 /* 5700 B0 chips do not support checksumming correctly due
9610 * to hardware bugs.
9611 */
9612 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9613 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9614
9615 /* Pseudo-header checksum is done by hardware logic and not
9616 * the offload processers, so make the chip do the pseudo-
9617 * header checksums on receive. For transmit it is more
9618 * convenient to do the pseudo-header checksum in software
9619 * as Linux does that on transmit for us in all cases.
9620 */
9621 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9622 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9623
9624 /* Derive initial jumbo mode from MTU assigned in
9625 * ether_setup() via the alloc_etherdev() call
9626 */
0f893dc6 9627 if (tp->dev->mtu > ETH_DATA_LEN &&
a4e2b347 9628 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
0f893dc6 9629 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
9630
9631 /* Determine WakeOnLan speed to use. */
9632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9633 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9634 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9635 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9636 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9637 } else {
9638 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9639 }
9640
9641 /* A few boards don't want Ethernet@WireSpeed phy feature */
9642 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9643 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9644 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b
MC
9645 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9646 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
9647 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9648
9649 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9650 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9651 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9652 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9653 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9654
bb7064dc 9655 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
1da177e4
LT
9656 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9657
1da177e4 9658 tp->coalesce_mode = 0;
1da177e4
LT
9659 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9660 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9661 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9662
9663 /* Initialize MAC MI mode, polling disabled. */
9664 tw32_f(MAC_MI_MODE, tp->mi_mode);
9665 udelay(80);
9666
9667 /* Initialize data/descriptor byte/word swapping. */
9668 val = tr32(GRC_MODE);
9669 val &= GRC_MODE_HOST_STACKUP;
9670 tw32(GRC_MODE, val | tp->grc_mode);
9671
9672 tg3_switch_clocks(tp);
9673
9674 /* Clear this out for sanity. */
9675 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9676
9677 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9678 &pci_state_reg);
9679 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9680 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9681 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9682
9683 if (chiprevid == CHIPREV_ID_5701_A0 ||
9684 chiprevid == CHIPREV_ID_5701_B0 ||
9685 chiprevid == CHIPREV_ID_5701_B2 ||
9686 chiprevid == CHIPREV_ID_5701_B5) {
9687 void __iomem *sram_base;
9688
9689 /* Write some dummy words into the SRAM status block
9690 * area, see if it reads back correctly. If the return
9691 * value is bad, force enable the PCIX workaround.
9692 */
9693 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9694
9695 writel(0x00000000, sram_base);
9696 writel(0x00000000, sram_base + 4);
9697 writel(0xffffffff, sram_base + 4);
9698 if (readl(sram_base) != 0x00000000)
9699 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9700 }
9701 }
9702
9703 udelay(50);
9704 tg3_nvram_init(tp);
9705
9706 grc_misc_cfg = tr32(GRC_MISC_CFG);
9707 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9708
9709 /* Broadcom's driver says that CIOBE multisplit has a bug */
9710#if 0
9711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9712 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9713 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9714 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9715 }
9716#endif
9717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9718 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9719 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9720 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9721
fac9b83e
DM
9722 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9723 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9724 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9725 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9726 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9727 HOSTCC_MODE_CLRTICK_TXBD);
9728
9729 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9730 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9731 tp->misc_host_ctrl);
9732 }
9733
1da177e4
LT
9734 /* these are limited to 10/100 only */
9735 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9736 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9737 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9738 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9739 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9740 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9741 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9742 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9743 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9744 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9745 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9746
9747 err = tg3_phy_probe(tp);
9748 if (err) {
9749 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9750 pci_name(tp->pdev), err);
9751 /* ... but do not return immediately ... */
9752 }
9753
9754 tg3_read_partno(tp);
9755
9756 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9757 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9758 } else {
9759 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9760 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9761 else
9762 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9763 }
9764
9765 /* 5700 {AX,BX} chips have a broken status block link
9766 * change bit implementation, so we must use the
9767 * status register in those cases.
9768 */
9769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9770 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9771 else
9772 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9773
9774 /* The led_ctrl is set during tg3_phy_probe, here we might
9775 * have to force the link status polling mechanism based
9776 * upon subsystem IDs.
9777 */
9778 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9779 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9780 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9781 TG3_FLAG_USE_LINKCHG_REG);
9782 }
9783
9784 /* For all SERDES we poll the MAC status register. */
9785 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9786 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9787 else
9788 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9789
1da177e4
LT
9790 /* It seems all chips can get confused if TX buffers
9791 * straddle the 4GB address boundary in some cases.
9792 */
9793 tp->dev->hard_start_xmit = tg3_start_xmit;
9794
9795 tp->rx_offset = 2;
9796 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9797 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9798 tp->rx_offset = 0;
9799
9800 /* By default, disable wake-on-lan. User can change this
9801 * using ETHTOOL_SWOL.
9802 */
9803 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9804
9805 return err;
9806}
9807
9808#ifdef CONFIG_SPARC64
/* Fetch the MAC address from the SPARC OpenPROM "local-mac-address"
 * property of this PCI device's firmware node.
 *
 * Returns 0 and fills dev->dev_addr / dev->perm_addr on success,
 * -ENODEV when no PROM cookie or no 6-byte property is present
 * (caller then falls back to other address sources).
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	/* sysdata is the sparc64 PCI controller cookie carrying the
	 * PROM node for this device.
	 */
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;

		/* Only accept a well-formed 6-byte ethernet address. */
		if (prom_getproplen(node, "local-mac-address") == 6) {
			prom_getproperty(node, "local-mac-address",
					 dev->dev_addr, 6);
			memcpy(dev->perm_addr, dev->dev_addr, 6);
			return 0;
		}
	}
	return -ENODEV;
}
9827
9828static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9829{
9830 struct net_device *dev = tp->dev;
9831
9832 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 9833 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
9834 return 0;
9835}
9836#endif
9837
/* Determine the device MAC address, trying sources in priority order:
 * SPARC PROM, NIC SRAM mailbox, NVRAM, then the MAC address registers.
 * Fills dev->dev_addr and dev->perm_addr; returns 0 on success or
 * -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;

#ifdef CONFIG_SPARC64
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	/* NOTE(review): TG3_FLG2_SUN_570X is a tg3_flags2 constant but is
	 * tested against tp->tg3_flags here (and again below) — looks like
	 * the wrong field; confirm against tg3.h flag definitions.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	     !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC chips keep the second port's address at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* Reset the NVRAM interface if we could not get the lock,
		 * otherwise just drop the lock again.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* "HK" signature: mailbox valid */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;
	}
	/* Next, try NVRAM. */
	else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
		 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
		/* NVRAM stores the address with a different byte layout
		 * than the mailbox, hence the mirrored shifts.
		 */
		dev->dev_addr[0] = ((hi >> 16) & 0xff);
		dev->dev_addr[1] = ((hi >> 24) & 0xff);
		dev->dev_addr[2] = ((lo >>  0) & 0xff);
		dev->dev_addr[3] = ((lo >>  8) & 0xff);
		dev->dev_addr[4] = ((lo >> 16) & 0xff);
		dev->dev_addr[5] = ((lo >> 24) & 0xff);
	}
	/* Finally just fetch it out of the MAC control regs. */
	else {
		hi = tr32(MAC_ADDR_0_HIGH);
		lo = tr32(MAC_ADDR_0_LOW);

		dev->dev_addr[5] = lo & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[1] = hi & 0xff;
		dev->dev_addr[0] = (hi >>  8) & 0xff;
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
9906
59e6b434
DM
9907#define BOUNDARY_SINGLE_CACHELINE 1
9908#define BOUNDARY_MULTI_CACHELINE 2
9909
9910static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9911{
9912 int cacheline_size;
9913 u8 byte;
9914 int goal;
9915
9916 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9917 if (byte == 0)
9918 cacheline_size = 1024;
9919 else
9920 cacheline_size = (int) byte * 4;
9921
9922 /* On 5703 and later chips, the boundary bits have no
9923 * effect.
9924 */
9925 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9926 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9927 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9928 goto out;
9929
9930#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9931 goal = BOUNDARY_MULTI_CACHELINE;
9932#else
9933#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9934 goal = BOUNDARY_SINGLE_CACHELINE;
9935#else
9936 goal = 0;
9937#endif
9938#endif
9939
9940 if (!goal)
9941 goto out;
9942
9943 /* PCI controllers on most RISC systems tend to disconnect
9944 * when a device tries to burst across a cache-line boundary.
9945 * Therefore, letting tg3 do so just wastes PCI bandwidth.
9946 *
9947 * Unfortunately, for PCI-E there are only limited
9948 * write-side controls for this, and thus for reads
9949 * we will still get the disconnects. We'll also waste
9950 * these PCI cycles for both read and write for chips
9951 * other than 5700 and 5701 which do not implement the
9952 * boundary bits.
9953 */
9954 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9955 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9956 switch (cacheline_size) {
9957 case 16:
9958 case 32:
9959 case 64:
9960 case 128:
9961 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9962 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9963 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9964 } else {
9965 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9966 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9967 }
9968 break;
9969
9970 case 256:
9971 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9972 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9973 break;
9974
9975 default:
9976 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9977 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9978 break;
9979 };
9980 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9981 switch (cacheline_size) {
9982 case 16:
9983 case 32:
9984 case 64:
9985 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9986 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9987 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9988 break;
9989 }
9990 /* fallthrough */
9991 case 128:
9992 default:
9993 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9994 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9995 break;
9996 };
9997 } else {
9998 switch (cacheline_size) {
9999 case 16:
10000 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10001 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10002 DMA_RWCTRL_WRITE_BNDRY_16);
10003 break;
10004 }
10005 /* fallthrough */
10006 case 32:
10007 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10008 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10009 DMA_RWCTRL_WRITE_BNDRY_32);
10010 break;
10011 }
10012 /* fallthrough */
10013 case 64:
10014 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10015 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10016 DMA_RWCTRL_WRITE_BNDRY_64);
10017 break;
10018 }
10019 /* fallthrough */
10020 case 128:
10021 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10022 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10023 DMA_RWCTRL_WRITE_BNDRY_128);
10024 break;
10025 }
10026 /* fallthrough */
10027 case 256:
10028 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10029 DMA_RWCTRL_WRITE_BNDRY_256);
10030 break;
10031 case 512:
10032 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10033 DMA_RWCTRL_WRITE_BNDRY_512);
10034 break;
10035 case 1024:
10036 default:
10037 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10038 DMA_RWCTRL_WRITE_BNDRY_1024);
10039 break;
10040 };
10041 }
10042
10043out:
10044 return val;
10045}
10046
1da177e4
LT
/* Run one host<->NIC DMA transfer of @size bytes via the chip's internal
 * DMA descriptor mechanism; used by tg3_test_dma() to probe for DMA
 * engine bugs.
 *
 * @buf/@buf_dma: host test buffer and its bus address
 * @to_device:    non-zero = read DMA (host -> NIC), zero = write DMA
 *                (NIC -> host)
 *
 * Returns 0 if the completion shows up within ~4ms of polling, -ENODEV
 * on timeout.  The register write sequence is hardware-mandated; do not
 * reorder.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and DMA status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host buffer to the chip. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the PCI
	 * config-space memory window, then close the window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO: 40 tries x 100us = 4ms max. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
10127
ded7340d 10128#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
10129
/* Configure tp->dma_rwctrl for this chip/bus combination and, on
 * 5700/5701 only, run a loopback DMA test to detect the write-DMA bug,
 * tightening the write boundary to 16 bytes if corruption is seen.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be allocated,
 * -ENODEV if the DMA engine corrupts data even at the safest setting.
 * Called once at probe time; register write order matters.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	/* Merge in platform/bus-appropriate burst boundary bits. */
	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Watermark/workaround bits differ per bus type and ASIC rev.
	 * The magic constants below are Broadcom-specified register
	 * fields for TG3PCI_DMA_RW_CTRL.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (minimum DMA size bits). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 exhibit the write-DMA bug; everything else is
	 * done after programming the register above.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern to the chip, read it back, verify.
	 * Exits when a full pass verifies, or on a hard error.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it.  On the first mismatch, retry once with the
		 * safest (16-byte) write boundary; if already at 16,
		 * the hardware is genuinely broken.
		 * NOTE(review): p[i] and i are u32 printed with %d —
		 * should be %u; harmless for the small test values.
		 */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
10303
10304static void __devinit tg3_init_link_config(struct tg3 *tp)
10305{
10306 tp->link_config.advertising =
10307 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10308 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10309 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10310 ADVERTISED_Autoneg | ADVERTISED_MII);
10311 tp->link_config.speed = SPEED_INVALID;
10312 tp->link_config.duplex = DUPLEX_INVALID;
10313 tp->link_config.autoneg = AUTONEG_ENABLE;
10314 netif_carrier_off(tp->dev);
10315 tp->link_config.active_speed = SPEED_INVALID;
10316 tp->link_config.active_duplex = DUPLEX_INVALID;
10317 tp->link_config.phy_is_low_power = 0;
10318 tp->link_config.orig_speed = SPEED_INVALID;
10319 tp->link_config.orig_duplex = DUPLEX_INVALID;
10320 tp->link_config.orig_autoneg = AUTONEG_INVALID;
10321}
10322
10323static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10324{
fdfec172
MC
10325 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10326 tp->bufmgr_config.mbuf_read_dma_low_water =
10327 DEFAULT_MB_RDMA_LOW_WATER_5705;
10328 tp->bufmgr_config.mbuf_mac_rx_low_water =
10329 DEFAULT_MB_MACRX_LOW_WATER_5705;
10330 tp->bufmgr_config.mbuf_high_water =
10331 DEFAULT_MB_HIGH_WATER_5705;
10332
10333 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10334 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10335 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10336 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10337 tp->bufmgr_config.mbuf_high_water_jumbo =
10338 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10339 } else {
10340 tp->bufmgr_config.mbuf_read_dma_low_water =
10341 DEFAULT_MB_RDMA_LOW_WATER;
10342 tp->bufmgr_config.mbuf_mac_rx_low_water =
10343 DEFAULT_MB_MACRX_LOW_WATER;
10344 tp->bufmgr_config.mbuf_high_water =
10345 DEFAULT_MB_HIGH_WATER;
10346
10347 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10348 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10349 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10350 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10351 tp->bufmgr_config.mbuf_high_water_jumbo =
10352 DEFAULT_MB_HIGH_WATER_JUMBO;
10353 }
1da177e4
LT
10354
10355 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10356 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10357}
10358
10359static char * __devinit tg3_phy_string(struct tg3 *tp)
10360{
10361 switch (tp->phy_id & PHY_ID_MASK) {
10362 case PHY_ID_BCM5400: return "5400";
10363 case PHY_ID_BCM5401: return "5401";
10364 case PHY_ID_BCM5411: return "5411";
10365 case PHY_ID_BCM5701: return "5701";
10366 case PHY_ID_BCM5703: return "5703";
10367 case PHY_ID_BCM5704: return "5704";
10368 case PHY_ID_BCM5705: return "5705";
10369 case PHY_ID_BCM5750: return "5750";
85e94ced 10370 case PHY_ID_BCM5752: return "5752";
a4e2b347 10371 case PHY_ID_BCM5714: return "5714";
4cf78e4f 10372 case PHY_ID_BCM5780: return "5780";
1da177e4
LT
10373 case PHY_ID_BCM8002: return "8002/serdes";
10374 case 0: return "serdes";
10375 default: return "unknown";
10376 };
10377}
10378
f9804ddb
MC
10379static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10380{
10381 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10382 strcpy(str, "PCI Express");
10383 return str;
10384 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10385 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10386
10387 strcpy(str, "PCIX:");
10388
10389 if ((clock_ctrl == 7) ||
10390 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10391 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10392 strcat(str, "133MHz");
10393 else if (clock_ctrl == 0)
10394 strcat(str, "33MHz");
10395 else if (clock_ctrl == 2)
10396 strcat(str, "50MHz");
10397 else if (clock_ctrl == 4)
10398 strcat(str, "66MHz");
10399 else if (clock_ctrl == 6)
10400 strcat(str, "100MHz");
10401 else if (clock_ctrl == 7)
10402 strcat(str, "133MHz");
10403 } else {
10404 strcpy(str, "PCI:");
10405 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10406 strcat(str, "66MHz");
10407 else
10408 strcat(str, "33MHz");
10409 }
10410 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10411 strcat(str, ":32-bit");
10412 else
10413 strcat(str, ":64-bit");
10414 return str;
10415}
10416
1da177e4
LT
/* Find the other PCI function of a dual-port 5704 (same slot, different
 * function number).  Returns the peer pci_dev; BUGs if none exists,
 * since a 5704 is always dual-function.
 */
static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Scan all 8 functions of our slot for a device other than us.
	 * pci_get_slot takes a reference; drop it each iteration (the
	 * put on the found peer is balanced by the final put below).
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	if (!peer || peer == tp->pdev)
		BUG();

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
10439
15f9850d
DM
10440static void __devinit tg3_init_coal(struct tg3 *tp)
10441{
10442 struct ethtool_coalesce *ec = &tp->coal;
10443
10444 memset(ec, 0, sizeof(*ec));
10445 ec->cmd = ETHTOOL_GCOALESCE;
10446 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10447 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10448 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10449 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10450 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10451 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10452 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10453 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10454 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10455
10456 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10457 HOSTCC_MODE_CLRTICK_TXBD)) {
10458 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10459 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10460 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10461 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10462 }
d244c892
MC
10463
10464 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10465 ec->rx_coalesce_usecs_irq = 0;
10466 ec->tx_coalesce_usecs_irq = 0;
10467 ec->stats_block_coalesce_usecs = 0;
10468 }
15f9850d
DM
10469}
10470
1da177e4
LT
10471static int __devinit tg3_init_one(struct pci_dev *pdev,
10472 const struct pci_device_id *ent)
10473{
10474 static int tg3_version_printed = 0;
10475 unsigned long tg3reg_base, tg3reg_len;
10476 struct net_device *dev;
10477 struct tg3 *tp;
10478 int i, err, pci_using_dac, pm_cap;
f9804ddb 10479 char str[40];
1da177e4
LT
10480
10481 if (tg3_version_printed++ == 0)
10482 printk(KERN_INFO "%s", version);
10483
10484 err = pci_enable_device(pdev);
10485 if (err) {
10486 printk(KERN_ERR PFX "Cannot enable PCI device, "
10487 "aborting.\n");
10488 return err;
10489 }
10490
10491 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10492 printk(KERN_ERR PFX "Cannot find proper PCI device "
10493 "base address, aborting.\n");
10494 err = -ENODEV;
10495 goto err_out_disable_pdev;
10496 }
10497
10498 err = pci_request_regions(pdev, DRV_MODULE_NAME);
10499 if (err) {
10500 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10501 "aborting.\n");
10502 goto err_out_disable_pdev;
10503 }
10504
10505 pci_set_master(pdev);
10506
10507 /* Find power-management capability. */
10508 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10509 if (pm_cap == 0) {
10510 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10511 "aborting.\n");
10512 err = -EIO;
10513 goto err_out_free_res;
10514 }
10515
10516 /* Configure DMA attributes. */
10517 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10518 if (!err) {
10519 pci_using_dac = 1;
10520 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10521 if (err < 0) {
10522 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10523 "for consistent allocations\n");
10524 goto err_out_free_res;
10525 }
10526 } else {
10527 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10528 if (err) {
10529 printk(KERN_ERR PFX "No usable DMA configuration, "
10530 "aborting.\n");
10531 goto err_out_free_res;
10532 }
10533 pci_using_dac = 0;
10534 }
10535
10536 tg3reg_base = pci_resource_start(pdev, 0);
10537 tg3reg_len = pci_resource_len(pdev, 0);
10538
10539 dev = alloc_etherdev(sizeof(*tp));
10540 if (!dev) {
10541 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10542 err = -ENOMEM;
10543 goto err_out_free_res;
10544 }
10545
10546 SET_MODULE_OWNER(dev);
10547 SET_NETDEV_DEV(dev, &pdev->dev);
10548
10549 if (pci_using_dac)
10550 dev->features |= NETIF_F_HIGHDMA;
10551 dev->features |= NETIF_F_LLTX;
10552#if TG3_VLAN_TAG_USED
10553 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10554 dev->vlan_rx_register = tg3_vlan_rx_register;
10555 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10556#endif
10557
10558 tp = netdev_priv(dev);
10559 tp->pdev = pdev;
10560 tp->dev = dev;
10561 tp->pm_cap = pm_cap;
10562 tp->mac_mode = TG3_DEF_MAC_MODE;
10563 tp->rx_mode = TG3_DEF_RX_MODE;
10564 tp->tx_mode = TG3_DEF_TX_MODE;
10565 tp->mi_mode = MAC_MI_MODE_BASE;
10566 if (tg3_debug > 0)
10567 tp->msg_enable = tg3_debug;
10568 else
10569 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10570
10571 /* The word/byte swap controls here control register access byte
10572 * swapping. DMA data byte swapping is controlled in the GRC_MODE
10573 * setting below.
10574 */
10575 tp->misc_host_ctrl =
10576 MISC_HOST_CTRL_MASK_PCI_INT |
10577 MISC_HOST_CTRL_WORD_SWAP |
10578 MISC_HOST_CTRL_INDIR_ACCESS |
10579 MISC_HOST_CTRL_PCISTATE_RW;
10580
10581 /* The NONFRM (non-frame) byte/word swap controls take effect
10582 * on descriptor entries, anything which isn't packet data.
10583 *
10584 * The StrongARM chips on the board (one for tx, one for rx)
10585 * are running in big-endian mode.
10586 */
10587 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10588 GRC_MODE_WSWAP_NONFRM_DATA);
10589#ifdef __BIG_ENDIAN
10590 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10591#endif
10592 spin_lock_init(&tp->lock);
10593 spin_lock_init(&tp->tx_lock);
10594 spin_lock_init(&tp->indirect_lock);
10595 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10596
10597 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10598 if (tp->regs == 0UL) {
10599 printk(KERN_ERR PFX "Cannot map device registers, "
10600 "aborting.\n");
10601 err = -ENOMEM;
10602 goto err_out_free_dev;
10603 }
10604
10605 tg3_init_link_config(tp);
10606
1da177e4
LT
10607 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10608 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10609 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10610
10611 dev->open = tg3_open;
10612 dev->stop = tg3_close;
10613 dev->get_stats = tg3_get_stats;
10614 dev->set_multicast_list = tg3_set_rx_mode;
10615 dev->set_mac_address = tg3_set_mac_addr;
10616 dev->do_ioctl = tg3_ioctl;
10617 dev->tx_timeout = tg3_tx_timeout;
10618 dev->poll = tg3_poll;
10619 dev->ethtool_ops = &tg3_ethtool_ops;
10620 dev->weight = 64;
10621 dev->watchdog_timeo = TG3_TX_TIMEOUT;
10622 dev->change_mtu = tg3_change_mtu;
10623 dev->irq = pdev->irq;
10624#ifdef CONFIG_NET_POLL_CONTROLLER
10625 dev->poll_controller = tg3_poll_controller;
10626#endif
10627
10628 err = tg3_get_invariants(tp);
10629 if (err) {
10630 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10631 "aborting.\n");
10632 goto err_out_iounmap;
10633 }
10634
fdfec172 10635 tg3_init_bufmgr_config(tp);
1da177e4
LT
10636
10637#if TG3_TSO_SUPPORT != 0
10638 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10639 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10640 }
10641 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10643 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10644 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10645 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10646 } else {
10647 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10648 }
10649
10650 /* TSO is off by default, user can enable using ethtool. */
10651#if 0
10652 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10653 dev->features |= NETIF_F_TSO;
10654#endif
10655
10656#endif
10657
10658 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10659 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10660 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10661 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10662 tp->rx_pending = 63;
10663 }
10664
10665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10666 tp->pdev_peer = tg3_find_5704_peer(tp);
10667
10668 err = tg3_get_device_address(tp);
10669 if (err) {
10670 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10671 "aborting.\n");
10672 goto err_out_iounmap;
10673 }
10674
10675 /*
10676 * Reset chip in case UNDI or EFI driver did not shutdown
10677 * DMA self test will enable WDMAC and we'll see (spurious)
10678 * pending DMA on the PCI bus at that point.
10679 */
10680 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10681 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10682 pci_save_state(tp->pdev);
10683 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 10684 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
10685 }
10686
10687 err = tg3_test_dma(tp);
10688 if (err) {
10689 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10690 goto err_out_iounmap;
10691 }
10692
10693 /* Tigon3 can do ipv4 only... and some chips have buggy
10694 * checksumming.
10695 */
10696 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10697 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10698 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10699 } else
10700 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10701
10702 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10703 dev->features &= ~NETIF_F_HIGHDMA;
10704
10705 /* flow control autonegotiation is default behavior */
10706 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10707
15f9850d
DM
10708 tg3_init_coal(tp);
10709
7d3f4c97
DM
10710 /* Now that we have fully setup the chip, save away a snapshot
10711 * of the PCI config space. We need to restore this after
10712 * GRC_MISC_CFG core clock resets and some resume events.
10713 */
10714 pci_save_state(tp->pdev);
10715
1da177e4
LT
10716 err = register_netdev(dev);
10717 if (err) {
10718 printk(KERN_ERR PFX "Cannot register net device, "
10719 "aborting.\n");
10720 goto err_out_iounmap;
10721 }
10722
10723 pci_set_drvdata(pdev, dev);
10724
f9804ddb 10725 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
1da177e4
LT
10726 dev->name,
10727 tp->board_part_number,
10728 tp->pci_chip_rev_id,
10729 tg3_phy_string(tp),
f9804ddb 10730 tg3_bus_string(tp, str),
1da177e4
LT
10731 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10732
10733 for (i = 0; i < 6; i++)
10734 printk("%2.2x%c", dev->dev_addr[i],
10735 i == 5 ? '\n' : ':');
10736
10737 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10738 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10739 "TSOcap[%d] \n",
10740 dev->name,
10741 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10742 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10743 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10744 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10745 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10746 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10747 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
59e6b434
DM
10748 printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10749 dev->name, tp->dma_rwctrl);
1da177e4
LT
10750
10751 return 0;
10752
10753err_out_iounmap:
6892914f
MC
10754 if (tp->regs) {
10755 iounmap(tp->regs);
22abe310 10756 tp->regs = NULL;
6892914f 10757 }
1da177e4
LT
10758
10759err_out_free_dev:
10760 free_netdev(dev);
10761
10762err_out_free_res:
10763 pci_release_regions(pdev);
10764
10765err_out_disable_pdev:
10766 pci_disable_device(pdev);
10767 pci_set_drvdata(pdev, NULL);
10768 return err;
10769}
10770
10771static void __devexit tg3_remove_one(struct pci_dev *pdev)
10772{
10773 struct net_device *dev = pci_get_drvdata(pdev);
10774
10775 if (dev) {
10776 struct tg3 *tp = netdev_priv(dev);
10777
10778 unregister_netdev(dev);
6892914f
MC
10779 if (tp->regs) {
10780 iounmap(tp->regs);
22abe310 10781 tp->regs = NULL;
6892914f 10782 }
1da177e4
LT
10783 free_netdev(dev);
10784 pci_release_regions(pdev);
10785 pci_disable_device(pdev);
10786 pci_set_drvdata(pdev, NULL);
10787 }
10788}
10789
10790static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10791{
10792 struct net_device *dev = pci_get_drvdata(pdev);
10793 struct tg3 *tp = netdev_priv(dev);
10794 int err;
10795
10796 if (!netif_running(dev))
10797 return 0;
10798
10799 tg3_netif_stop(tp);
10800
10801 del_timer_sync(&tp->timer);
10802
f47c11ee 10803 tg3_full_lock(tp, 1);
1da177e4 10804 tg3_disable_ints(tp);
f47c11ee 10805 tg3_full_unlock(tp);
1da177e4
LT
10806
10807 netif_device_detach(dev);
10808
f47c11ee 10809 tg3_full_lock(tp, 0);
944d980e 10810 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
f47c11ee 10811 tg3_full_unlock(tp);
1da177e4
LT
10812
10813 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10814 if (err) {
f47c11ee 10815 tg3_full_lock(tp, 0);
1da177e4
LT
10816
10817 tg3_init_hw(tp);
10818
10819 tp->timer.expires = jiffies + tp->timer_offset;
10820 add_timer(&tp->timer);
10821
10822 netif_device_attach(dev);
10823 tg3_netif_start(tp);
10824
f47c11ee 10825 tg3_full_unlock(tp);
1da177e4
LT
10826 }
10827
10828 return err;
10829}
10830
10831static int tg3_resume(struct pci_dev *pdev)
10832{
10833 struct net_device *dev = pci_get_drvdata(pdev);
10834 struct tg3 *tp = netdev_priv(dev);
10835 int err;
10836
10837 if (!netif_running(dev))
10838 return 0;
10839
10840 pci_restore_state(tp->pdev);
10841
10842 err = tg3_set_power_state(tp, 0);
10843 if (err)
10844 return err;
10845
10846 netif_device_attach(dev);
10847
f47c11ee 10848 tg3_full_lock(tp, 0);
1da177e4
LT
10849
10850 tg3_init_hw(tp);
10851
10852 tp->timer.expires = jiffies + tp->timer_offset;
10853 add_timer(&tp->timer);
10854
1da177e4
LT
10855 tg3_netif_start(tp);
10856
f47c11ee 10857 tg3_full_unlock(tp);
1da177e4
LT
10858
10859 return 0;
10860}
10861
10862static struct pci_driver tg3_driver = {
10863 .name = DRV_MODULE_NAME,
10864 .id_table = tg3_pci_tbl,
10865 .probe = tg3_init_one,
10866 .remove = __devexit_p(tg3_remove_one),
10867 .suspend = tg3_suspend,
10868 .resume = tg3_resume
10869};
10870
10871static int __init tg3_init(void)
10872{
10873 return pci_module_init(&tg3_driver);
10874}
10875
10876static void __exit tg3_cleanup(void)
10877{
10878 pci_unregister_driver(&tg3_driver);
10879}
10880
10881module_init(tg3_init);
10882module_exit(tg3_cleanup);