]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: 5780 PHY fixes
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
1da177e4
LT
40
41#include <net/checksum.h>
42
43#include <asm/system.h>
44#include <asm/io.h>
45#include <asm/byteorder.h>
46#include <asm/uaccess.h>
47
48#ifdef CONFIG_SPARC64
49#include <asm/idprom.h>
50#include <asm/oplib.h>
51#include <asm/pbm.h>
52#endif
53
54#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
55#define TG3_VLAN_TAG_USED 1
56#else
57#define TG3_VLAN_TAG_USED 0
58#endif
59
60#ifdef NETIF_F_TSO
61#define TG3_TSO_SUPPORT 1
62#else
63#define TG3_TSO_SUPPORT 0
64#endif
65
66#include "tg3.h"
67
68#define DRV_MODULE_NAME "tg3"
69#define PFX DRV_MODULE_NAME ": "
1cbf0747
DM
70#define DRV_MODULE_VERSION "3.40"
71#define DRV_MODULE_RELDATE "September 15, 2005"
1da177e4
LT
72
73#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0
75#define TG3_DEF_TX_MODE 0
76#define TG3_DEF_MSG_ENABLE \
77 (NETIF_MSG_DRV | \
78 NETIF_MSG_PROBE | \
79 NETIF_MSG_LINK | \
80 NETIF_MSG_TIMER | \
81 NETIF_MSG_IFDOWN | \
82 NETIF_MSG_IFUP | \
83 NETIF_MSG_RX_ERR | \
84 NETIF_MSG_TX_ERR)
85
86/* length of time before we decide the hardware is borked,
87 * and dev->tx_timeout() should be called to fix the problem
88 */
89#define TG3_TX_TIMEOUT (5 * HZ)
90
91/* hardware minimum and maximum for a single frame's data payload */
92#define TG3_MIN_MTU 60
93#define TG3_MAX_MTU(tp) \
0f893dc6 94 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
95
96/* These numbers seem to be hard coded in the NIC firmware somehow.
97 * You can't change the ring sizes, but you can change where you place
98 * them in the NIC onboard memory.
99 */
100#define TG3_RX_RING_SIZE 512
101#define TG3_DEF_RX_RING_PENDING 200
102#define TG3_RX_JUMBO_RING_SIZE 256
103#define TG3_DEF_RX_JUMBO_RING_PENDING 100
104
105/* Do not place this n-ring entries value into the tp struct itself,
106 * we really want to expose these constants to GCC so that modulo et
107 * al. operations are done with shifts and masks instead of with
108 * hw multiply/modulo instructions. Another solution would be to
109 * replace things like '% foo' with '& (foo - 1)'.
110 */
111#define TG3_RX_RCB_RING_SIZE(tp) \
112 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
113
114#define TG3_TX_RING_SIZE 512
115#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
116
117#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
118 TG3_RX_RING_SIZE)
119#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_JUMBO_RING_SIZE)
121#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_RCB_RING_SIZE(tp))
123#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
124 TG3_TX_RING_SIZE)
1da177e4 125#define TX_BUFFS_AVAIL(TP) \
51b91468
MC
126 ((TP)->tx_pending - \
127 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
1da177e4
LT
128#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
129
130#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
131#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
132
133/* minimum number of free TX descriptors required to wake up TX process */
134#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
135
136/* number of ETHTOOL_GSTATS u64's */
137#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
138
4cafd3f5
MC
139#define TG3_NUM_TEST 6
140
1da177e4
LT
/* One-line banner printed at probe time: name, version, release date. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* message classes to enable; -1 selects the
 * driver default (TG3_DEF_MSG_ENABLE).
 */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
152
/* PCI vendor/device IDs claimed by this driver.  Any subsystem IDs are
 * accepted (PCI_ANY_ID); the table is terminated by a zeroed entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
246
/* ETHTOOL_GSTRINGS names for the statistics reported via ETHTOOL_GSTATS.
 * The order must match the u64 layout of struct tg3_ethtool_stats
 * (TG3_NUM_STATS entries).
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
327
4cafd3f5
MC
/* ETHTOOL_GSTRINGS names for the self tests (ETHTOOL_TEST).  Order must
 * match the result slots filled in by the ethtool self-test handler
 * (TG3_NUM_TEST entries).
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
338
1da177e4
LT
/* Write a 32-bit chip register through the PCI config-space indirect
 * window (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA
 * carries the value).  indirect_lock serializes the two-step access so
 * concurrent users cannot interleave address and data writes.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
348
349static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
350{
351 writel(val, tp->regs + off);
352 readl(tp->regs + off);
1da177e4
LT
353}
354
/* Read a 32-bit chip register through the PCI config-space indirect
 * window; counterpart of tg3_write_indirect_reg32().  indirect_lock
 * serializes the address-select/data-read pair.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
366
/* Write a mailbox register in indirect (config-space) mode.  Two hot
 * mailboxes (RX return ring consumer, standard RX producer) have their
 * own config-space aliases and bypass the shared window; all others go
 * through the indirect register window at off + 0x5600 under
 * indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
396
/* Read a mailbox register through the indirect window at off + 0x5600;
 * counterpart of tg3_write_indirect_mbox() for the non-aliased case.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
408
409static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
410{
411 tp->write32(tp, off, val);
412 if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
413 !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
414 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
415 tp->read32(tp, off); /* flush */
1da177e4
LT
416}
417
09ee929c
MC
418static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
419{
420 tp->write32_mbox(tp, off, val);
6892914f
MC
421 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
422 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
423 tp->read32_mbox(tp, off);
09ee929c
MC
424}
425
/* Direct MMIO write of a TX mailbox with chip-specific workarounds:
 * TXD_MBOX_HWBUG parts get the value written twice, and
 * MBOX_WRITE_REORDER parts get a read-back to enforce write ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
435
20094930
MC
436static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
437{
438 writel(val, tp->regs + off);
439}
1da177e4 440
20094930
MC
441static u32 tg3_read32(struct tg3 *tp, u32 off)
442{
443 return (readl(tp->regs + off));
444}
445
446#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 447#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
448#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
449#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 450#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
451
452#define tw32(reg,val) tp->write32(tp, reg, val)
1da177e4 453#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
20094930 454#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
455
/* Write a word of NIC on-board SRAM through the config-space memory
 * window (TG3PCI_MEM_WIN_BASE_ADDR / TG3PCI_MEM_WIN_DATA), serialized
 * by indirect_lock.  The window base is reset to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
468
/* Read a word of NIC on-board SRAM through the config-space memory
 * window; counterpart of tg3_write_mem().  The window base is reset
 * to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
481
/* Mask chip interrupts: set the PCI interrupt mask bit in misc host
 * control, then write 1 to interrupt mailbox 0 (flushed) to disarm it.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
488
489static inline void tg3_cond_int(struct tg3 *tp)
490{
38f3843e
MC
491 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
492 (tp->hw_status->status & SD_STATUS_UPDATED))
1da177e4
LT
493 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
494}
495
/* Unmask chip interrupts and re-arm interrupt mailbox 0.  The mailbox
 * is written with (last_tag << 24), which tells the chip how much status
 * work the driver has already completed (see tg3_restart_ints).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	/* Clear irq_sync and make it visible before re-enabling. */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* Force an interrupt if status work is already pending. */
	tg3_cond_int(tp);
}
507
04237ddd
MC
508static inline unsigned int tg3_has_work(struct tg3 *tp)
509{
510 struct tg3_hw_status *sblk = tp->hw_status;
511 unsigned int work_exists = 0;
512
513 /* check for phy events */
514 if (!(tp->tg3_flags &
515 (TG3_FLAG_USE_LINKCHG_REG |
516 TG3_FLAG_POLL_SERDES))) {
517 if (sblk->status & SD_STATUS_LINK_CHG)
518 work_exists = 1;
519 }
520 /* check for RX/TX work to do */
521 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
522 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
523 work_exists = 1;
524
525 return work_exists;
526}
527
1da177e4 528/* tg3_restart_ints
04237ddd
MC
529 * similar to tg3_enable_ints, but it accurately determines whether there
530 * is new work pending and can return without flushing the PIO write
531 * which reenables interrupts
1da177e4
LT
532 */
533static void tg3_restart_ints(struct tg3 *tp)
534{
fac9b83e
DM
535 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
536 tp->last_tag << 24);
1da177e4
LT
537 mmiowb();
538
fac9b83e
DM
539 /* When doing tagged status, this work check is unnecessary.
540 * The last_tag we write above tells the chip which piece of
541 * work we've completed.
542 */
543 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
544 tg3_has_work(tp))
04237ddd
MC
545 tw32(HOSTCC_MODE, tp->coalesce_mode |
546 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
547}
548
/* Quiesce the network interface: stamp trans_start so the watchdog does
 * not fire while we are deliberately stopped, then disable NAPI polling
 * and the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
555
/* Resume the network interface after tg3_netif_stop(): wake the TX
 * queue, re-enable NAPI polling, and re-enable chip interrupts with
 * the status block marked updated so pending work is noticed.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
567
/* Reprogram TG3PCI_CLOCK_CTRL, preserving only the CLKRUN bits and the
 * low divider field, stepping through ALTCLK where the chip requires it.
 * 5780 is skipped entirely.  Each flushed write is followed by a 40us
 * settle delay.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Keep only CLKRUN control bits and the low 5 divider bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_f(TG3PCI_CLOCK_CTRL,
			       clock_ctrl | CLOCK_CTRL_625_CORE);
			udelay(40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Transition via ALTCLK in two steps before writing the
		 * final value below.
		 */
		tw32_f(TG3PCI_CLOCK_CTRL,
		     clock_ctrl |
		     (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
		udelay(40);
		tw32_f(TG3PCI_CLOCK_CTRL,
		     clock_ctrl | (CLOCK_CTRL_ALTCLK));
		udelay(40);
	}
	tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
	udelay(40);
}
600
601#define PHY_BUSY_LOOPS 5000
602
/* Read PHY register @reg over the MAC's MI (MDIO) interface.
 * Autopolling is temporarily turned off while MI_COM is driven by hand
 * and restored afterwards.  Returns 0 with the value in *val, or -EBUSY
 * if the MI interface never cleared its busy bit.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Suspend autopolling so it cannot race our MI_COM use. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the interface goes idle or we run out of tries. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle to latch the data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
651
/* Write @val to PHY register @reg over the MAC's MI (MDIO) interface.
 * Mirrors tg3_readphy(): autopolling is suspended around the manual
 * MI_COM transaction.  Returns 0 on success or -EBUSY on MI timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Suspend autopolling so it cannot race our MI_COM use. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the interface goes idle or we run out of tries. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
696
697static void tg3_phy_set_wirespeed(struct tg3 *tp)
698{
699 u32 val;
700
701 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
702 return;
703
704 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
705 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
706 tg3_writephy(tp, MII_TG3_AUX_CTRL,
707 (val | (1 << 15) | (1 << 4)));
708}
709
710static int tg3_bmcr_reset(struct tg3 *tp)
711{
712 u32 phy_control;
713 int limit, err;
714
715 /* OK, reset it, and poll the BMCR_RESET bit until it
716 * clears or we time out.
717 */
718 phy_control = BMCR_RESET;
719 err = tg3_writephy(tp, MII_BMCR, phy_control);
720 if (err != 0)
721 return -EBUSY;
722
723 limit = 5000;
724 while (limit--) {
725 err = tg3_readphy(tp, MII_BMCR, &phy_control);
726 if (err != 0)
727 return -EBUSY;
728
729 if ((phy_control & BMCR_RESET) == 0) {
730 udelay(40);
731 break;
732 }
733 udelay(10);
734 }
735 if (limit <= 0)
736 return -EBUSY;
737
738 return 0;
739}
740
741static int tg3_wait_macro_done(struct tg3 *tp)
742{
743 int limit = 100;
744
745 while (limit--) {
746 u32 tmp32;
747
748 if (!tg3_readphy(tp, 0x16, &tmp32)) {
749 if ((tmp32 & 0x1000) == 0)
750 break;
751 }
752 }
753 if (limit <= 0)
754 return -EBUSY;
755
756 return 0;
757}
758
/* Write a known test pattern into each of the four PHY DSP channels,
 * read it back, and verify it.  On any macro timeout *resetp is set so
 * the caller (tg3_phy_reset_5703_4_5) retries after another PHY reset.
 * On verify mismatch a fixed recovery sequence is written to DSP
 * address 0x000b.  Returns 0 on success, -EBUSY on timeout/mismatch.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's DSP block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Pattern words come back as low/high pairs; compare the
		 * significant bits only (low 15 and low 4 respectively).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write the recovery sequence. */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
824
/* Clear the DSP test pattern: write six zero words to each of the four
 * channels and wait for each macro operation to finish.  Returns 0 on
 * success, -EBUSY if a macro operation times out.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
844
/* Extended PHY reset for 5703/5704/5705-class chips: repeatedly reset
 * the PHY, force 1000/full master mode, and run the DSP test-pattern
 * check (up to 10 retries) before restoring the original settings.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): if every retry bails out via `continue` on the early
 * tg3_readphy() failures, `err` still holds its value from the last
 * tg3_bmcr_reset()/testpat call and the function proceeds regardless —
 * presumably acceptable best-effort behavior, but worth confirming.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the caller's master/slave setting. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
920
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR is latched; read twice to get the current link state. */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* 5703/5704/5705 need the extended DSP reset sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Post-reset erratum workarounds, keyed by PHY bug flags.  The
	 * DSP_ADDRESS/DSP_RW_PORT pairs below are fixed vendor-supplied
	 * fixup sequences.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
998
/* Configure the GPIO lines that control auxiliary power, taking the
 * WOL state of both this device and (on 5704 dual-port boards) its
 * peer function into account.  No-op when EEPROM writes are protected.
 * Each GPIO write is flushed and allowed 100us to settle.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	/* 5704 is dual-port; the peer function shares the GPIOs. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		tp_peer = pci_get_drvdata(tp->pdev_peer);
		if (!tp_peer)
			BUG();
	}


	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
		/* WOL requested: drive aux power on. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			     (GRC_LCLCTRL_GPIO_OE0 |
			      GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OE2 |
			      GRC_LCLCTRL_GPIO_OUTPUT0 |
			      GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl;

			/* The peer owns the GPIOs once it is initialized. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						grc_local_ctrl);
			udelay(100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						grc_local_ctrl);
			udelay(100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						       grc_local_ctrl);
				udelay(100);
			}
		}
	} else {
		/* No WOL: pulse GPIO1 to turn aux power off. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			     (GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			     (GRC_LCLCTRL_GPIO_OE1));
			udelay(100);

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			     (GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);
		}
	}
}
1085
1086static int tg3_setup_phy(struct tg3 *, int);
1087
1088#define RESET_KIND_SHUTDOWN 0
1089#define RESET_KIND_INIT 1
1090#define RESET_KIND_SUSPEND 2
1091
1092static void tg3_write_sig_post_reset(struct tg3 *, int);
1093static int tg3_halt_cpu(struct tg3 *, u32);
1094
/* tg3_set_power_state - transition the chip between PCI power states.
 *
 * @state: 0 => D0 (full power), 1..3 => D1..D3.
 *
 * For D0 the PM control register is written and the board is switched
 * out of Vaux (unless it is a LOM).  For the low-power states the
 * routine: records the current link config for restore at resume,
 * forces a copper link down to 10Mb/half autoneg, arms WOL magic-packet
 * mode if enabled, slows or gates the core clocks per chip family,
 * frobs the aux power GPIOs, applies the 5750 A/B PLL workaround, and
 * finally commits the new power state.  Returns 0 on success or
 * -EINVAL for an unrecognized state.
 */
1095static int tg3_set_power_state(struct tg3 *tp, int state)
1096{
1097	u32 misc_host_ctrl;
1098	u16 power_control, power_caps;
1099	int pm = tp->pm_cap;
1100
1101	/* Make sure register accesses (indirect or otherwise)
1102	 * will function correctly.
1103	 */
1104	pci_write_config_dword(tp->pdev,
1105			       TG3PCI_MISC_HOST_CTRL,
1106			       tp->misc_host_ctrl);
1107
1108	pci_read_config_word(tp->pdev,
1109			     pm + PCI_PM_CTRL,
1110			     &power_control);
	/* Clear any pending PME status and the old power-state bits. */
1111	power_control |= PCI_PM_CTRL_PME_STATUS;
1112	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1113	switch (state) {
1114	case 0:
1115		power_control |= 0;
1116		pci_write_config_word(tp->pdev,
1117				      pm + PCI_PM_CTRL,
1118				      power_control);
8c6bda1a
MC
1119		udelay(100);	/* Delay after power state change */
1120
1121		/* Switch out of Vaux if it is not a LOM */
1122		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1123			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1124			udelay(100);
1125		}
1da177e4
LT
1126
1127		return 0;
1128
1129	case 1:
1130		power_control |= 1;
1131		break;
1132
1133	case 2:
1134		power_control |= 2;
1135		break;
1136
1137	case 3:
1138		power_control |= 3;
1139		break;
1140
1141	default:
1142		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1143		       "requested.\n",
1144		       tp->dev->name, state);
1145		return -EINVAL;
1146	};
1147
1148	power_control |= PCI_PM_CTRL_PME_ENABLE;
1149
	/* Mask PCI interrupts from the chip while it is powered down. */
1150	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1151	tw32(TG3PCI_MISC_HOST_CTRL,
1152	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1153
	/* Save the pre-suspend link settings so resume can restore them. */
1154	if (tp->link_config.phy_is_low_power == 0) {
1155		tp->link_config.phy_is_low_power = 1;
1156		tp->link_config.orig_speed = tp->link_config.speed;
1157		tp->link_config.orig_duplex = tp->link_config.duplex;
1158		tp->link_config.orig_autoneg = tp->link_config.autoneg;
1159	}
1160
	/* Copper only: drop the link to 10Mb/half to save power. */
747e8f8b 1161	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1da177e4
LT
1162		tp->link_config.speed = SPEED_10;
1163		tp->link_config.duplex = DUPLEX_HALF;
1164		tp->link_config.autoneg = AUTONEG_ENABLE;
1165		tg3_setup_phy(tp, 0);
1166	}
1167
1168	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1169
	/* Keep the MAC receiving so WOL frames can be detected. */
1170	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1171		u32 mac_mode;
1172
1173		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1174			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1175			udelay(40);
1176
1177			mac_mode = MAC_MODE_PORT_MODE_MII;
1178
1179			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1180			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1181				mac_mode |= MAC_MODE_LINK_POLARITY;
1182		} else {
1183			mac_mode = MAC_MODE_PORT_MODE_TBI;
1184		}
1185
cbf46853 1186		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1187			tw32(MAC_LED_CTRL, tp->led_ctrl);
1188
1189		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1190		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1191			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1192
1193		tw32_f(MAC_MODE, mac_mode);
1194		udelay(100);
1195
1196		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1197		udelay(10);
1198	}
1199
	/* Per-family clock slowdown / gating for the low-power state. */
1200	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1201	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1202	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1203		u32 base_val;
1204
1205		base_val = tp->pci_clock_ctrl;
1206		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1207			     CLOCK_CTRL_TXCLK_DISABLE);
1208
1209		tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1210		       CLOCK_CTRL_ALTCLK |
1211		       CLOCK_CTRL_PWRDOWN_PLL133);
1212		udelay(40);
4cf78e4f
MC
1213	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1214		/* do nothing */
85e94ced 1215	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1216		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1217		u32 newbits1, newbits2;
1218
1219		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1220		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1221			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1222				    CLOCK_CTRL_TXCLK_DISABLE |
1223				    CLOCK_CTRL_ALTCLK);
1224			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1225		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1226			newbits1 = CLOCK_CTRL_625_CORE;
1227			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1228		} else {
1229			newbits1 = CLOCK_CTRL_ALTCLK;
1230			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1231		}
1232
		/* Two-step write: the clock bits must be applied in order. */
1233		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1234		udelay(40);
1235
1236		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1237		udelay(40);
1238
1239		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1240			u32 newbits3;
1241
1242			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1243			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1244				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1245					    CLOCK_CTRL_TXCLK_DISABLE |
1246					    CLOCK_CTRL_44MHZ_CORE);
1247			} else {
1248				newbits3 = CLOCK_CTRL_44MHZ_CORE;
1249			}
1250
1251			tw32_f(TG3PCI_CLOCK_CTRL,
1252			       tp->pci_clock_ctrl | newbits3);
1253			udelay(40);
1254		}
1255	}
1256
1257	tg3_frob_aux_power(tp);
1258
1259	/* Workaround for unstable PLL clock */
1260	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1261	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1262		u32 val = tr32(0x7d00);
1263
1264		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1265		tw32(0x7d00, val);
1266		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1267			tg3_halt_cpu(tp, RX_CPU_BASE);
1268	}
1269
1270	/* Finally, set the new power state. */
1271	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1272	udelay(100);	/* Delay after power state change */
1da177e4
LT
1273
1274	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1275
1276	return 0;
1277}
1278
1279static void tg3_link_report(struct tg3 *tp)
1280{
1281 if (!netif_carrier_ok(tp->dev)) {
1282 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1283 } else {
1284 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1285 tp->dev->name,
1286 (tp->link_config.active_speed == SPEED_1000 ?
1287 1000 :
1288 (tp->link_config.active_speed == SPEED_100 ?
1289 100 : 10)),
1290 (tp->link_config.active_duplex == DUPLEX_FULL ?
1291 "full" : "half"));
1292
1293 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1294 "%s for RX.\n",
1295 tp->dev->name,
1296 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1297 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1298 }
1299}
1300
/* tg3_setup_flow_control - resolve TX/RX pause from the local and
 * link-partner pause advertisements and program MAC_RX_MODE /
 * MAC_TX_MODE to match.  When pause autoneg is disabled the
 * previously configured TG3_FLAG_{RX,TX}_PAUSE flags are kept as-is.
 */
1301static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1302{
1303	u32 new_tg3_flags = 0;
1304	u32 old_rx_mode = tp->rx_mode;
1305	u32 old_tx_mode = tp->tx_mode;
1306
1307	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1308
1309		/* Convert 1000BaseX flow control bits to 1000BaseT
1310		 * bits before resolving flow control.
1311		 */
1312		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1313			local_adv &= ~(ADVERTISE_PAUSE_CAP |
1314				       ADVERTISE_PAUSE_ASYM);
1315			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1316
1317			if (local_adv & ADVERTISE_1000XPAUSE)
1318				local_adv |= ADVERTISE_PAUSE_CAP;
1319			if (local_adv & ADVERTISE_1000XPSE_ASYM)
1320				local_adv |= ADVERTISE_PAUSE_ASYM;
1321			if (remote_adv & LPA_1000XPAUSE)
1322				remote_adv |= LPA_PAUSE_CAP;
1323			if (remote_adv & LPA_1000XPAUSE_ASYM)
1324				remote_adv |= LPA_PAUSE_ASYM;
1325		}
1326
1da177e4
LT
		/* Pause resolution: symmetric pause on both sides enables
		 * both directions; asymmetric combinations enable RX-only
		 * or TX-only as appropriate.
		 */
1327		if (local_adv & ADVERTISE_PAUSE_CAP) {
1328			if (local_adv & ADVERTISE_PAUSE_ASYM) {
1329				if (remote_adv & LPA_PAUSE_CAP)
1330					new_tg3_flags |=
1331						(TG3_FLAG_RX_PAUSE |
1332						 TG3_FLAG_TX_PAUSE);
1333				else if (remote_adv & LPA_PAUSE_ASYM)
1334					new_tg3_flags |=
1335						(TG3_FLAG_RX_PAUSE);
1336			} else {
1337				if (remote_adv & LPA_PAUSE_CAP)
1338					new_tg3_flags |=
1339						(TG3_FLAG_RX_PAUSE |
1340						 TG3_FLAG_TX_PAUSE);
1341			}
1342		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1343			if ((remote_adv & LPA_PAUSE_CAP) &&
1344			    (remote_adv & LPA_PAUSE_ASYM))
1345				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1346		}
1347
1348		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1349		tp->tg3_flags |= new_tg3_flags;
1350	} else {
1351		new_tg3_flags = tp->tg3_flags;
1352	}
1353
1354	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1355		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1356	else
1357		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1358
	/* Only touch the MAC registers when the mode actually changed. */
1359	if (old_rx_mode != tp->rx_mode) {
1360		tw32_f(MAC_RX_MODE, tp->rx_mode);
1361	}
1362
1363	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1364		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1365	else
1366		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1367
1368	if (old_tx_mode != tp->tx_mode) {
1369		tw32_f(MAC_TX_MODE, tp->tx_mode);
1370	}
1371}
1372
1373static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1374{
1375 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1376 case MII_TG3_AUX_STAT_10HALF:
1377 *speed = SPEED_10;
1378 *duplex = DUPLEX_HALF;
1379 break;
1380
1381 case MII_TG3_AUX_STAT_10FULL:
1382 *speed = SPEED_10;
1383 *duplex = DUPLEX_FULL;
1384 break;
1385
1386 case MII_TG3_AUX_STAT_100HALF:
1387 *speed = SPEED_100;
1388 *duplex = DUPLEX_HALF;
1389 break;
1390
1391 case MII_TG3_AUX_STAT_100FULL:
1392 *speed = SPEED_100;
1393 *duplex = DUPLEX_FULL;
1394 break;
1395
1396 case MII_TG3_AUX_STAT_1000HALF:
1397 *speed = SPEED_1000;
1398 *duplex = DUPLEX_HALF;
1399 break;
1400
1401 case MII_TG3_AUX_STAT_1000FULL:
1402 *speed = SPEED_1000;
1403 *duplex = DUPLEX_FULL;
1404 break;
1405
1406 default:
1407 *speed = SPEED_INVALID;
1408 *duplex = DUPLEX_INVALID;
1409 break;
1410 };
1411}
1412
/* tg3_phy_copper_begin - program the copper PHY's advertisement
 * registers according to tp->link_config and (re)start autoneg, or
 * force a fixed speed/duplex if autoneg is disabled.
 *
 * Three advertisement cases: low-power (10Mb only, plus 100Mb if WOL
 * at 100 is required), "advertise everything" when no speed was
 * requested, or exactly the requested mode.  5701 A0/B0 parts
 * additionally force master mode for gigabit (chip erratum
 * workaround, per the explicit rev checks below).
 */
1413static void tg3_phy_copper_begin(struct tg3 *tp)
1414{
1415	u32 new_adv;
1416	int i;
1417
1418	if (tp->link_config.phy_is_low_power) {
1419		/* Entering low power mode. Disable gigabit and
1420		 * 100baseT advertisements.
1421		 */
1422		tg3_writephy(tp, MII_TG3_CTRL, 0);
1423
1424		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1425			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1426		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1427			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1428
1429		tg3_writephy(tp, MII_ADVERTISE, new_adv);
1430	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything the
		 * board supports.
		 */
1431		tp->link_config.advertising =
1432			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1433			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1434			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1435			 ADVERTISED_Autoneg | ADVERTISED_MII);
1436
1437		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1438			tp->link_config.advertising &=
1439				~(ADVERTISED_1000baseT_Half |
1440				  ADVERTISED_1000baseT_Full);
1441
1442		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1443		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1444			new_adv |= ADVERTISE_10HALF;
1445		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1446			new_adv |= ADVERTISE_10FULL;
1447		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1448			new_adv |= ADVERTISE_100HALF;
1449		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1450			new_adv |= ADVERTISE_100FULL;
1451		tg3_writephy(tp, MII_ADVERTISE, new_adv);
1452
1453		if (tp->link_config.advertising &
1454		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1455			new_adv = 0;
1456			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1457				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1458			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1459				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1460			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1461			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1462			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1463				new_adv |= (MII_TG3_CTRL_AS_MASTER |
1464					    MII_TG3_CTRL_ENABLE_AS_MASTER);
1465			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1466		} else {
1467			tg3_writephy(tp, MII_TG3_CTRL, 0);
1468		}
1469	} else {
1470		/* Asking for a specific link mode. */
1471		if (tp->link_config.speed == SPEED_1000) {
1472			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1473			tg3_writephy(tp, MII_ADVERTISE, new_adv);
1474
1475			if (tp->link_config.duplex == DUPLEX_FULL)
1476				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1477			else
1478				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1479			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1480			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1481				new_adv |= (MII_TG3_CTRL_AS_MASTER |
1482					    MII_TG3_CTRL_ENABLE_AS_MASTER);
1483			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1484		} else {
1485			tg3_writephy(tp, MII_TG3_CTRL, 0);
1486
1487			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1488			if (tp->link_config.speed == SPEED_100) {
1489				if (tp->link_config.duplex == DUPLEX_FULL)
1490					new_adv |= ADVERTISE_100FULL;
1491				else
1492					new_adv |= ADVERTISE_100HALF;
1493			} else {
1494				if (tp->link_config.duplex == DUPLEX_FULL)
1495					new_adv |= ADVERTISE_10FULL;
1496				else
1497					new_adv |= ADVERTISE_10HALF;
1498			}
1499			tg3_writephy(tp, MII_ADVERTISE, new_adv);
1500		}
1501	}
1502
	/* Forced mode: write BMCR directly.  First loop the PHY back and
	 * wait (up to ~15ms) for link to drop before applying the new
	 * forced setting.
	 */
1503	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1504	    tp->link_config.speed != SPEED_INVALID) {
1505		u32 bmcr, orig_bmcr;
1506
1507		tp->link_config.active_speed = tp->link_config.speed;
1508		tp->link_config.active_duplex = tp->link_config.duplex;
1509
1510		bmcr = 0;
1511		switch (tp->link_config.speed) {
1512		default:
1513		case SPEED_10:
1514			break;
1515
1516		case SPEED_100:
1517			bmcr |= BMCR_SPEED100;
1518			break;
1519
1520		case SPEED_1000:
1521			bmcr |= TG3_BMCR_SPEED1000;
1522			break;
1523		};
1524
1525		if (tp->link_config.duplex == DUPLEX_FULL)
1526			bmcr |= BMCR_FULLDPLX;
1527
1528		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1529		    (bmcr != orig_bmcr)) {
1530			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1531			for (i = 0; i < 1500; i++) {
1532				u32 tmp;
1533
1534				udelay(10);
1535				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1536				    tg3_readphy(tp, MII_BMSR, &tmp))
1537					continue;
1538				if (!(tmp & BMSR_LSTATUS)) {
1539					udelay(40);
1540					break;
1541				}
1542			}
1543			tg3_writephy(tp, MII_BMCR, bmcr);
1544			udelay(40);
1545		}
1546	} else {
1547		tg3_writephy(tp, MII_BMCR,
1548			     BMCR_ANENABLE | BMCR_ANRESTART);
1549	}
1550}
1551
/* tg3_init_5401phy_dsp - load the BCM5401 PHY's DSP coefficient fixup
 * sequence (turns off tap power management, sets the extended packet
 * length bit).  The address/data write pairs are an opaque sequence
 * supplied by the vendor; do not reorder or alter them.
 *
 * Returns 0 on success or a nonzero OR of the individual write
 * failures.
 */
1552static int tg3_init_5401phy_dsp(struct tg3 *tp)
1553{
1554	int err;
1555
1556	/* Turn off tap power management. */
1557	/* Set Extended packet length bit */
1558	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1559
1560	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1561	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1562
1563	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1564	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1565
1566	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1567	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1568
1569	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1570	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1571
1572	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1573	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1574
	/* Let the writes settle before the caller continues. */
1575	udelay(40);
1576
1577	return err;
1578}
1579
1580static int tg3_copper_is_advertising_all(struct tg3 *tp)
1581{
1582 u32 adv_reg, all_mask;
1583
1584 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1585 return 0;
1586
1587 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1588 ADVERTISE_100HALF | ADVERTISE_100FULL);
1589 if ((adv_reg & all_mask) != all_mask)
1590 return 0;
1591 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1592 u32 tg3_ctrl;
1593
1594 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1595 return 0;
1596
1597 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1598 MII_TG3_CTRL_ADV_1000_FULL);
1599 if ((tg3_ctrl & all_mask) != all_mask)
1600 return 0;
1601 }
1602 return 1;
1603}
1604
/* tg3_setup_copper_phy - bring up / re-evaluate the link on a copper
 * (10/100/1000BASE-T) PHY.
 *
 * Clears stale MAC status, optionally resets the PHY (when forced, or
 * when certain third-party PHYs report link loss), applies per-chip
 * PHY workarounds, reads BMSR/AUX_STAT/BMCR to determine the
 * negotiated speed and duplex, resolves flow control for full-duplex
 * autoneg links, restarts the link bring-up if the link is down, and
 * finally programs MAC_MODE/MAC_EVENT and updates the carrier state.
 * Always returns 0.
 */
1605static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1606{
1607	int current_link_up;
1608	u32 bmsr, dummy;
1609	u16 current_speed;
1610	u8 current_duplex;
1611	int i, err;
1612
1613	tw32(MAC_EVENT, 0);
1614
1615	tw32_f(MAC_STATUS,
1616	     (MAC_STATUS_SYNC_CHANGED |
1617	      MAC_STATUS_CFG_CHANGED |
1618	      MAC_STATUS_MI_COMPLETION |
1619	      MAC_STATUS_LNKSTATE_CHANGED));
1620	udelay(40);
1621
1622	tp->mi_mode = MAC_MI_MODE_BASE;
1623	tw32_f(MAC_MI_MODE, tp->mi_mode);
1624	udelay(80);
1625
1626	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1627
1628	/* Some third-party PHYs need to be reset on link going
1629	 * down.
1630	 */
1631	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1632	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1633	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1634	    netif_carrier_ok(tp->dev)) {
		/* BMSR is read twice throughout this function; the first
		 * read returns the latched (stale) status — TODO confirm
		 * against the PHY datasheet.
		 */
1635		tg3_readphy(tp, MII_BMSR, &bmsr);
1636		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1637		    !(bmsr & BMSR_LSTATUS))
1638			force_reset = 1;
1639	}
1640	if (force_reset)
1641		tg3_phy_reset(tp);
1642
1643	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1644		tg3_readphy(tp, MII_BMSR, &bmsr);
1645		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1646		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1647			bmsr = 0;
1648
1649		if (!(bmsr & BMSR_LSTATUS)) {
1650			err = tg3_init_5401phy_dsp(tp);
1651			if (err)
1652				return err;
1653
1654			tg3_readphy(tp, MII_BMSR, &bmsr);
1655			for (i = 0; i < 1000; i++) {
1656				udelay(10);
1657				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1658				    (bmsr & BMSR_LSTATUS)) {
1659					udelay(40);
1660					break;
1661				}
1662			}
1663
			/* 5401 B0 parts need a reset + DSP reload if still
			 * down after a gigabit link.
			 */
1664			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1665			    !(bmsr & BMSR_LSTATUS) &&
1666			    tp->link_config.active_speed == SPEED_1000) {
1667				err = tg3_phy_reset(tp);
1668				if (!err)
1669					err = tg3_init_5401phy_dsp(tp);
1670				if (err)
1671					return err;
1672			}
1673		}
1674	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1675		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1676		/* 5701 {A0,B0} CRC bug workaround */
1677		tg3_writephy(tp, 0x15, 0x0a75);
1678		tg3_writephy(tp, 0x1c, 0x8c68);
1679		tg3_writephy(tp, 0x1c, 0x8d68);
1680		tg3_writephy(tp, 0x1c, 0x8c68);
1681	}
1682
1683	/* Clear pending interrupts... */
1684	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1685	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1686
1687	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1688		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1689	else
1690		tg3_writephy(tp, MII_TG3_IMASK, ~0);
1691
1692	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1693	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1694		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1695			tg3_writephy(tp, MII_TG3_EXT_CTRL,
1696				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1697		else
1698			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1699	}
1700
1701	current_link_up = 0;
1702	current_speed = SPEED_INVALID;
1703	current_duplex = DUPLEX_INVALID;
1704
1705	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1706		u32 val;
1707
1708		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1709		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1710		if (!(val & (1 << 10))) {
1711			val |= (1 << 10);
1712			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			/* Bit had to be set; skip link evaluation and go
			 * straight to the relink path below.
			 */
1713			goto relink;
1714		}
1715	}
1716
	/* Poll (up to ~4ms) for link-up. */
1717	bmsr = 0;
1718	for (i = 0; i < 100; i++) {
1719		tg3_readphy(tp, MII_BMSR, &bmsr);
1720		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1721		    (bmsr & BMSR_LSTATUS))
1722			break;
1723		udelay(40);
1724	}
1725
1726	if (bmsr & BMSR_LSTATUS) {
1727		u32 aux_stat, bmcr;
1728
1729		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1730		for (i = 0; i < 2000; i++) {
1731			udelay(10);
1732			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1733			    aux_stat)
1734				break;
1735		}
1736
1737		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1738					     &current_speed,
1739					     &current_duplex);
1740
		/* Wait for a sane BMCR value (neither 0 nor all-ones). */
1741		bmcr = 0;
1742		for (i = 0; i < 200; i++) {
1743			tg3_readphy(tp, MII_BMCR, &bmcr);
1744			if (tg3_readphy(tp, MII_BMCR, &bmcr))
1745				continue;
1746			if (bmcr && bmcr != 0x7fff)
1747				break;
1748			udelay(10);
1749		}
1750
1751		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1752			if (bmcr & BMCR_ANENABLE) {
1753				current_link_up = 1;
1754
1755				/* Force autoneg restart if we are exiting
1756				 * low power mode.
1757				 */
1758				if (!tg3_copper_is_advertising_all(tp))
1759					current_link_up = 0;
1760			} else {
1761				current_link_up = 0;
1762			}
1763		} else {
1764			if (!(bmcr & BMCR_ANENABLE) &&
1765			    tp->link_config.speed == current_speed &&
1766			    tp->link_config.duplex == current_duplex) {
1767				current_link_up = 1;
1768			} else {
1769				current_link_up = 0;
1770			}
1771		}
1772
1773		tp->link_config.active_speed = current_speed;
1774		tp->link_config.active_duplex = current_duplex;
1775	}
1776
1777	if (current_link_up == 1 &&
1778	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
1779	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1780		u32 local_adv, remote_adv;
1781
1782		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1783			local_adv = 0;
1784		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1785
1786		if (tg3_readphy(tp, MII_LPA, &remote_adv))
1787			remote_adv = 0;
1788
1789		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1790
1791		/* If we are not advertising full pause capability,
1792		 * something is wrong.  Bring the link down and reconfigure.
1793		 */
1794		if (local_adv != ADVERTISE_PAUSE_CAP) {
1795			current_link_up = 0;
1796		} else {
1797			tg3_setup_flow_control(tp, local_adv, remote_adv);
1798		}
1799	}
1800relink:
1801	if (current_link_up == 0) {
1802		u32 tmp;
1803
1804		tg3_phy_copper_begin(tp);
1805
1806		tg3_readphy(tp, MII_BMSR, &tmp);
1807		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1808		    (tmp & BMSR_LSTATUS))
1809			current_link_up = 1;
1810	}
1811
	/* Program MAC port mode / duplex / polarity to match the link. */
1812	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1813	if (current_link_up == 1) {
1814		if (tp->link_config.active_speed == SPEED_100 ||
1815		    tp->link_config.active_speed == SPEED_10)
1816			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1817		else
1818			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1819	} else
1820		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1821
1822	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1823	if (tp->link_config.active_duplex == DUPLEX_HALF)
1824		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1825
1826	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1827	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1828		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1829		    (current_link_up == 1 &&
1830		     tp->link_config.active_speed == SPEED_10))
1831			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1832	} else {
1833		if (current_link_up == 1)
1834			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1835	}
1836
1837	/* ??? Without this setting Netgear GA302T PHY does not
1838	 * ??? send/receive packets...
1839	 */
1840	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1841	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1842		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1843		tw32_f(MAC_MI_MODE, tp->mi_mode);
1844		udelay(80);
1845	}
1846
1847	tw32_f(MAC_MODE, tp->mac_mode);
1848	udelay(40);
1849
1850	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1851		/* Polled via timer. */
1852		tw32_f(MAC_EVENT, 0);
1853	} else {
1854		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1855	}
1856	udelay(40);
1857
	/* Notify on-chip firmware of a 5700 gigabit link on a fast bus. */
1858	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1859	    current_link_up == 1 &&
1860	    tp->link_config.active_speed == SPEED_1000 &&
1861	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1862	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1863		udelay(120);
1864		tw32_f(MAC_STATUS,
1865		     (MAC_STATUS_SYNC_CHANGED |
1866		      MAC_STATUS_CFG_CHANGED));
1867		udelay(40);
1868		tg3_write_mem(tp,
1869			      NIC_SRAM_FIRMWARE_MBOX,
1870			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1871	}
1872
1873	if (current_link_up != netif_carrier_ok(tp->dev)) {
1874		if (current_link_up)
1875			netif_carrier_on(tp->dev);
1876		else
1877			netif_carrier_off(tp->dev);
1878		tg3_link_report(tp);
1879	}
1880
1881	return 0;
1882}
1883
/* Bookkeeping for the software fiber (1000BASE-X) autonegotiation
 * state machine (tg3_fiber_aneg_smachine).  The MR_* flag names
 * appear to mirror the IEEE 802.3 clause 37 management variables, and
 * the ANEG_CFG_* bits the over-the-wire config word layout — confirm
 * against the spec before relying on that mapping.
 */
1884struct tg3_fiber_aneginfo {
	/* Current state of the autoneg state machine. */
1885	int state;
1886#define ANEG_STATE_UNKNOWN		0
1887#define ANEG_STATE_AN_ENABLE		1
1888#define ANEG_STATE_RESTART_INIT		2
1889#define ANEG_STATE_RESTART		3
1890#define ANEG_STATE_DISABLE_LINK_OK	4
1891#define ANEG_STATE_ABILITY_DETECT_INIT	5
1892#define ANEG_STATE_ABILITY_DETECT	6
1893#define ANEG_STATE_ACK_DETECT_INIT	7
1894#define ANEG_STATE_ACK_DETECT		8
1895#define ANEG_STATE_COMPLETE_ACK_INIT	9
1896#define ANEG_STATE_COMPLETE_ACK		10
1897#define ANEG_STATE_IDLE_DETECT_INIT	11
1898#define ANEG_STATE_IDLE_DETECT		12
1899#define ANEG_STATE_LINK_OK		13
1900#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
1901#define ANEG_STATE_NEXT_PAGE_WAIT	15
1902
	/* Control inputs and negotiated-result outputs (MR_LP_ADV_*
	 * record what the link partner advertised).
	 */
1903	u32 flags;
1904#define MR_AN_ENABLE		0x00000001
1905#define MR_RESTART_AN		0x00000002
1906#define MR_AN_COMPLETE		0x00000004
1907#define MR_PAGE_RX		0x00000008
1908#define MR_NP_LOADED		0x00000010
1909#define MR_TOGGLE_TX		0x00000020
1910#define MR_LP_ADV_FULL_DUPLEX	0x00000040
1911#define MR_LP_ADV_HALF_DUPLEX	0x00000080
1912#define MR_LP_ADV_SYM_PAUSE	0x00000100
1913#define MR_LP_ADV_ASYM_PAUSE	0x00000200
1914#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
1915#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
1916#define MR_LP_ADV_NEXT_PAGE	0x00001000
1917#define MR_TOGGLE_RX		0x00002000
1918#define MR_NP_RX		0x00004000
1919
1920#define MR_LINK_OK		0x80000000
1921
	/* Tick counters (in state-machine invocations, not jiffies). */
1922	unsigned long link_time, cur_time;
1923
	/* Last received config word and how many ticks it has been
	 * stable; "ability match" requires two consecutive identical
	 * samples.
	 */
1924	u32 ability_match_cfg;
1925	int ability_match_count;
1926
1927	char ability_match, idle_match, ack_match;
1928
	/* Transmitted / received autoneg config words. */
1929	u32 txconfig, rxconfig;
1930#define ANEG_CFG_NP		0x00000080
1931#define ANEG_CFG_ACK		0x00000040
1932#define ANEG_CFG_RF2		0x00000020
1933#define ANEG_CFG_RF1		0x00000010
1934#define ANEG_CFG_PS2		0x00000001
1935#define ANEG_CFG_PS1		0x00008000
1936#define ANEG_CFG_HD		0x00004000
1937#define ANEG_CFG_FD		0x00002000
1938#define ANEG_CFG_INVAL		0x00001f06
1939
1940};
1941#define ANEG_OK 0
1942#define ANEG_DONE 1
1943#define ANEG_TIMER_ENAB 2
1944#define ANEG_FAILED -1
1945
1946#define ANEG_STATE_SETTLE_TIME 10000
1947
/* tg3_fiber_aneg_smachine - run one tick of the software fiber
 * (1000BASE-X) autonegotiation state machine.
 *
 * Samples the received config word from MAC_RX_AUTO_NEG, updates the
 * ability/ack/idle match tracking in @ap, then advances ap->state.
 * Several *_INIT states deliberately fall through into their steady
 * state in the same tick.
 *
 * Returns ANEG_OK to keep ticking, ANEG_TIMER_ENAB when the caller
 * should continue polling on a timer, ANEG_DONE on completion, or
 * ANEG_FAILED on error.
 */
1948static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1949				   struct tg3_fiber_aneginfo *ap)
1950{
1951	unsigned long delta;
1952	u32 rx_cfg_reg;
1953	int ret;
1954
1955	if (ap->state == ANEG_STATE_UNKNOWN) {
1956		ap->rxconfig = 0;
1957		ap->link_time = 0;
1958		ap->cur_time = 0;
1959		ap->ability_match_cfg = 0;
1960		ap->ability_match_count = 0;
1961		ap->ability_match = 0;
1962		ap->idle_match = 0;
1963		ap->ack_match = 0;
1964	}
1965	ap->cur_time++;
1966
	/* Sample the incoming config word; "ability match" requires the
	 * same nonzero word on two consecutive ticks.
	 */
1967	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1968		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1969
1970		if (rx_cfg_reg != ap->ability_match_cfg) {
1971			ap->ability_match_cfg = rx_cfg_reg;
1972			ap->ability_match = 0;
1973			ap->ability_match_count = 0;
1974		} else {
1975			if (++ap->ability_match_count > 1) {
1976				ap->ability_match = 1;
1977				ap->ability_match_cfg = rx_cfg_reg;
1978			}
1979		}
1980		if (rx_cfg_reg & ANEG_CFG_ACK)
1981			ap->ack_match = 1;
1982		else
1983			ap->ack_match = 0;
1984
1985		ap->idle_match = 0;
1986	} else {
1987		ap->idle_match = 1;
1988		ap->ability_match_cfg = 0;
1989		ap->ability_match_count = 0;
1990		ap->ability_match = 0;
1991		ap->ack_match = 0;
1992
1993		rx_cfg_reg = 0;
1994	}
1995
1996	ap->rxconfig = rx_cfg_reg;
1997	ret = ANEG_OK;
1998
1999	switch(ap->state) {
2000	case ANEG_STATE_UNKNOWN:
2001		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2002			ap->state = ANEG_STATE_AN_ENABLE;
2003
2004		/* fallthru */
2005	case ANEG_STATE_AN_ENABLE:
2006		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2007		if (ap->flags & MR_AN_ENABLE) {
2008			ap->link_time = 0;
2009			ap->cur_time = 0;
2010			ap->ability_match_cfg = 0;
2011			ap->ability_match_count = 0;
2012			ap->ability_match = 0;
2013			ap->idle_match = 0;
2014			ap->ack_match = 0;
2015
2016			ap->state = ANEG_STATE_RESTART_INIT;
2017		} else {
2018			ap->state = ANEG_STATE_DISABLE_LINK_OK;
2019		}
2020		break;
2021
2022	case ANEG_STATE_RESTART_INIT:
2023		ap->link_time = ap->cur_time;
2024		ap->flags &= ~(MR_NP_LOADED);
2025		ap->txconfig = 0;
2026		tw32(MAC_TX_AUTO_NEG, 0);
2027		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2028		tw32_f(MAC_MODE, tp->mac_mode);
2029		udelay(40);
2030
2031		ret = ANEG_TIMER_ENAB;
2032		ap->state = ANEG_STATE_RESTART;
2033
2034		/* fallthru */
2035	case ANEG_STATE_RESTART:
		/* Hold in restart until the settle time has elapsed. */
2036		delta = ap->cur_time - ap->link_time;
2037		if (delta > ANEG_STATE_SETTLE_TIME) {
2038			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2039		} else {
2040			ret = ANEG_TIMER_ENAB;
2041		}
2042		break;
2043
2044	case ANEG_STATE_DISABLE_LINK_OK:
2045		ret = ANEG_DONE;
2046		break;
2047
2048	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Start transmitting our ability word (full duplex +
		 * symmetric pause).
		 */
2049		ap->flags &= ~(MR_TOGGLE_TX);
2050		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2051		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2052		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2053		tw32_f(MAC_MODE, tp->mac_mode);
2054		udelay(40);
2055
2056		ap->state = ANEG_STATE_ABILITY_DETECT;
2057		break;
2058
2059	case ANEG_STATE_ABILITY_DETECT:
2060		if (ap->ability_match != 0 && ap->rxconfig != 0) {
2061			ap->state = ANEG_STATE_ACK_DETECT_INIT;
2062		}
2063		break;
2064
2065	case ANEG_STATE_ACK_DETECT_INIT:
2066		ap->txconfig |= ANEG_CFG_ACK;
2067		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2068		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2069		tw32_f(MAC_MODE, tp->mac_mode);
2070		udelay(40);
2071
2072		ap->state = ANEG_STATE_ACK_DETECT;
2073
2074		/* fallthru */
2075	case ANEG_STATE_ACK_DETECT:
2076		if (ap->ack_match != 0) {
2077			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2078			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2079				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2080			} else {
2081				ap->state = ANEG_STATE_AN_ENABLE;
2082			}
2083		} else if (ap->ability_match != 0 &&
2084			   ap->rxconfig == 0) {
2085			ap->state = ANEG_STATE_AN_ENABLE;
2086		}
2087		break;
2088
2089	case ANEG_STATE_COMPLETE_ACK_INIT:
2090		if (ap->rxconfig & ANEG_CFG_INVAL) {
2091			ret = ANEG_FAILED;
2092			break;
2093		}
		/* Latch the link partner's advertised abilities into the
		 * MR_LP_ADV_* flags.
		 */
2094		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2095			       MR_LP_ADV_HALF_DUPLEX |
2096			       MR_LP_ADV_SYM_PAUSE |
2097			       MR_LP_ADV_ASYM_PAUSE |
2098			       MR_LP_ADV_REMOTE_FAULT1 |
2099			       MR_LP_ADV_REMOTE_FAULT2 |
2100			       MR_LP_ADV_NEXT_PAGE |
2101			       MR_TOGGLE_RX |
2102			       MR_NP_RX);
2103		if (ap->rxconfig & ANEG_CFG_FD)
2104			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2105		if (ap->rxconfig & ANEG_CFG_HD)
2106			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2107		if (ap->rxconfig & ANEG_CFG_PS1)
2108			ap->flags |= MR_LP_ADV_SYM_PAUSE;
2109		if (ap->rxconfig & ANEG_CFG_PS2)
2110			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2111		if (ap->rxconfig & ANEG_CFG_RF1)
2112			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2113		if (ap->rxconfig & ANEG_CFG_RF2)
2114			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2115		if (ap->rxconfig & ANEG_CFG_NP)
2116			ap->flags |= MR_LP_ADV_NEXT_PAGE;
2117
2118		ap->link_time = ap->cur_time;
2119
2120		ap->flags ^= (MR_TOGGLE_TX);
2121		if (ap->rxconfig & 0x0008)
2122			ap->flags |= MR_TOGGLE_RX;
2123		if (ap->rxconfig & ANEG_CFG_NP)
2124			ap->flags |= MR_NP_RX;
2125		ap->flags |= MR_PAGE_RX;
2126
2127		ap->state = ANEG_STATE_COMPLETE_ACK;
2128		ret = ANEG_TIMER_ENAB;
2129		break;
2130
2131	case ANEG_STATE_COMPLETE_ACK:
2132		if (ap->ability_match != 0 &&
2133		    ap->rxconfig == 0) {
2134			ap->state = ANEG_STATE_AN_ENABLE;
2135			break;
2136		}
2137		delta = ap->cur_time - ap->link_time;
2138		if (delta > ANEG_STATE_SETTLE_TIME) {
2139			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2140				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2141			} else {
2142				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2143				    !(ap->flags & MR_NP_RX)) {
2144					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2145				} else {
					/* Next-page exchange is not
					 * implemented (see the states
					 * below), so this fails.
					 */
2146					ret = ANEG_FAILED;
2147				}
2148			}
2149		}
2150		break;
2151
2152	case ANEG_STATE_IDLE_DETECT_INIT:
2153		ap->link_time = ap->cur_time;
2154		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2155		tw32_f(MAC_MODE, tp->mac_mode);
2156		udelay(40);
2157
2158		ap->state = ANEG_STATE_IDLE_DETECT;
2159		ret = ANEG_TIMER_ENAB;
2160		break;
2161
2162	case ANEG_STATE_IDLE_DETECT:
2163		if (ap->ability_match != 0 &&
2164		    ap->rxconfig == 0) {
2165			ap->state = ANEG_STATE_AN_ENABLE;
2166			break;
2167		}
2168		delta = ap->cur_time - ap->link_time;
2169		if (delta > ANEG_STATE_SETTLE_TIME) {
2170			/* XXX another gem from the Broadcom driver :( */
2171			ap->state = ANEG_STATE_LINK_OK;
2172		}
2173		break;
2174
2175	case ANEG_STATE_LINK_OK:
2176		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2177		ret = ANEG_DONE;
2178		break;
2179
2180	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2181		/* ??? unimplemented */
2182		break;
2183
2184	case ANEG_STATE_NEXT_PAGE_WAIT:
2185		/* ??? unimplemented */
2186		break;
2187
2188	default:
2189		ret = ANEG_FAILED;
2190		break;
2191	};
2192
2193	return ret;
2194}
2195
/* Run the software 1000BASE-X autoneg state machine to completion.
 *
 * Puts the MAC into GMII port mode and enables transmission of config
 * code words (MAC_MODE_SEND_CONFIGS), then steps
 * tg3_fiber_aneg_smachine() once per microsecond for up to ~195 ms
 * until it reports ANEG_DONE or ANEG_FAILED.  The MR_* result flags
 * accumulated by the state machine are returned through @flags.
 *
 * Returns 1 when autoneg finished with AN complete, link OK and the
 * link partner advertising full duplex; 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any previously latched TX autoneg code word. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	/* Start sending config code words to the link partner. */
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* ~195 ms worth of 1 us ticks; the state machine advances
	 * cur_time internally.
	 */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop sending config code words regardless of outcome. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2239
/* One-time initialization sequence for the BCM8002 SERDES PHY.
 *
 * The register numbers and values below are an opaque vendor
 * bring-up sequence; the inline comments describe the intent of each
 * write as far as it is known.  The exact ordering and delays are
 * required by the hardware — do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2289
/* Fiber link setup using the chip's hardware SG_DIG autoneg engine.
 *
 * @mac_status: snapshot of MAC_STATUS taken by the caller.
 *
 * On chips other than 5704 A0/A1 a SERDES config workaround is
 * applied; the 0xc010000/0x4010000 constants select per-port values
 * to OR into MAC_SERDES_CFG (exact bit meanings are undocumented
 * here — see the preserved-bits comments below).
 *
 * Returns 1 if link is considered up, 0 otherwise.  Flow control is
 * configured via tg3_setup_flow_control() from the negotiated pause
 * bits when autoneg succeeds.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Autoneg forced off: if the HW engine was enabled
		 * (bit 31), disable it and restore the SERDES config.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymmetric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg: pulse the soft-reset
		 * bit (bit 30) around the desired control value.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Give time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		/* Bit 1 of SG_DIG_STATUS: autoneg complete.
		 * Bits 19/20: partner pause / asym-pause ability.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  The first time
			 * through we just clear JUST_INITTED and wait
			 * another pass; after that, fall back to
			 * parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2414
/* Fiber link setup using the software autoneg state machine
 * (fiber_autoneg) instead of the hardware SG_DIG engine.
 *
 * @mac_status: snapshot of MAC_STATUS taken by the caller.
 *
 * Without PCS sync there is nothing to do.  With autoneg enabled, run
 * the software state machine, derive pause settings from the partner
 * flags, then drain SYNC/CFG change events.  As a fallback, link is
 * also declared up if PCS is synced and no config code words are
 * being received (partner not autonegotiating).  With autoneg
 * disabled, 1000FD link is simply forced up.
 *
 * Returns 1 if link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack SYNC/CFG change events until they stop
		 * re-asserting (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2472
/* Top-level link setup for TBI/fiber (PHY_SERDES) devices.
 *
 * @force_reset: accepted for signature parity with the other
 * tg3_setup_*_phy() routines; not used in this path.
 *
 * Snapshots the current pause/speed/duplex config, short-circuits if
 * the link is already stable (software autoneg case only), then
 * delegates to the hardware (SG_DIG) or software autoneg helper
 * depending on TG3_FLG2_HW_AUTONEG.  Afterwards it drains status
 * change events, updates carrier state, the link LED, and reports
 * link changes via tg3_link_report().
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, and the
	 * MAC reports a clean synced link — just ack the change bits
	 * and keep the current configuration.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the link-change bit in the status block so the next
	 * interrupt does not re-trigger link processing.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Drain SYNC/CFG change events (bounded at 100 iterations). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Pulse SEND_CONFIGS to prod the partner. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier state unchanged; still report if the pause,
		 * speed or duplex configuration moved underneath us.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2586
747e8f8b
MC
/* Link setup for SERDES devices managed through an MII register
 * interface (TG3_FLG2_MII_SERDES), e.g. on 5780-class parts.
 *
 * @force_reset: if set, the PHY is reset before negotiation.
 *
 * Handles three cases: parallel-detect in progress (just check link),
 * autoneg enabled (program 1000BASE-X advertisement and restart
 * autoneg if needed), and forced mode (force speed/duplex via BMCR,
 * dropping the link first if carrier is up).  Duplex and flow control
 * are then resolved from BMSR/advertisement registers.
 *
 * Returns the OR of tg3_readphy/tg3_writephy error codes (0 on
 * success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice to get the
	 * current value.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed or autoneg was off:
			 * restart autoneg and return early; the link
			 * state will be picked up on a later pass.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the 1000BASE-X abilities
				 * and briefly restart autoneg so the
				 * partner sees the link drop.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
						     BMCR_ANRESTART |
						     BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Double read: BMSR link bit is latched-low. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): MAC half-duplex mode is keyed off
	 * link_config.active_duplex, which is only updated to
	 * current_duplex further down — so this reflects the
	 * *previous* negotiation result.  Looks intentional in this
	 * era of the driver; confirm before changing.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2740
/* Periodic parallel-detection check for MII-SERDES links.
 *
 * If autoneg has not produced a link, probe PHY shadow/expansion
 * registers for "signal detect without config code words"; if so,
 * force 1000FD link up by disabling autoneg (parallel detection).
 * Conversely, if a parallel-detected link later starts receiving
 * config code words, re-enable autoneg.
 *
 * Called from timer context; uses unlatched reads only.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice; first read returns stale data. */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2798
1da177e4
LT
/* Dispatch link setup to the correct per-PHY-type routine, then apply
 * the link-dependent MAC settings that are common to all PHY types:
 * the TX IPG/slot-time lengths (longer slot time for 1000/half) and,
 * on pre-5705 chips, the statistics-block coalescing ticks (disabled
 * while the carrier is down).
 *
 * @force_reset is forwarded to the per-type setup routine.
 * Returns whatever that routine returns (0 on success).
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	return err;
}
2834
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* Reclaim completed TX descriptors from tp->tx_cons up to the
	 * hardware consumer index, unmapping the head and every frag
	 * of each skb before freeing it.  A NULL skb or a frag run
	 * crossing hw_idx indicates ring corruption -> BUG().
	 * Finally, wake the queue if it was stopped and enough
	 * descriptors are now free.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i;

		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Each frag occupies one additional descriptor. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			if (unlikely(sw_idx == hw_idx))
				BUG();

			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL))
				BUG();

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);
	}

	tp->tx_cons = sw_idx;

	/* Re-check under tx_lock to close the race with the xmit
	 * path stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev))) {
		spin_lock(&tp->tx_lock);
		if (netif_queue_stopped(tp->dev) &&
		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		spin_unlock(&tp->tx_lock);
	}
}
2890
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Resolve ring/descriptor/bookkeeping pointers for the
	 * requested producer ring (standard or jumbo).
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = dev_alloc_skb(skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb->dev = tp->dev;
	/* rx_offset aligns the IP header (typically 2 bytes). */
	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
2963
2964/* We only need to move over in the address because the other
2965 * members of the RX descriptor are invariant. See notes above
2966 * tg3_alloc_rx_skb for full details.
2967 */
2968static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2969 int src_idx, u32 dest_idx_unmasked)
2970{
2971 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2972 struct ring_info *src_map, *dest_map;
2973 int dest_idx;
2974
2975 switch (opaque_key) {
2976 case RXD_OPAQUE_RING_STD:
2977 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2978 dest_desc = &tp->rx_std[dest_idx];
2979 dest_map = &tp->rx_std_buffers[dest_idx];
2980 src_desc = &tp->rx_std[src_idx];
2981 src_map = &tp->rx_std_buffers[src_idx];
2982 break;
2983
2984 case RXD_OPAQUE_RING_JUMBO:
2985 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2986 dest_desc = &tp->rx_jumbo[dest_idx];
2987 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2988 src_desc = &tp->rx_jumbo[src_idx];
2989 src_map = &tp->rx_jumbo_buffers[src_idx];
2990 break;
2991
2992 default:
2993 return;
2994 };
2995
2996 dest_map->skb = src_map->skb;
2997 pci_unmap_addr_set(dest_map, mapping,
2998 pci_unmap_addr(src_map, mapping));
2999 dest_desc->addr_hi = src_desc->addr_hi;
3000 dest_desc->addr_lo = src_desc->addr_lo;
3001
3002 src_map->skb = NULL;
3003}
3004
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged skb to the stack via the hardware-accel VLAN
 * path using the device's registered vlan group.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3011
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	/* Process up to @budget packets from the RX return ring,
	 * posting replacement (or recycled) buffers as we go.
	 * Returns the number of packets delivered to the stack.
	 */
	u32 work_mask;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring and
		 * slot this packet's buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			/* Large packet: hand the mapped buffer to the
			 * stack and post a freshly-allocated
			 * replacement in the same slot.
			 */
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			/* Small packet: copy it out and recycle the
			 * original ring buffer (cheaper than remap).
			 */
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = tp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* 0xffff TCP/UDP pseudo-checksum means hw verified it. */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes before any following MMIO. */
	mmiowb();

	return received;
}
3182
/* NAPI poll callback (old dev->poll interface).
 *
 * Processes link-change events, TX completions, and up to
 * min(*budget, netdev->quota) RX packets, decrementing both budget
 * counters by the work done.  Returns 0 (and re-enables interrupts
 * via tg3_restart_ints) when no work remains, 1 to stay on the poll
 * list.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	/* Tagged-status chips latch the status tag; the rmb() orders
	 * the tag read against the status-block reads that follow in
	 * tg3_has_work().
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3239
f47c11ee
DM
/* Mark the IRQ handler as quiesced and wait for any handler already
 * running to finish.  The smp_mb() orders the irq_sync store before
 * synchronize_irq() so a concurrently-running handler observes it.
 * Must not be called while already quiesced (BUG_ON).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3249
/* Nonzero while the IRQ handler is quiesced via tg3_irq_quiesce();
 * checked by the interrupt handlers to avoid scheduling NAPI work.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3254
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Lock order: tp->lock then tp->tx_lock; tg3_full_unlock() releases
 * in the reverse order.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
	spin_lock(&tp->tx_lock);
}
3267
3268static inline void tg3_full_unlock(struct tg3 *tp)
3269{
3270 spin_unlock(&tp->tx_lock);
3271 spin_unlock_bh(&tp->lock);
3272}
3273
88b06bc2
MC
3274/* MSI ISR - No need to check for interrupt sharing and no need to
3275 * flush status block and interrupt mailbox. PCI ordering rules
3276 * guarantee that MSI will arrive after the status block.
3277 */
3278static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3279{
3280 struct net_device *dev = dev_id;
3281 struct tg3 *tp = netdev_priv(dev);
88b06bc2 3282
61487480
MC
3283 prefetch(tp->hw_status);
3284 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
88b06bc2 3285 /*
fac9b83e 3286 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 3287 * chip-internal interrupt pending events.
fac9b83e 3288 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
3289 * NIC to stop sending us irqs, engaging "in-intr-handler"
3290 * event coalescing.
3291 */
3292 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
61487480 3293 if (likely(!tg3_irq_sync(tp)))
88b06bc2 3294 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3295
88b06bc2
MC
3296 return IRQ_RETVAL(1);
3297}
3298
1da177e4
LT
3299static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3300{
3301 struct net_device *dev = dev_id;
3302 struct tg3 *tp = netdev_priv(dev);
3303 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3304 unsigned int handled = 1;
3305
1da177e4
LT
3306 /* In INTx mode, it is possible for the interrupt to arrive at
3307 * the CPU before the status block posted prior to the interrupt.
3308 * Reading the PCI State register will confirm whether the
3309 * interrupt is ours and will flush the status block.
3310 */
3311 if ((sblk->status & SD_STATUS_UPDATED) ||
3312 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3313 /*
fac9b83e 3314 * Writing any value to intr-mbox-0 clears PCI INTA# and
1da177e4 3315 * chip-internal interrupt pending events.
fac9b83e 3316 * Writing non-zero to intr-mbox-0 additional tells the
1da177e4
LT
3317 * NIC to stop sending us irqs, engaging "in-intr-handler"
3318 * event coalescing.
3319 */
3320 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3321 0x00000001);
f47c11ee
DM
3322 if (tg3_irq_sync(tp))
3323 goto out;
fac9b83e 3324 sblk->status &= ~SD_STATUS_UPDATED;
61487480
MC
3325 if (likely(tg3_has_work(tp))) {
3326 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
fac9b83e 3327 netif_rx_schedule(dev); /* schedule NAPI poll */
61487480 3328 } else {
fac9b83e
DM
3329 /* No work, shared interrupt perhaps? re-enable
3330 * interrupts, and flush that PCI write
3331 */
09ee929c 3332 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
fac9b83e 3333 0x00000000);
fac9b83e
DM
3334 }
3335 } else { /* shared interrupt */
3336 handled = 0;
3337 }
f47c11ee 3338out:
fac9b83e
DM
3339 return IRQ_RETVAL(handled);
3340}
3341
3342static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3343{
3344 struct net_device *dev = dev_id;
3345 struct tg3 *tp = netdev_priv(dev);
3346 struct tg3_hw_status *sblk = tp->hw_status;
fac9b83e
DM
3347 unsigned int handled = 1;
3348
fac9b83e
DM
3349 /* In INTx mode, it is possible for the interrupt to arrive at
3350 * the CPU before the status block posted prior to the interrupt.
3351 * Reading the PCI State register will confirm whether the
3352 * interrupt is ours and will flush the status block.
3353 */
38f3843e 3354 if ((sblk->status_tag != tp->last_tag) ||
fac9b83e 3355 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1da177e4 3356 /*
fac9b83e
DM
3357 * writing any value to intr-mbox-0 clears PCI INTA# and
3358 * chip-internal interrupt pending events.
3359 * writing non-zero to intr-mbox-0 additional tells the
3360 * NIC to stop sending us irqs, engaging "in-intr-handler"
3361 * event coalescing.
1da177e4 3362 */
fac9b83e
DM
3363 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3364 0x00000001);
f47c11ee
DM
3365 if (tg3_irq_sync(tp))
3366 goto out;
38f3843e 3367 if (netif_rx_schedule_prep(dev)) {
61487480 3368 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
38f3843e
MC
3369 /* Update last_tag to mark that this status has been
3370 * seen. Because interrupt may be shared, we may be
3371 * racing with tg3_poll(), so only update last_tag
3372 * if tg3_poll() is not scheduled.
1da177e4 3373 */
38f3843e
MC
3374 tp->last_tag = sblk->status_tag;
3375 __netif_rx_schedule(dev);
1da177e4
LT
3376 }
3377 } else { /* shared interrupt */
3378 handled = 0;
3379 }
f47c11ee 3380out:
1da177e4
LT
3381 return IRQ_RETVAL(handled);
3382}
3383
7938109f
MC
3384/* ISR for interrupt test */
3385static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3386 struct pt_regs *regs)
3387{
3388 struct net_device *dev = dev_id;
3389 struct tg3 *tp = netdev_priv(dev);
3390 struct tg3_hw_status *sblk = tp->hw_status;
3391
3392 if (sblk->status & SD_STATUS_UPDATED) {
3393 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3394 0x00000001);
3395 return IRQ_RETVAL(1);
3396 }
3397 return IRQ_RETVAL(0);
3398}
3399
static int tg3_init_hw(struct tg3 *);
static int tg3_halt(struct tg3 *, int, int);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: drive the normal INTx handler synchronously so
 * netconsole and friends can make progress without interrupts.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3411
3412static void tg3_reset_task(void *_data)
3413{
3414 struct tg3 *tp = _data;
3415 unsigned int restart_timer;
3416
3417 tg3_netif_stop(tp);
3418
f47c11ee 3419 tg3_full_lock(tp, 1);
1da177e4
LT
3420
3421 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3422 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3423
944d980e 3424 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
1da177e4
LT
3425 tg3_init_hw(tp);
3426
3427 tg3_netif_start(tp);
3428
f47c11ee 3429 tg3_full_unlock(tp);
1da177e4
LT
3430
3431 if (restart_timer)
3432 mod_timer(&tp->timer, jiffies + 1);
3433}
3434
3435static void tg3_tx_timeout(struct net_device *dev)
3436{
3437 struct tg3 *tp = netdev_priv(dev);
3438
3439 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3440 dev->name);
3441
3442 schedule_work(&tp->reset_task);
3443}
3444
c58ec932
MC
3445/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3446static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3447{
3448 u32 base = (u32) mapping & 0xffffffff;
3449
3450 return ((base > 0xffffdcc0) &&
3451 (base + len + 8 < base));
3452}
3453
1da177e4
LT
3454static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3455
3456static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
c58ec932
MC
3457 u32 last_plus_one, u32 *start,
3458 u32 base_flags, u32 mss)
1da177e4
LT
3459{
3460 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
c58ec932 3461 dma_addr_t new_addr = 0;
1da177e4 3462 u32 entry = *start;
c58ec932 3463 int i, ret = 0;
1da177e4
LT
3464
3465 if (!new_skb) {
c58ec932
MC
3466 ret = -1;
3467 } else {
3468 /* New SKB is guaranteed to be linear. */
3469 entry = *start;
3470 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3471 PCI_DMA_TODEVICE);
3472 /* Make sure new skb does not cross any 4G boundaries.
3473 * Drop the packet if it does.
3474 */
3475 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3476 ret = -1;
3477 dev_kfree_skb(new_skb);
3478 new_skb = NULL;
3479 } else {
3480 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3481 base_flags, 1 | (mss << 1));
3482 *start = NEXT_TX(entry);
3483 }
1da177e4
LT
3484 }
3485
1da177e4
LT
3486 /* Now clean up the sw ring entries. */
3487 i = 0;
3488 while (entry != last_plus_one) {
3489 int len;
3490
3491 if (i == 0)
3492 len = skb_headlen(skb);
3493 else
3494 len = skb_shinfo(skb)->frags[i-1].size;
3495 pci_unmap_single(tp->pdev,
3496 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3497 len, PCI_DMA_TODEVICE);
3498 if (i == 0) {
3499 tp->tx_buffers[entry].skb = new_skb;
3500 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3501 } else {
3502 tp->tx_buffers[entry].skb = NULL;
3503 }
3504 entry = NEXT_TX(entry);
3505 i++;
3506 }
3507
3508 dev_kfree_skb(skb);
3509
c58ec932 3510 return ret;
1da177e4
LT
3511}
3512
3513static void tg3_set_txd(struct tg3 *tp, int entry,
3514 dma_addr_t mapping, int len, u32 flags,
3515 u32 mss_and_is_end)
3516{
3517 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3518 int is_end = (mss_and_is_end & 0x1);
3519 u32 mss = (mss_and_is_end >> 1);
3520 u32 vlan_tag = 0;
3521
3522 if (is_end)
3523 flags |= TXD_FLAG_END;
3524 if (flags & TXD_FLAG_VLAN) {
3525 vlan_tag = flags >> 16;
3526 flags &= 0xffff;
3527 }
3528 vlan_tag |= (mss << TXD_MSS_SHIFT);
3529
3530 txd->addr_hi = ((u64) mapping >> 32);
3531 txd->addr_lo = ((u64) mapping & 0xffffffff);
3532 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3533 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3534}
3535
1da177e4
LT
3536static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3537{
3538 struct tg3 *tp = netdev_priv(dev);
3539 dma_addr_t mapping;
1da177e4
LT
3540 u32 len, entry, base_flags, mss;
3541 int would_hit_hwbug;
1da177e4
LT
3542
3543 len = skb_headlen(skb);
3544
3545 /* No BH disabling for tx_lock here. We are running in BH disabled
3546 * context and TX reclaim runs via tp->poll inside of a software
f47c11ee
DM
3547 * interrupt. Furthermore, IRQ processing runs lockless so we have
3548 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 3549 */
f47c11ee 3550 if (!spin_trylock(&tp->tx_lock))
1da177e4 3551 return NETDEV_TX_LOCKED;
1da177e4
LT
3552
3553 /* This is a hard error, log it. */
3554 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3555 netif_stop_queue(dev);
f47c11ee 3556 spin_unlock(&tp->tx_lock);
1da177e4
LT
3557 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3558 dev->name);
3559 return NETDEV_TX_BUSY;
3560 }
3561
3562 entry = tp->tx_prod;
3563 base_flags = 0;
3564 if (skb->ip_summed == CHECKSUM_HW)
3565 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3566#if TG3_TSO_SUPPORT != 0
3567 mss = 0;
3568 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3569 (mss = skb_shinfo(skb)->tso_size) != 0) {
3570 int tcp_opt_len, ip_tcp_len;
3571
3572 if (skb_header_cloned(skb) &&
3573 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3574 dev_kfree_skb(skb);
3575 goto out_unlock;
3576 }
3577
3578 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3579 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3580
3581 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3582 TXD_FLAG_CPU_POST_DMA);
3583
3584 skb->nh.iph->check = 0;
3585 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3586 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3587 skb->h.th->check = 0;
3588 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3589 }
3590 else {
3591 skb->h.th->check =
3592 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3593 skb->nh.iph->daddr,
3594 0, IPPROTO_TCP, 0);
3595 }
3596
3597 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3598 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3599 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3600 int tsflags;
3601
3602 tsflags = ((skb->nh.iph->ihl - 5) +
3603 (tcp_opt_len >> 2));
3604 mss |= (tsflags << 11);
3605 }
3606 } else {
3607 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3608 int tsflags;
3609
3610 tsflags = ((skb->nh.iph->ihl - 5) +
3611 (tcp_opt_len >> 2));
3612 base_flags |= tsflags << 12;
3613 }
3614 }
3615 }
3616#else
3617 mss = 0;
3618#endif
3619#if TG3_VLAN_TAG_USED
3620 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3621 base_flags |= (TXD_FLAG_VLAN |
3622 (vlan_tx_tag_get(skb) << 16));
3623#endif
3624
3625 /* Queue skb data, a.k.a. the main skb fragment. */
3626 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3627
3628 tp->tx_buffers[entry].skb = skb;
3629 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3630
3631 would_hit_hwbug = 0;
3632
3633 if (tg3_4g_overflow_test(mapping, len))
c58ec932 3634 would_hit_hwbug = 1;
1da177e4
LT
3635
3636 tg3_set_txd(tp, entry, mapping, len, base_flags,
3637 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3638
3639 entry = NEXT_TX(entry);
3640
3641 /* Now loop through additional data fragments, and queue them. */
3642 if (skb_shinfo(skb)->nr_frags > 0) {
3643 unsigned int i, last;
3644
3645 last = skb_shinfo(skb)->nr_frags - 1;
3646 for (i = 0; i <= last; i++) {
3647 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3648
3649 len = frag->size;
3650 mapping = pci_map_page(tp->pdev,
3651 frag->page,
3652 frag->page_offset,
3653 len, PCI_DMA_TODEVICE);
3654
3655 tp->tx_buffers[entry].skb = NULL;
3656 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3657
c58ec932
MC
3658 if (tg3_4g_overflow_test(mapping, len))
3659 would_hit_hwbug = 1;
1da177e4
LT
3660
3661 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3662 tg3_set_txd(tp, entry, mapping, len,
3663 base_flags, (i == last)|(mss << 1));
3664 else
3665 tg3_set_txd(tp, entry, mapping, len,
3666 base_flags, (i == last));
3667
3668 entry = NEXT_TX(entry);
3669 }
3670 }
3671
3672 if (would_hit_hwbug) {
3673 u32 last_plus_one = entry;
3674 u32 start;
1da177e4 3675
c58ec932
MC
3676 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3677 start &= (TG3_TX_RING_SIZE - 1);
1da177e4
LT
3678
3679 /* If the workaround fails due to memory/mapping
3680 * failure, silently drop this packet.
3681 */
c58ec932
MC
3682 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3683 &start, base_flags, mss))
1da177e4
LT
3684 goto out_unlock;
3685
3686 entry = start;
3687 }
3688
3689 /* Packets are ready, update Tx producer idx local and on card. */
3690 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3691
3692 tp->tx_prod = entry;
51b91468 3693 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
1da177e4 3694 netif_stop_queue(dev);
51b91468
MC
3695 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3696 netif_wake_queue(tp->dev);
3697 }
1da177e4
LT
3698
3699out_unlock:
3700 mmiowb();
f47c11ee 3701 spin_unlock(&tp->tx_lock);
1da177e4
LT
3702
3703 dev->trans_start = jiffies;
3704
3705 return NETDEV_TX_OK;
3706}
3707
3708static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3709 int new_mtu)
3710{
3711 dev->mtu = new_mtu;
3712
ef7f5ec0
MC
3713 if (new_mtu > ETH_DATA_LEN) {
3714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
3715 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3716 ethtool_op_set_tso(dev, 0);
3717 }
3718 else
3719 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3720 } else {
3721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
3722 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 3723 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 3724 }
1da177e4
LT
3725}
3726
3727static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3728{
3729 struct tg3 *tp = netdev_priv(dev);
3730
3731 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3732 return -EINVAL;
3733
3734 if (!netif_running(dev)) {
3735 /* We'll just catch it later when the
3736 * device is up'd.
3737 */
3738 tg3_set_mtu(dev, tp, new_mtu);
3739 return 0;
3740 }
3741
3742 tg3_netif_stop(tp);
f47c11ee
DM
3743
3744 tg3_full_lock(tp, 1);
1da177e4 3745
944d980e 3746 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
3747
3748 tg3_set_mtu(dev, tp, new_mtu);
3749
3750 tg3_init_hw(tp);
3751
3752 tg3_netif_start(tp);
3753
f47c11ee 3754 tg3_full_unlock(tp);
1da177e4
LT
3755
3756 return 0;
3757}
3758
3759/* Free up pending packets in all rx/tx rings.
3760 *
3761 * The chip has been shut down and the driver detached from
3762 * the networking, so no interrupts or new tx packets will
3763 * end up in the driver. tp->{tx,}lock is not held and we are not
3764 * in an interrupt context and thus may sleep.
3765 */
3766static void tg3_free_rings(struct tg3 *tp)
3767{
3768 struct ring_info *rxp;
3769 int i;
3770
3771 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3772 rxp = &tp->rx_std_buffers[i];
3773
3774 if (rxp->skb == NULL)
3775 continue;
3776 pci_unmap_single(tp->pdev,
3777 pci_unmap_addr(rxp, mapping),
7e72aad4 3778 tp->rx_pkt_buf_sz - tp->rx_offset,
1da177e4
LT
3779 PCI_DMA_FROMDEVICE);
3780 dev_kfree_skb_any(rxp->skb);
3781 rxp->skb = NULL;
3782 }
3783
3784 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3785 rxp = &tp->rx_jumbo_buffers[i];
3786
3787 if (rxp->skb == NULL)
3788 continue;
3789 pci_unmap_single(tp->pdev,
3790 pci_unmap_addr(rxp, mapping),
3791 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3792 PCI_DMA_FROMDEVICE);
3793 dev_kfree_skb_any(rxp->skb);
3794 rxp->skb = NULL;
3795 }
3796
3797 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3798 struct tx_ring_info *txp;
3799 struct sk_buff *skb;
3800 int j;
3801
3802 txp = &tp->tx_buffers[i];
3803 skb = txp->skb;
3804
3805 if (skb == NULL) {
3806 i++;
3807 continue;
3808 }
3809
3810 pci_unmap_single(tp->pdev,
3811 pci_unmap_addr(txp, mapping),
3812 skb_headlen(skb),
3813 PCI_DMA_TODEVICE);
3814 txp->skb = NULL;
3815
3816 i++;
3817
3818 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3819 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3820 pci_unmap_page(tp->pdev,
3821 pci_unmap_addr(txp, mapping),
3822 skb_shinfo(skb)->frags[j].size,
3823 PCI_DMA_TODEVICE);
3824 i++;
3825 }
3826
3827 dev_kfree_skb_any(skb);
3828 }
3829}
3830
3831/* Initialize tx/rx rings for packet processing.
3832 *
3833 * The chip has been shut down and the driver detached from
3834 * the networking, so no interrupts or new tx packets will
3835 * end up in the driver. tp->{tx,}lock are held and thus
3836 * we may not sleep.
3837 */
3838static void tg3_init_rings(struct tg3 *tp)
3839{
3840 u32 i;
3841
3842 /* Free up all the SKBs. */
3843 tg3_free_rings(tp);
3844
3845 /* Zero out all descriptors. */
3846 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3847 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3848 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3849 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3850
7e72aad4
MC
3851 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3852 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) &&
3853 (tp->dev->mtu > ETH_DATA_LEN))
3854 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3855
1da177e4
LT
3856 /* Initialize invariants of the rings, we only set this
3857 * stuff once. This works because the card does not
3858 * write into the rx buffer posting rings.
3859 */
3860 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3861 struct tg3_rx_buffer_desc *rxd;
3862
3863 rxd = &tp->rx_std[i];
7e72aad4 3864 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
1da177e4
LT
3865 << RXD_LEN_SHIFT;
3866 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3867 rxd->opaque = (RXD_OPAQUE_RING_STD |
3868 (i << RXD_OPAQUE_INDEX_SHIFT));
3869 }
3870
0f893dc6 3871 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
3872 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3873 struct tg3_rx_buffer_desc *rxd;
3874
3875 rxd = &tp->rx_jumbo[i];
3876 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3877 << RXD_LEN_SHIFT;
3878 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3879 RXD_FLAG_JUMBO;
3880 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3881 (i << RXD_OPAQUE_INDEX_SHIFT));
3882 }
3883 }
3884
3885 /* Now allocate fresh SKBs for each rx ring. */
3886 for (i = 0; i < tp->rx_pending; i++) {
3887 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3888 -1, i) < 0)
3889 break;
3890 }
3891
0f893dc6 3892 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
3893 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3894 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3895 -1, i) < 0)
3896 break;
3897 }
3898 }
3899}
3900
3901/*
3902 * Must not be invoked with interrupt sources disabled and
3903 * the hardware shutdown down.
3904 */
3905static void tg3_free_consistent(struct tg3 *tp)
3906{
3907 if (tp->rx_std_buffers) {
3908 kfree(tp->rx_std_buffers);
3909 tp->rx_std_buffers = NULL;
3910 }
3911 if (tp->rx_std) {
3912 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3913 tp->rx_std, tp->rx_std_mapping);
3914 tp->rx_std = NULL;
3915 }
3916 if (tp->rx_jumbo) {
3917 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3918 tp->rx_jumbo, tp->rx_jumbo_mapping);
3919 tp->rx_jumbo = NULL;
3920 }
3921 if (tp->rx_rcb) {
3922 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3923 tp->rx_rcb, tp->rx_rcb_mapping);
3924 tp->rx_rcb = NULL;
3925 }
3926 if (tp->tx_ring) {
3927 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3928 tp->tx_ring, tp->tx_desc_mapping);
3929 tp->tx_ring = NULL;
3930 }
3931 if (tp->hw_status) {
3932 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3933 tp->hw_status, tp->status_mapping);
3934 tp->hw_status = NULL;
3935 }
3936 if (tp->hw_stats) {
3937 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3938 tp->hw_stats, tp->stats_mapping);
3939 tp->hw_stats = NULL;
3940 }
3941}
3942
3943/*
3944 * Must not be invoked with interrupt sources disabled and
3945 * the hardware shutdown down. Can sleep.
3946 */
3947static int tg3_alloc_consistent(struct tg3 *tp)
3948{
3949 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3950 (TG3_RX_RING_SIZE +
3951 TG3_RX_JUMBO_RING_SIZE)) +
3952 (sizeof(struct tx_ring_info) *
3953 TG3_TX_RING_SIZE),
3954 GFP_KERNEL);
3955 if (!tp->rx_std_buffers)
3956 return -ENOMEM;
3957
3958 memset(tp->rx_std_buffers, 0,
3959 (sizeof(struct ring_info) *
3960 (TG3_RX_RING_SIZE +
3961 TG3_RX_JUMBO_RING_SIZE)) +
3962 (sizeof(struct tx_ring_info) *
3963 TG3_TX_RING_SIZE));
3964
3965 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3966 tp->tx_buffers = (struct tx_ring_info *)
3967 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3968
3969 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3970 &tp->rx_std_mapping);
3971 if (!tp->rx_std)
3972 goto err_out;
3973
3974 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3975 &tp->rx_jumbo_mapping);
3976
3977 if (!tp->rx_jumbo)
3978 goto err_out;
3979
3980 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3981 &tp->rx_rcb_mapping);
3982 if (!tp->rx_rcb)
3983 goto err_out;
3984
3985 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3986 &tp->tx_desc_mapping);
3987 if (!tp->tx_ring)
3988 goto err_out;
3989
3990 tp->hw_status = pci_alloc_consistent(tp->pdev,
3991 TG3_HW_STATUS_SIZE,
3992 &tp->status_mapping);
3993 if (!tp->hw_status)
3994 goto err_out;
3995
3996 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3997 sizeof(struct tg3_hw_stats),
3998 &tp->stats_mapping);
3999 if (!tp->hw_stats)
4000 goto err_out;
4001
4002 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4003 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4004
4005 return 0;
4006
4007err_out:
4008 tg3_free_consistent(tp);
4009 return -ENOMEM;
4010}
4011
4012#define MAX_WAIT_CNT 1000
4013
4014/* To stop a block, clear the enable bit and poll till it
4015 * clears. tp->lock is held.
4016 */
b3b7d6be 4017static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4018{
4019 unsigned int i;
4020 u32 val;
4021
4022 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4023 switch (ofs) {
4024 case RCVLSC_MODE:
4025 case DMAC_MODE:
4026 case MBFREE_MODE:
4027 case BUFMGR_MODE:
4028 case MEMARB_MODE:
4029 /* We can't enable/disable these bits of the
4030 * 5705/5750, just say success.
4031 */
4032 return 0;
4033
4034 default:
4035 break;
4036 };
4037 }
4038
4039 val = tr32(ofs);
4040 val &= ~enable_bit;
4041 tw32_f(ofs, val);
4042
4043 for (i = 0; i < MAX_WAIT_CNT; i++) {
4044 udelay(100);
4045 val = tr32(ofs);
4046 if ((val & enable_bit) == 0)
4047 break;
4048 }
4049
b3b7d6be 4050 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4051 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4052 "ofs=%lx enable_bit=%x\n",
4053 ofs, enable_bit);
4054 return -ENODEV;
4055 }
4056
4057 return 0;
4058}
4059
4060/* tp->lock is held. */
b3b7d6be 4061static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
4062{
4063 int i, err;
4064
4065 tg3_disable_ints(tp);
4066
4067 tp->rx_mode &= ~RX_MODE_ENABLE;
4068 tw32_f(MAC_RX_MODE, tp->rx_mode);
4069 udelay(10);
4070
b3b7d6be
DM
4071 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4072 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4073 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4074 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4075 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4076 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4077
4078 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4079 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4080 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4081 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4082 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4083 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4084 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
4085
4086 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4087 tw32_f(MAC_MODE, tp->mac_mode);
4088 udelay(40);
4089
4090 tp->tx_mode &= ~TX_MODE_ENABLE;
4091 tw32_f(MAC_TX_MODE, tp->tx_mode);
4092
4093 for (i = 0; i < MAX_WAIT_CNT; i++) {
4094 udelay(100);
4095 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4096 break;
4097 }
4098 if (i >= MAX_WAIT_CNT) {
4099 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4100 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4101 tp->dev->name, tr32(MAC_TX_MODE));
e6de8ad1 4102 err |= -ENODEV;
1da177e4
LT
4103 }
4104
e6de8ad1 4105 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
4106 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4107 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
4108
4109 tw32(FTQ_RESET, 0xffffffff);
4110 tw32(FTQ_RESET, 0x00000000);
4111
b3b7d6be
DM
4112 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4113 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4
LT
4114
4115 if (tp->hw_status)
4116 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4117 if (tp->hw_stats)
4118 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4119
1da177e4
LT
4120 return err;
4121}
4122
4123/* tp->lock is held. */
4124static int tg3_nvram_lock(struct tg3 *tp)
4125{
4126 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4127 int i;
4128
4129 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4130 for (i = 0; i < 8000; i++) {
4131 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4132 break;
4133 udelay(20);
4134 }
4135 if (i == 8000)
4136 return -ENODEV;
4137 }
4138 return 0;
4139}
4140
4141/* tp->lock is held. */
4142static void tg3_nvram_unlock(struct tg3 *tp)
4143{
4144 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4145 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4146}
4147
e6af301b
MC
4148/* tp->lock is held. */
4149static void tg3_enable_nvram_access(struct tg3 *tp)
4150{
4151 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4152 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4153 u32 nvaccess = tr32(NVRAM_ACCESS);
4154
4155 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4156 }
4157}
4158
4159/* tp->lock is held. */
4160static void tg3_disable_nvram_access(struct tg3 *tp)
4161{
4162 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4163 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4164 u32 nvaccess = tr32(NVRAM_ACCESS);
4165
4166 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4167 }
4168}
4169
1da177e4
LT
4170/* tp->lock is held. */
4171static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4172{
4173 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4174 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4175 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4176
4177 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4178 switch (kind) {
4179 case RESET_KIND_INIT:
4180 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4181 DRV_STATE_START);
4182 break;
4183
4184 case RESET_KIND_SHUTDOWN:
4185 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4186 DRV_STATE_UNLOAD);
4187 break;
4188
4189 case RESET_KIND_SUSPEND:
4190 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4191 DRV_STATE_SUSPEND);
4192 break;
4193
4194 default:
4195 break;
4196 };
4197 }
4198}
4199
4200/* tp->lock is held. */
4201static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4202{
4203 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4204 switch (kind) {
4205 case RESET_KIND_INIT:
4206 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4207 DRV_STATE_START_DONE);
4208 break;
4209
4210 case RESET_KIND_SHUTDOWN:
4211 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4212 DRV_STATE_UNLOAD_DONE);
4213 break;
4214
4215 default:
4216 break;
4217 };
4218 }
4219}
4220
4221/* tp->lock is held. */
4222static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4223{
4224 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4225 switch (kind) {
4226 case RESET_KIND_INIT:
4227 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4228 DRV_STATE_START);
4229 break;
4230
4231 case RESET_KIND_SHUTDOWN:
4232 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4233 DRV_STATE_UNLOAD);
4234 break;
4235
4236 case RESET_KIND_SUSPEND:
4237 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4238 DRV_STATE_SUSPEND);
4239 break;
4240
4241 default:
4242 break;
4243 };
4244 }
4245}
4246
4247static void tg3_stop_fw(struct tg3 *);
4248
4249/* tp->lock is held. */
4250static int tg3_chip_reset(struct tg3 *tp)
4251{
4252 u32 val;
1ee582d8 4253 void (*write_op)(struct tg3 *, u32, u32);
1da177e4
LT
4254 int i;
4255
4256 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4257 tg3_nvram_lock(tp);
4258
4259 /*
4260 * We must avoid the readl() that normally takes place.
4261 * It locks machines, causes machine checks, and other
4262 * fun things. So, temporarily disable the 5701
4263 * hardware workaround, while we do the reset.
4264 */
1ee582d8
MC
4265 write_op = tp->write32;
4266 if (write_op == tg3_write_flush_reg32)
4267 tp->write32 = tg3_write32;
1da177e4
LT
4268
4269 /* do the reset */
4270 val = GRC_MISC_CFG_CORECLK_RESET;
4271
4272 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4273 if (tr32(0x7e2c) == 0x60) {
4274 tw32(0x7e2c, 0x20);
4275 }
4276 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4277 tw32(GRC_MISC_CFG, (1 << 29));
4278 val |= (1 << 29);
4279 }
4280 }
4281
4282 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4283 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4284 tw32(GRC_MISC_CFG, val);
4285
1ee582d8
MC
4286 /* restore 5701 hardware bug workaround write method */
4287 tp->write32 = write_op;
1da177e4
LT
4288
4289 /* Unfortunately, we have to delay before the PCI read back.
4290 * Some 575X chips even will not respond to a PCI cfg access
4291 * when the reset command is given to the chip.
4292 *
4293 * How do these hardware designers expect things to work
4294 * properly if the PCI write is posted for a long period
4295 * of time? It is always necessary to have some method by
4296 * which a register read back can occur to push the write
4297 * out which does the reset.
4298 *
4299 * For most tg3 variants the trick below was working.
4300 * Ho hum...
4301 */
4302 udelay(120);
4303
4304 /* Flush PCI posted writes. The normal MMIO registers
4305 * are inaccessible at this time so this is the only
4306 * way to make this reliably (actually, this is no longer
4307 * the case, see above). I tried to use indirect
4308 * register read/write but this upset some 5701 variants.
4309 */
4310 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4311
4312 udelay(120);
4313
4314 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4315 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4316 int i;
4317 u32 cfg_val;
4318
4319 /* Wait for link training to complete. */
4320 for (i = 0; i < 5000; i++)
4321 udelay(100);
4322
4323 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4324 pci_write_config_dword(tp->pdev, 0xc4,
4325 cfg_val | (1 << 15));
4326 }
4327 /* Set PCIE max payload size and clear error status. */
4328 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4329 }
4330
4331 /* Re-enable indirect register accesses. */
4332 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4333 tp->misc_host_ctrl);
4334
4335 /* Set MAX PCI retry to zero. */
4336 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4337 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4338 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4339 val |= PCISTATE_RETRY_SAME_DMA;
4340 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4341
4342 pci_restore_state(tp->pdev);
4343
4344 /* Make sure PCI-X relaxed ordering bit is clear. */
4345 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4346 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4347 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4348
4cf78e4f
MC
4349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4350 u32 val;
4351
4352 /* Chip reset on 5780 will reset MSI enable bit,
4353 * so need to restore it.
4354 */
4355 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4356 u16 ctrl;
4357
4358 pci_read_config_word(tp->pdev,
4359 tp->msi_cap + PCI_MSI_FLAGS,
4360 &ctrl);
4361 pci_write_config_word(tp->pdev,
4362 tp->msi_cap + PCI_MSI_FLAGS,
4363 ctrl | PCI_MSI_FLAGS_ENABLE);
4364 val = tr32(MSGINT_MODE);
4365 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4366 }
4367
4368 val = tr32(MEMARB_MODE);
4369 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4370
4371 } else
4372 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
1da177e4
LT
4373
4374 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4375 tg3_stop_fw(tp);
4376 tw32(0x5000, 0x400);
4377 }
4378
4379 tw32(GRC_MODE, tp->grc_mode);
4380
4381 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4382 u32 val = tr32(0xc4);
4383
4384 tw32(0xc4, val | (1 << 15));
4385 }
4386
4387 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4389 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4390 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4391 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4392 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4393 }
4394
4395 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4396 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4397 tw32_f(MAC_MODE, tp->mac_mode);
747e8f8b
MC
4398 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4399 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4400 tw32_f(MAC_MODE, tp->mac_mode);
1da177e4
LT
4401 } else
4402 tw32_f(MAC_MODE, 0);
4403 udelay(40);
4404
4405 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4406 /* Wait for firmware initialization to complete. */
4407 for (i = 0; i < 100000; i++) {
4408 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4409 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4410 break;
4411 udelay(10);
4412 }
4413 if (i >= 100000) {
4414 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4415 "firmware will not restart magic=%08x\n",
4416 tp->dev->name, val);
4417 return -ENODEV;
4418 }
4419 }
4420
4421 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4422 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4423 u32 val = tr32(0x7c00);
4424
4425 tw32(0x7c00, val | (1 << 25));
4426 }
4427
4428 /* Reprobe ASF enable state. */
4429 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4430 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4431 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4432 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4433 u32 nic_cfg;
4434
4435 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4436 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4437 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 4438 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
4439 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4440 }
4441 }
4442
4443 return 0;
4444}
4445
4446/* tp->lock is held. */
4447static void tg3_stop_fw(struct tg3 *tp)
4448{
4449 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4450 u32 val;
4451 int i;
4452
4453 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4454 val = tr32(GRC_RX_CPU_EVENT);
4455 val |= (1 << 14);
4456 tw32(GRC_RX_CPU_EVENT, val);
4457
4458 /* Wait for RX cpu to ACK the event. */
4459 for (i = 0; i < 100; i++) {
4460 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4461 break;
4462 udelay(1);
4463 }
4464 }
4465}
4466
/* Bring the chip down in an orderly way: pause firmware, write the
 * pre-reset signature, quiesce the hardware, reset the chip, then
 * write the legacy and post-reset signatures.  Returns the result of
 * tg3_chip_reset() (0 on success).  tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4487
/* Memory layout of the bundled 5701 A0 workaround firmware image
 * (addresses are in the firmware CPU's address space).
 * Note: TG3_FW_RELASE_MINOR keeps its historical misspelling; it is
 * the identifier's actual name.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
4502
4503static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4504 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4505 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4506 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4507 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4508 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4509 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4510 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4511 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4512 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4513 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4514 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4515 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4516 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4517 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4518 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4519 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4520 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4521 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4522 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4523 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4524 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4525 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4526 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4527 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4528 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4529 0, 0, 0, 0, 0, 0,
4530 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4531 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4532 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4533 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4534 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4535 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4536 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4537 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4538 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4539 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4540 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4541 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4542 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4543 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4544 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4545 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4546 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4547 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4548 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4549 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4550 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4551 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4552 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4553 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4554 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4555 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4556 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4557 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4558 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4559 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4560 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4561 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4562 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4563 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4564 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4565 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4566 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4567 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4568 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4569 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4570 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4571 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4572 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4573 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4574 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4575 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4576 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4577 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4578 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4579 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4580 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4581 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4582 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4583 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4584 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4585 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4586 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4587 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4588 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4589 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4590 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4591 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4592 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4593 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4594 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4595};
4596
4597static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4598 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4599 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4600 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4601 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4602 0x00000000
4603};
4604
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
4611
/* On-chip scratch memory windows used to stage firmware for the
 * embedded RX and TX CPUs (0x4000 = 16 KiB each).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
4616
4617/* tp->lock is held. */
4618static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4619{
4620 int i;
4621
4622 if (offset == TX_CPU_BASE &&
4623 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4624 BUG();
4625
4626 if (offset == RX_CPU_BASE) {
4627 for (i = 0; i < 10000; i++) {
4628 tw32(offset + CPU_STATE, 0xffffffff);
4629 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4630 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4631 break;
4632 }
4633
4634 tw32(offset + CPU_STATE, 0xffffffff);
4635 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4636 udelay(10);
4637 } else {
4638 for (i = 0; i < 10000; i++) {
4639 tw32(offset + CPU_STATE, 0xffffffff);
4640 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4641 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4642 break;
4643 }
4644 }
4645
4646 if (i >= 10000) {
4647 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4648 "and %s CPU\n",
4649 tp->dev->name,
4650 (offset == RX_CPU_BASE ? "RX" : "TX"));
4651 return -ENODEV;
4652 }
4653 return 0;
4654}
4655
4656struct fw_info {
4657 unsigned int text_base;
4658 unsigned int text_len;
4659 u32 *text_data;
4660 unsigned int rodata_base;
4661 unsigned int rodata_len;
4662 u32 *rodata_data;
4663 unsigned int data_base;
4664 unsigned int data_len;
4665 u32 *data_data;
4666};
4667
4668/* tp->lock is held. */
4669static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4670 int cpu_scratch_size, struct fw_info *info)
4671{
4672 int err, i;
1da177e4
LT
4673 void (*write_op)(struct tg3 *, u32, u32);
4674
4675 if (cpu_base == TX_CPU_BASE &&
4676 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4677 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4678 "TX cpu firmware on %s which is 5705.\n",
4679 tp->dev->name);
4680 return -EINVAL;
4681 }
4682
4683 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4684 write_op = tg3_write_mem;
4685 else
4686 write_op = tg3_write_indirect_reg32;
4687
1b628151
MC
4688 /* It is possible that bootcode is still loading at this point.
4689 * Get the nvram lock first before halting the cpu.
4690 */
4691 tg3_nvram_lock(tp);
1da177e4 4692 err = tg3_halt_cpu(tp, cpu_base);
1b628151 4693 tg3_nvram_unlock(tp);
1da177e4
LT
4694 if (err)
4695 goto out;
4696
4697 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4698 write_op(tp, cpu_scratch_base + i, 0);
4699 tw32(cpu_base + CPU_STATE, 0xffffffff);
4700 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4701 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4702 write_op(tp, (cpu_scratch_base +
4703 (info->text_base & 0xffff) +
4704 (i * sizeof(u32))),
4705 (info->text_data ?
4706 info->text_data[i] : 0));
4707 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4708 write_op(tp, (cpu_scratch_base +
4709 (info->rodata_base & 0xffff) +
4710 (i * sizeof(u32))),
4711 (info->rodata_data ?
4712 info->rodata_data[i] : 0));
4713 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4714 write_op(tp, (cpu_scratch_base +
4715 (info->data_base & 0xffff) +
4716 (i * sizeof(u32))),
4717 (info->data_data ?
4718 info->data_data[i] : 0));
4719
4720 err = 0;
4721
4722out:
1da177e4
LT
4723 return err;
4724}
4725
4726/* tp->lock is held. */
4727static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4728{
4729 struct fw_info info;
4730 int err, i;
4731
4732 info.text_base = TG3_FW_TEXT_ADDR;
4733 info.text_len = TG3_FW_TEXT_LEN;
4734 info.text_data = &tg3FwText[0];
4735 info.rodata_base = TG3_FW_RODATA_ADDR;
4736 info.rodata_len = TG3_FW_RODATA_LEN;
4737 info.rodata_data = &tg3FwRodata[0];
4738 info.data_base = TG3_FW_DATA_ADDR;
4739 info.data_len = TG3_FW_DATA_LEN;
4740 info.data_data = NULL;
4741
4742 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4743 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4744 &info);
4745 if (err)
4746 return err;
4747
4748 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4749 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4750 &info);
4751 if (err)
4752 return err;
4753
4754 /* Now startup only the RX cpu. */
4755 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4756 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4757
4758 for (i = 0; i < 5; i++) {
4759 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4760 break;
4761 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4762 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4763 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4764 udelay(1000);
4765 }
4766 if (i >= 5) {
4767 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4768 "to set RX CPU PC, is %08x should be %08x\n",
4769 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4770 TG3_FW_TEXT_ADDR);
4771 return -ENODEV;
4772 }
4773 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4774 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4775
4776 return 0;
4777}
4778
4779#if TG3_TSO_SUPPORT != 0
4780
4781#define TG3_TSO_FW_RELEASE_MAJOR 0x1
4782#define TG3_TSO_FW_RELASE_MINOR 0x6
4783#define TG3_TSO_FW_RELEASE_FIX 0x0
4784#define TG3_TSO_FW_START_ADDR 0x08000000
4785#define TG3_TSO_FW_TEXT_ADDR 0x08000000
4786#define TG3_TSO_FW_TEXT_LEN 0x1aa0
4787#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4788#define TG3_TSO_FW_RODATA_LEN 0x60
4789#define TG3_TSO_FW_DATA_ADDR 0x08001b20
4790#define TG3_TSO_FW_DATA_LEN 0x30
4791#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4792#define TG3_TSO_FW_SBSS_LEN 0x2c
4793#define TG3_TSO_FW_BSS_ADDR 0x08001b80
4794#define TG3_TSO_FW_BSS_LEN 0x894
4795
4796static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4797 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4798 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4799 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4800 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4801 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4802 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4803 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4804 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4805 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4806 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4807 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4808 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4809 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4810 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4811 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4812 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4813 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4814 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4815 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4816 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4817 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4818 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4819 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4820 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4821 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4822 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4823 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4824 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4825 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4826 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4827 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4828 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4829 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4830 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4831 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4832 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4833 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4834 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4835 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4836 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4837 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4838 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4839 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4840 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4841 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4842 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4843 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4844 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4845 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4846 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4847 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4848 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4849 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4850 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4851 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4852 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4853 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4854 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4855 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4856 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4857 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4858 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4859 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4860 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4861 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4862 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4863 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4864 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4865 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4866 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4867 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4868 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4869 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4870 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4871 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4872 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4873 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4874 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4875 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4876 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4877 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4878 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4879 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4880 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4881 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4882 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4883 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4884 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4885 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4886 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4887 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4888 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4889 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4890 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4891 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4892 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4893 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4894 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4895 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4896 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4897 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4898 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4899 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4900 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4901 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4902 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4903 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4904 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4905 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4906 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4907 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4908 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4909 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4910 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4911 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4912 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4913 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4914 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4915 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4916 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4917 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4918 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4919 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4920 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4921 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4922 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4923 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4924 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4925 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4926 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4927 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4928 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4929 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4930 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4931 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4932 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4933 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4934 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4935 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4936 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4937 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4938 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4939 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4940 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4941 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4942 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4943 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4944 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4945 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4946 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4947 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4948 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4949 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4950 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4951 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4952 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4953 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4954 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4955 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4956 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4957 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4958 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4959 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4960 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4961 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4962 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4963 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4964 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4965 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4966 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4967 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4968 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4969 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4970 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4971 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4972 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4973 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4974 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4975 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4976 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4977 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4978 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4979 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4980 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4981 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4982 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4983 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4984 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4985 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4986 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4987 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4988 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4989 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4990 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4991 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4992 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4993 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4994 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4995 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4996 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4997 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4998 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4999 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5000 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5001 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5002 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5003 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5004 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5005 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5006 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5007 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5008 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5009 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5010 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5011 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5012 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5013 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5014 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5015 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5016 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5017 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5018 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5019 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5020 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5021 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5022 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5023 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5024 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5025 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5026 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5027 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5028 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5029 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5030 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5031 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5032 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5033 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5034 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5035 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5036 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5037 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5038 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5039 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5040 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5041 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5042 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5043 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5044 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5045 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5046 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5047 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5048 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5049 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5050 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5051 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5052 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5053 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5054 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5055 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5056 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5057 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5058 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5059 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5060 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5061 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5062 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5063 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5064 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5065 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5066 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5067 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5068 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5069 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5070 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5071 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5072 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5073 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5074 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5075 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5076 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5077 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5078 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5079 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5080 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5081};
5082
/* Read-only data segment of the generic TSO firmware image.  The words
 * are ASCII tag strings used by the firmware ("MainCpuB", "MainCpuA",
 * "stkoffldIn", "stkoff**", "SwEvent0", "fatalErr").  Opaque firmware
 * data -- do not edit by hand.
 */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5090
/* Initialized data segment of the generic TSO firmware image; the ASCII
 * words spell the firmware version tag "stkoffld_v1.6.0".  Opaque
 * firmware data -- do not edit by hand.
 */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5096
/* 5705 needs a special version of the TSO firmware.  The macros below
 * describe that image's layout in NIC SRAM: the text segment at
 * 0x00010000, followed by rodata, data, and the sbss/bss working
 * storage the firmware zeroes at runtime.  The loader in
 * tg3_load_tso_firmware() sizes its scratch area from these lengths.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2	/* NOTE(review): "RELASE" typo is historical; kept as-is */
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5112
/* Text (machine code) segment of the 5705-specific TSO firmware image,
 * TG3_TSO5_FW_TEXT_LEN bytes, loaded into the RX CPU by
 * tg3_load_tso_firmware().  Opaque firmware blob derived from the
 * unpublished Broadcom sources noted in the copyright header -- do not
 * edit by hand.
 */
static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
5271
/* Read-only data segment of the 5705 TSO firmware image.  The words are
 * ASCII tag strings used by the firmware ("MainCpuB", "MainCpuA",
 * "stkoffld", "fatalErr").  Opaque firmware data -- do not edit by hand.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5278
/* Initialized data segment of the 5705 TSO firmware image; the ASCII
 * words spell the firmware version tag "stkoffld_v1.2.0".  Opaque
 * firmware data -- do not edit by hand.
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5283
/* tp->lock is held.
 *
 * Download the TSO firmware image into on-chip scratch memory and start
 * the firmware CPU executing at its entry point.  5705-class chips load
 * a dedicated image into the RX CPU, carving scratch space out of the
 * 5705 MBUF pool; all other TSO-capable chips load the generic image
 * into the TX CPU scratch area.  Chips with hardware TSO need no
 * download at all.
 *
 * Returns 0 on success, the tg3_load_firmware_cpu() error if the
 * download fails, or -ENODEV if the CPU never latches the new PC.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* TSO done in hardware -- no firmware needed. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		/* Scratch must cover all loadable segments plus the
		 * firmware's sbss/bss working storage.
		 */
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.text_base);

	/* Give the CPU up to five chances (1 ms apart) to latch the new
	 * program counter; re-halt and rewrite the PC on each retry.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear the halt bit so the firmware starts running. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}
5355
5356#endif /* TG3_TSO_SUPPORT != 0 */
5357
5358/* tp->lock is held. */
5359static void __tg3_set_mac_addr(struct tg3 *tp)
5360{
5361 u32 addr_high, addr_low;
5362 int i;
5363
5364 addr_high = ((tp->dev->dev_addr[0] << 8) |
5365 tp->dev->dev_addr[1]);
5366 addr_low = ((tp->dev->dev_addr[2] << 24) |
5367 (tp->dev->dev_addr[3] << 16) |
5368 (tp->dev->dev_addr[4] << 8) |
5369 (tp->dev->dev_addr[5] << 0));
5370 for (i = 0; i < 4; i++) {
5371 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5372 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5373 }
5374
5375 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5377 for (i = 0; i < 12; i++) {
5378 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5379 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5380 }
5381 }
5382
5383 addr_high = (tp->dev->dev_addr[0] +
5384 tp->dev->dev_addr[1] +
5385 tp->dev->dev_addr[2] +
5386 tp->dev->dev_addr[3] +
5387 tp->dev->dev_addr[4] +
5388 tp->dev->dev_addr[5]) &
5389 TX_BACKOFF_SEED_MASK;
5390 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5391}
5392
5393static int tg3_set_mac_addr(struct net_device *dev, void *p)
5394{
5395 struct tg3 *tp = netdev_priv(dev);
5396 struct sockaddr *addr = p;
5397
5398 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5399
f47c11ee 5400 spin_lock_bh(&tp->lock);
1da177e4 5401 __tg3_set_mac_addr(tp);
f47c11ee 5402 spin_unlock_bh(&tp->lock);
1da177e4
LT
5403
5404 return 0;
5405}
5406
5407/* tp->lock is held. */
5408static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5409 dma_addr_t mapping, u32 maxlen_flags,
5410 u32 nic_addr)
5411{
5412 tg3_write_mem(tp,
5413 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5414 ((u64) mapping >> 32));
5415 tg3_write_mem(tp,
5416 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5417 ((u64) mapping & 0xffffffff));
5418 tg3_write_mem(tp,
5419 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5420 maxlen_flags);
5421
5422 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5423 tg3_write_mem(tp,
5424 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5425 nic_addr);
5426}
5427
5428static void __tg3_set_rx_mode(struct net_device *);
d244c892 5429static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
5430{
5431 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5432 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5433 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5434 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5435 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5436 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5437 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5438 }
5439 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5440 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5441 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5442 u32 val = ec->stats_block_coalesce_usecs;
5443
5444 if (!netif_carrier_ok(tp->dev))
5445 val = 0;
5446
5447 tw32(HOSTCC_STAT_COAL_TICKS, val);
5448 }
5449}
1da177e4
LT
5450
5451/* tp->lock is held. */
5452static int tg3_reset_hw(struct tg3 *tp)
5453{
5454 u32 val, rdmac_mode;
5455 int i, err, limit;
5456
5457 tg3_disable_ints(tp);
5458
5459 tg3_stop_fw(tp);
5460
5461 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5462
5463 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 5464 tg3_abort_hw(tp, 1);
1da177e4
LT
5465 }
5466
5467 err = tg3_chip_reset(tp);
5468 if (err)
5469 return err;
5470
5471 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5472
5473 /* This works around an issue with Athlon chipsets on
5474 * B3 tigon3 silicon. This bit has no effect on any
5475 * other revision. But do not set this on PCI Express
5476 * chips.
5477 */
5478 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5479 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5480 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5481
5482 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5483 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5484 val = tr32(TG3PCI_PCISTATE);
5485 val |= PCISTATE_RETRY_SAME_DMA;
5486 tw32(TG3PCI_PCISTATE, val);
5487 }
5488
5489 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5490 /* Enable some hw fixes. */
5491 val = tr32(TG3PCI_MSI_DATA);
5492 val |= (1 << 26) | (1 << 28) | (1 << 29);
5493 tw32(TG3PCI_MSI_DATA, val);
5494 }
5495
5496 /* Descriptor ring init may make accesses to the
5497 * NIC SRAM area to setup the TX descriptors, so we
5498 * can only do this after the hardware has been
5499 * successfully reset.
5500 */
5501 tg3_init_rings(tp);
5502
5503 /* This value is determined during the probe time DMA
5504 * engine test, tg3_test_dma.
5505 */
5506 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5507
5508 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5509 GRC_MODE_4X_NIC_SEND_RINGS |
5510 GRC_MODE_NO_TX_PHDR_CSUM |
5511 GRC_MODE_NO_RX_PHDR_CSUM);
5512 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5513 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5514 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5515 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5516 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5517
5518 tw32(GRC_MODE,
5519 tp->grc_mode |
5520 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5521
5522 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5523 val = tr32(GRC_MISC_CFG);
5524 val &= ~0xff;
5525 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5526 tw32(GRC_MISC_CFG, val);
5527
5528 /* Initialize MBUF/DESC pool. */
cbf46853 5529 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
5530 /* Do nothing. */
5531 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5532 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5534 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5535 else
5536 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5537 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5538 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5539 }
5540#if TG3_TSO_SUPPORT != 0
5541 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5542 int fw_len;
5543
5544 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5545 TG3_TSO5_FW_RODATA_LEN +
5546 TG3_TSO5_FW_DATA_LEN +
5547 TG3_TSO5_FW_SBSS_LEN +
5548 TG3_TSO5_FW_BSS_LEN);
5549 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5550 tw32(BUFMGR_MB_POOL_ADDR,
5551 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5552 tw32(BUFMGR_MB_POOL_SIZE,
5553 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5554 }
5555#endif
5556
0f893dc6 5557 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
5558 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5559 tp->bufmgr_config.mbuf_read_dma_low_water);
5560 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5561 tp->bufmgr_config.mbuf_mac_rx_low_water);
5562 tw32(BUFMGR_MB_HIGH_WATER,
5563 tp->bufmgr_config.mbuf_high_water);
5564 } else {
5565 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5566 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5567 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5568 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5569 tw32(BUFMGR_MB_HIGH_WATER,
5570 tp->bufmgr_config.mbuf_high_water_jumbo);
5571 }
5572 tw32(BUFMGR_DMA_LOW_WATER,
5573 tp->bufmgr_config.dma_low_water);
5574 tw32(BUFMGR_DMA_HIGH_WATER,
5575 tp->bufmgr_config.dma_high_water);
5576
5577 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5578 for (i = 0; i < 2000; i++) {
5579 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5580 break;
5581 udelay(10);
5582 }
5583 if (i >= 2000) {
5584 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5585 tp->dev->name);
5586 return -ENODEV;
5587 }
5588
5589 /* Setup replenish threshold. */
5590 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5591
5592 /* Initialize TG3_BDINFO's at:
5593 * RCVDBDI_STD_BD: standard eth size rx ring
5594 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5595 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5596 *
5597 * like so:
5598 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5599 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5600 * ring attribute flags
5601 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5602 *
5603 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5604 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5605 *
5606 * The size of each ring is fixed in the firmware, but the location is
5607 * configurable.
5608 */
5609 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5610 ((u64) tp->rx_std_mapping >> 32));
5611 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5612 ((u64) tp->rx_std_mapping & 0xffffffff));
5613 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5614 NIC_SRAM_RX_BUFFER_DESC);
5615
5616 /* Don't even try to program the JUMBO/MINI buffer descriptor
5617 * configs on 5705.
5618 */
5619 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5620 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5621 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5622 } else {
5623 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5624 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5625
5626 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5627 BDINFO_FLAGS_DISABLED);
5628
5629 /* Setup replenish threshold. */
5630 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5631
0f893dc6 5632 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
5633 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5634 ((u64) tp->rx_jumbo_mapping >> 32));
5635 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5636 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5637 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5638 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5639 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5640 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5641 } else {
5642 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5643 BDINFO_FLAGS_DISABLED);
5644 }
5645
5646 }
5647
5648 /* There is only one send ring on 5705/5750, no need to explicitly
5649 * disable the others.
5650 */
5651 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5652 /* Clear out send RCB ring in SRAM. */
5653 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5654 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5655 BDINFO_FLAGS_DISABLED);
5656 }
5657
5658 tp->tx_prod = 0;
5659 tp->tx_cons = 0;
5660 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5661 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5662
5663 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5664 tp->tx_desc_mapping,
5665 (TG3_TX_RING_SIZE <<
5666 BDINFO_FLAGS_MAXLEN_SHIFT),
5667 NIC_SRAM_TX_BUFFER_DESC);
5668
5669 /* There is only one receive return ring on 5705/5750, no need
5670 * to explicitly disable the others.
5671 */
5672 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5673 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5674 i += TG3_BDINFO_SIZE) {
5675 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5676 BDINFO_FLAGS_DISABLED);
5677 }
5678 }
5679
5680 tp->rx_rcb_ptr = 0;
5681 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5682
5683 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5684 tp->rx_rcb_mapping,
5685 (TG3_RX_RCB_RING_SIZE(tp) <<
5686 BDINFO_FLAGS_MAXLEN_SHIFT),
5687 0);
5688
5689 tp->rx_std_ptr = tp->rx_pending;
5690 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5691 tp->rx_std_ptr);
5692
0f893dc6 5693 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
5694 tp->rx_jumbo_pending : 0;
5695 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5696 tp->rx_jumbo_ptr);
5697
5698 /* Initialize MAC address and backoff seed. */
5699 __tg3_set_mac_addr(tp);
5700
5701 /* MTU + ethernet header + FCS + optional VLAN tag */
5702 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5703
5704 /* The slot time is changed by tg3_setup_phy if we
5705 * run at gigabit with half duplex.
5706 */
5707 tw32(MAC_TX_LENGTHS,
5708 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5709 (6 << TX_LENGTHS_IPG_SHIFT) |
5710 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5711
5712 /* Receive rules. */
5713 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5714 tw32(RCVLPC_CONFIG, 0x0181);
5715
5716 /* Calculate RDMAC_MODE setting early, we need it to determine
5717 * the RCVLPC_STATE_ENABLE mask.
5718 */
5719 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5720 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5721 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5722 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5723 RDMAC_MODE_LNGREAD_ENAB);
5724 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5725 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
5726
5727 /* If statement applies to 5705 and 5750 PCI devices only */
5728 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5729 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5730 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
5731 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5732 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5733 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5734 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5735 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5736 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5737 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5738 }
5739 }
5740
85e94ced
MC
5741 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5742 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5743
1da177e4
LT
5744#if TG3_TSO_SUPPORT != 0
5745 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5746 rdmac_mode |= (1 << 27);
5747#endif
5748
5749 /* Receive/send statistics. */
5750 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5751 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5752 val = tr32(RCVLPC_STATS_ENABLE);
5753 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5754 tw32(RCVLPC_STATS_ENABLE, val);
5755 } else {
5756 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5757 }
5758 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5759 tw32(SNDDATAI_STATSENAB, 0xffffff);
5760 tw32(SNDDATAI_STATSCTRL,
5761 (SNDDATAI_SCTRL_ENABLE |
5762 SNDDATAI_SCTRL_FASTUPD));
5763
5764 /* Setup host coalescing engine. */
5765 tw32(HOSTCC_MODE, 0);
5766 for (i = 0; i < 2000; i++) {
5767 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5768 break;
5769 udelay(10);
5770 }
5771
d244c892 5772 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
5773
5774 /* set status block DMA address */
5775 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5776 ((u64) tp->status_mapping >> 32));
5777 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5778 ((u64) tp->status_mapping & 0xffffffff));
5779
5780 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5781 /* Status/statistics block address. See tg3_timer,
5782 * the tg3_periodic_fetch_stats call there, and
5783 * tg3_get_stats to see how this works for 5705/5750 chips.
5784 */
1da177e4
LT
5785 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5786 ((u64) tp->stats_mapping >> 32));
5787 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5788 ((u64) tp->stats_mapping & 0xffffffff));
5789 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5790 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5791 }
5792
5793 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5794
5795 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5796 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5797 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5798 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5799
5800 /* Clear statistics/status block in chip, and status block in ram. */
5801 for (i = NIC_SRAM_STATS_BLK;
5802 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5803 i += sizeof(u32)) {
5804 tg3_write_mem(tp, i, 0);
5805 udelay(40);
5806 }
5807 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5808
c94e3941
MC
5809 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5810 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5811 /* reset to prevent losing 1st rx packet intermittently */
5812 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5813 udelay(10);
5814 }
5815
1da177e4
LT
5816 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5817 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5818 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5819 udelay(40);
5820
314fba34
MC
5821 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5822 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5823 * register to preserve the GPIO settings for LOMs. The GPIOs,
5824 * whether used as inputs or outputs, are set by boot code after
5825 * reset.
5826 */
5827 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5828 u32 gpio_mask;
5829
5830 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5831 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
5832
5833 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5834 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5835 GRC_LCLCTRL_GPIO_OUTPUT3;
5836
314fba34
MC
5837 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5838
5839 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
5840 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5841 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 5842 }
1da177e4
LT
5843 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5844 udelay(100);
5845
09ee929c 5846 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 5847 tp->last_tag = 0;
1da177e4
LT
5848
5849 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5850 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5851 udelay(40);
5852 }
5853
5854 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5855 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5856 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5857 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5858 WDMAC_MODE_LNGREAD_ENAB);
5859
85e94ced
MC
5860 /* If statement applies to 5705 and 5750 PCI devices only */
5861 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5862 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5863 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
5864 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5865 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5866 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5867 /* nothing */
5868 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5869 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5870 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5871 val |= WDMAC_MODE_RX_ACCEL;
5872 }
5873 }
5874
5875 tw32_f(WDMAC_MODE, val);
5876 udelay(40);
5877
5878 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5879 val = tr32(TG3PCI_X_CAPS);
5880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5881 val &= ~PCIX_CAPS_BURST_MASK;
5882 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5883 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5884 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5885 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5886 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5887 val |= (tp->split_mode_max_reqs <<
5888 PCIX_CAPS_SPLIT_SHIFT);
5889 }
5890 tw32(TG3PCI_X_CAPS, val);
5891 }
5892
5893 tw32_f(RDMAC_MODE, rdmac_mode);
5894 udelay(40);
5895
5896 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5897 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5898 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5899 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5900 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5901 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5902 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5903 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5904#if TG3_TSO_SUPPORT != 0
5905 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5906 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5907#endif
5908 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5909 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5910
5911 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5912 err = tg3_load_5701_a0_firmware_fix(tp);
5913 if (err)
5914 return err;
5915 }
5916
5917#if TG3_TSO_SUPPORT != 0
5918 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5919 err = tg3_load_tso_firmware(tp);
5920 if (err)
5921 return err;
5922 }
5923#endif
5924
5925 tp->tx_mode = TX_MODE_ENABLE;
5926 tw32_f(MAC_TX_MODE, tp->tx_mode);
5927 udelay(100);
5928
5929 tp->rx_mode = RX_MODE_ENABLE;
5930 tw32_f(MAC_RX_MODE, tp->rx_mode);
5931 udelay(10);
5932
5933 if (tp->link_config.phy_is_low_power) {
5934 tp->link_config.phy_is_low_power = 0;
5935 tp->link_config.speed = tp->link_config.orig_speed;
5936 tp->link_config.duplex = tp->link_config.orig_duplex;
5937 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5938 }
5939
5940 tp->mi_mode = MAC_MI_MODE_BASE;
5941 tw32_f(MAC_MI_MODE, tp->mi_mode);
5942 udelay(80);
5943
5944 tw32(MAC_LED_CTRL, tp->led_ctrl);
5945
5946 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 5947 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
5948 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5949 udelay(10);
5950 }
5951 tw32_f(MAC_RX_MODE, tp->rx_mode);
5952 udelay(10);
5953
5954 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5955 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5956 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5957 /* Set drive transmission level to 1.2V */
5958 /* only if the signal pre-emphasis bit is not set */
5959 val = tr32(MAC_SERDES_CFG);
5960 val &= 0xfffff000;
5961 val |= 0x880;
5962 tw32(MAC_SERDES_CFG, val);
5963 }
5964 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5965 tw32(MAC_SERDES_CFG, 0x616000);
5966 }
5967
5968 /* Prevent chip from dropping frames when flow control
5969 * is enabled.
5970 */
5971 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5972
5973 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5974 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5975 /* Use hardware link auto-negotiation */
5976 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5977 }
5978
5979 err = tg3_setup_phy(tp, 1);
5980 if (err)
5981 return err;
5982
5983 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5984 u32 tmp;
5985
5986 /* Clear CRC stats. */
5987 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5988 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5989 tg3_readphy(tp, 0x14, &tmp);
5990 }
5991 }
5992
5993 __tg3_set_rx_mode(tp->dev);
5994
5995 /* Initialize receive rules. */
5996 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5997 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5998 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5999 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6000
4cf78e4f
MC
6001 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6002 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
1da177e4
LT
6003 limit = 8;
6004 else
6005 limit = 16;
6006 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6007 limit -= 4;
6008 switch (limit) {
6009 case 16:
6010 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6011 case 15:
6012 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6013 case 14:
6014 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6015 case 13:
6016 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6017 case 12:
6018 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6019 case 11:
6020 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6021 case 10:
6022 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6023 case 9:
6024 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6025 case 8:
6026 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6027 case 7:
6028 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6029 case 6:
6030 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6031 case 5:
6032 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6033 case 4:
6034 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6035 case 3:
6036 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6037 case 2:
6038 case 1:
6039
6040 default:
6041 break;
6042 };
6043
6044 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6045
1da177e4
LT
6046 return 0;
6047}
6048
6049/* Called at device open time to get the chip ready for
6050 * packet processing. Invoked with tp->lock held.
6051 */
6052static int tg3_init_hw(struct tg3 *tp)
6053{
6054 int err;
6055
6056 /* Force the chip into D0. */
6057 err = tg3_set_power_state(tp, 0);
6058 if (err)
6059 goto out;
6060
6061 tg3_switch_clocks(tp);
6062
6063 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6064
6065 err = tg3_reset_hw(tp);
6066
6067out:
6068 return err;
6069}
6070
/* Accumulate the 32-bit value read from hardware register REG into the
 * 64-bit software counter PSTAT (a low/high word pair).  Unsigned
 * addition wraps, so a carry into the high word is detected by the low
 * word ending up smaller than the value just added.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6077
/* Fold the chip's 32-bit MAC TX/RX statistics registers into the 64-bit
 * software counters in tp->hw_stats.  Called once per second from
 * tg3_timer on 5705-plus chips, which do not DMA a statistics block to
 * host memory (see tg3_reset_hw and tg3_get_stats).  Caller holds
 * tp->lock.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Counters are left untouched while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
}
6114
/* Periodic driver timer, armed from tg3_open every tp->timer_offset
 * jiffies.  Handles the non-tagged-status interrupt race workaround, a
 * write-DMA watchdog, once-per-second link polling/statistics, and the
 * 120-second ASF firmware heartbeat.  Re-arms itself on exit unless the
 * watchdog has handed the device off to reset_task.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block pending: force an interrupt so the
			 * ISR picks it up.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise kick the coalescing engine to produce
			 * a fresh status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Watchdog: write DMA engine stopped on its own means the
		 * chip is wedged; schedule a full reset.  The lock must be
		 * dropped before returning without re-arming the timer.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Poll MAC_STATUS for a PHY/link event instead of
			 * relying on a link-change interrupt.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but state changed ... */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ... or link was down but a signal appeared. */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode bits before
				 * renegotiating the SERDES link.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 120 seconds. */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is still alive
			 * via the firmware command mailbox, then raise the
			 * RX CPU event (bit 14) to make it look.
			 */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6210
/* Fire one test interrupt through the host coalescing engine and poll
 * the interrupt mailbox to confirm it was delivered.  Temporarily swaps
 * in tg3_test_isr as the IRQ handler, then restores the normal handler
 * (MSI, tagged-status, or legacy) before returning.  Returns 0 if the
 * interrupt arrived, -EIO if it did not, -ENODEV if the device is not
 * running, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Replace the production handler with the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll up to ~50ms for the mailbox to show the interrupt. */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the real interrupt handler regardless of outcome. */
	free_irq(tp->pdev->irq, dev);

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else {
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6266
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  On MSI failure the chip is reset because the
 * MSI cycle may have terminated with a Master Abort.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-register the legacy (tagged or plain) interrupt handler. */
	{
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;

		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6334
/* net_device open handler: allocate DMA-consistent rings, pick and
 * register an interrupt handler (enabling MSI where the chip supports
 * it), program the hardware, verify MSI delivery, then arm the periodic
 * timer and start the TX queue.  Each failure path unwinds everything
 * acquired up to that point.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	/* Try MSI only on 5750-plus chips past the A and B revisions. */
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else {
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;

		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}

	if (err) {
		/* IRQ registration failed: undo MSI and ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status needs only a 1Hz tick; the non-tagged
		 * race workaround in tg3_timer wants 10Hz.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 120);

		/* Timer is set up here but only armed after the MSI test
		 * below succeeds.
		 */
		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	/* Verify MSI actually delivers; tg3_test_msi falls back to INTx
	 * itself, so a non-zero return here is unrecoverable.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			return err;
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
6460
6461#if 0
6462/*static*/ void tg3_dump_state(struct tg3 *tp)
6463{
6464 u32 val32, val32_2, val32_3, val32_4, val32_5;
6465 u16 val16;
6466 int i;
6467
6468 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6469 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6470 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6471 val16, val32);
6472
6473 /* MAC block */
6474 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6475 tr32(MAC_MODE), tr32(MAC_STATUS));
6476 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6477 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6478 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6479 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6480 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6481 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6482
6483 /* Send data initiator control block */
6484 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6485 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6486 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6487 tr32(SNDDATAI_STATSCTRL));
6488
6489 /* Send data completion control block */
6490 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6491
6492 /* Send BD ring selector block */
6493 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6494 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6495
6496 /* Send BD initiator control block */
6497 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6498 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6499
6500 /* Send BD completion control block */
6501 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6502
6503 /* Receive list placement control block */
6504 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6505 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6506 printk(" RCVLPC_STATSCTRL[%08x]\n",
6507 tr32(RCVLPC_STATSCTRL));
6508
6509 /* Receive data and receive BD initiator control block */
6510 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6511 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6512
6513 /* Receive data completion control block */
6514 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6515 tr32(RCVDCC_MODE));
6516
6517 /* Receive BD initiator control block */
6518 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6519 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6520
6521 /* Receive BD completion control block */
6522 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6523 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6524
6525 /* Receive list selector control block */
6526 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6527 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6528
6529 /* Mbuf cluster free block */
6530 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6531 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6532
6533 /* Host coalescing control block */
6534 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6535 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6536 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6537 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6538 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6539 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6540 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6541 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6542 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6543 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6544 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6545 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6546
6547 /* Memory arbiter control block */
6548 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6549 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6550
6551 /* Buffer manager control block */
6552 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6553 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6554 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6555 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6556 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6557 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6558 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6559 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6560
6561 /* Read DMA control block */
6562 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6563 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6564
6565 /* Write DMA control block */
6566 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6567 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6568
6569 /* DMA completion block */
6570 printk("DEBUG: DMAC_MODE[%08x]\n",
6571 tr32(DMAC_MODE));
6572
6573 /* GRC block */
6574 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6575 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6576 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6577 tr32(GRC_LOCAL_CTRL));
6578
6579 /* TG3_BDINFOs */
6580 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6581 tr32(RCVDBDI_JUMBO_BD + 0x0),
6582 tr32(RCVDBDI_JUMBO_BD + 0x4),
6583 tr32(RCVDBDI_JUMBO_BD + 0x8),
6584 tr32(RCVDBDI_JUMBO_BD + 0xc));
6585 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6586 tr32(RCVDBDI_STD_BD + 0x0),
6587 tr32(RCVDBDI_STD_BD + 0x4),
6588 tr32(RCVDBDI_STD_BD + 0x8),
6589 tr32(RCVDBDI_STD_BD + 0xc));
6590 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6591 tr32(RCVDBDI_MINI_BD + 0x0),
6592 tr32(RCVDBDI_MINI_BD + 0x4),
6593 tr32(RCVDBDI_MINI_BD + 0x8),
6594 tr32(RCVDBDI_MINI_BD + 0xc));
6595
6596 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6597 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6598 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6599 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6600 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6601 val32, val32_2, val32_3, val32_4);
6602
6603 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6604 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6605 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6606 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6607 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6608 val32, val32_2, val32_3, val32_4);
6609
6610 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6611 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6612 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6613 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6614 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6615 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6616 val32, val32_2, val32_3, val32_4, val32_5);
6617
6618 /* SW status block */
6619 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6620 tp->hw_status->status,
6621 tp->hw_status->status_tag,
6622 tp->hw_status->rx_jumbo_consumer,
6623 tp->hw_status->rx_consumer,
6624 tp->hw_status->rx_mini_consumer,
6625 tp->hw_status->idx[0].rx_producer,
6626 tp->hw_status->idx[0].tx_consumer);
6627
6628 /* SW statistics block */
6629 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6630 ((u32 *)tp->hw_stats)[0],
6631 ((u32 *)tp->hw_stats)[1],
6632 ((u32 *)tp->hw_stats)[2],
6633 ((u32 *)tp->hw_stats)[3]);
6634
6635 /* Mailboxes */
6636 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
09ee929c
MC
6637 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6638 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6639 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6640 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
1da177e4
LT
6641
6642 /* NIC side send descriptors. */
6643 for (i = 0; i < 6; i++) {
6644 unsigned long txd;
6645
6646 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6647 + (i * sizeof(struct tg3_tx_buffer_desc));
6648 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6649 i,
6650 readl(txd + 0x0), readl(txd + 0x4),
6651 readl(txd + 0x8), readl(txd + 0xc));
6652 }
6653
6654 /* NIC side RX descriptors. */
6655 for (i = 0; i < 6; i++) {
6656 unsigned long rxd;
6657
6658 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6659 + (i * sizeof(struct tg3_rx_buffer_desc));
6660 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6661 i,
6662 readl(rxd + 0x0), readl(rxd + 0x4),
6663 readl(rxd + 0x8), readl(rxd + 0xc));
6664 rxd += (4 * sizeof(u32));
6665 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6666 i,
6667 readl(rxd + 0x0), readl(rxd + 0x4),
6668 readl(rxd + 0x8), readl(rxd + 0xc));
6669 }
6670
6671 for (i = 0; i < 6; i++) {
6672 unsigned long rxd;
6673
6674 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6675 + (i * sizeof(struct tg3_rx_buffer_desc));
6676 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6677 i,
6678 readl(rxd + 0x0), readl(rxd + 0x4),
6679 readl(rxd + 0x8), readl(rxd + 0xc));
6680 rxd += (4 * sizeof(u32));
6681 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6682 i,
6683 readl(rxd + 0x0), readl(rxd + 0x4),
6684 readl(rxd + 0x8), readl(rxd + 0xc));
6685 }
6686}
6687#endif
6688
/* Forward declarations: both functions are defined further down in this
 * file and are needed by tg3_close() to snapshot statistics at teardown.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6691
6692static int tg3_close(struct net_device *dev)
6693{
6694 struct tg3 *tp = netdev_priv(dev);
6695
6696 netif_stop_queue(dev);
6697
6698 del_timer_sync(&tp->timer);
6699
f47c11ee 6700 tg3_full_lock(tp, 1);
1da177e4
LT
6701#if 0
6702 tg3_dump_state(tp);
6703#endif
6704
6705 tg3_disable_ints(tp);
6706
944d980e 6707 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
6708 tg3_free_rings(tp);
6709 tp->tg3_flags &=
6710 ~(TG3_FLAG_INIT_COMPLETE |
6711 TG3_FLAG_GOT_SERDES_FLOWCTL);
6712 netif_carrier_off(tp->dev);
6713
f47c11ee 6714 tg3_full_unlock(tp);
1da177e4 6715
88b06bc2
MC
6716 free_irq(tp->pdev->irq, dev);
6717 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6718 pci_disable_msi(tp->pdev);
6719 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6720 }
1da177e4
LT
6721
6722 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6723 sizeof(tp->net_stats_prev));
6724 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6725 sizeof(tp->estats_prev));
6726
6727 tg3_free_consistent(tp);
6728
6729 return 0;
6730}
6731
6732static inline unsigned long get_stat64(tg3_stat64_t *val)
6733{
6734 unsigned long ret;
6735
6736#if (BITS_PER_LONG == 32)
6737 ret = val->low;
6738#else
6739 ret = ((u64)val->high << 32) | ((u64)val->low);
6740#endif
6741 return ret;
6742}
6743
6744static unsigned long calc_crc_errors(struct tg3 *tp)
6745{
6746 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6747
6748 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6749 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6750 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1da177e4
LT
6751 u32 val;
6752
f47c11ee 6753 spin_lock_bh(&tp->lock);
1da177e4
LT
6754 if (!tg3_readphy(tp, 0x1e, &val)) {
6755 tg3_writephy(tp, 0x1e, val | 0x8000);
6756 tg3_readphy(tp, 0x14, &val);
6757 } else
6758 val = 0;
f47c11ee 6759 spin_unlock_bh(&tp->lock);
1da177e4
LT
6760
6761 tp->phy_crc_errors += val;
6762
6763 return tp->phy_crc_errors;
6764 }
6765
6766 return get_stat64(&hw_stats->rx_fcs_errors);
6767}
6768
/* Fold one hardware counter into the cumulative ethtool statistics:
 * result = value saved at last close (old_estats) + live hardware count.
 * Expects estats/old_estats/hw_stats locals in the enclosing scope.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
6772
6773static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6774{
6775 struct tg3_ethtool_stats *estats = &tp->estats;
6776 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6777 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6778
6779 if (!hw_stats)
6780 return old_estats;
6781
6782 ESTAT_ADD(rx_octets);
6783 ESTAT_ADD(rx_fragments);
6784 ESTAT_ADD(rx_ucast_packets);
6785 ESTAT_ADD(rx_mcast_packets);
6786 ESTAT_ADD(rx_bcast_packets);
6787 ESTAT_ADD(rx_fcs_errors);
6788 ESTAT_ADD(rx_align_errors);
6789 ESTAT_ADD(rx_xon_pause_rcvd);
6790 ESTAT_ADD(rx_xoff_pause_rcvd);
6791 ESTAT_ADD(rx_mac_ctrl_rcvd);
6792 ESTAT_ADD(rx_xoff_entered);
6793 ESTAT_ADD(rx_frame_too_long_errors);
6794 ESTAT_ADD(rx_jabbers);
6795 ESTAT_ADD(rx_undersize_packets);
6796 ESTAT_ADD(rx_in_length_errors);
6797 ESTAT_ADD(rx_out_length_errors);
6798 ESTAT_ADD(rx_64_or_less_octet_packets);
6799 ESTAT_ADD(rx_65_to_127_octet_packets);
6800 ESTAT_ADD(rx_128_to_255_octet_packets);
6801 ESTAT_ADD(rx_256_to_511_octet_packets);
6802 ESTAT_ADD(rx_512_to_1023_octet_packets);
6803 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6804 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6805 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6806 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6807 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6808
6809 ESTAT_ADD(tx_octets);
6810 ESTAT_ADD(tx_collisions);
6811 ESTAT_ADD(tx_xon_sent);
6812 ESTAT_ADD(tx_xoff_sent);
6813 ESTAT_ADD(tx_flow_control);
6814 ESTAT_ADD(tx_mac_errors);
6815 ESTAT_ADD(tx_single_collisions);
6816 ESTAT_ADD(tx_mult_collisions);
6817 ESTAT_ADD(tx_deferred);
6818 ESTAT_ADD(tx_excessive_collisions);
6819 ESTAT_ADD(tx_late_collisions);
6820 ESTAT_ADD(tx_collide_2times);
6821 ESTAT_ADD(tx_collide_3times);
6822 ESTAT_ADD(tx_collide_4times);
6823 ESTAT_ADD(tx_collide_5times);
6824 ESTAT_ADD(tx_collide_6times);
6825 ESTAT_ADD(tx_collide_7times);
6826 ESTAT_ADD(tx_collide_8times);
6827 ESTAT_ADD(tx_collide_9times);
6828 ESTAT_ADD(tx_collide_10times);
6829 ESTAT_ADD(tx_collide_11times);
6830 ESTAT_ADD(tx_collide_12times);
6831 ESTAT_ADD(tx_collide_13times);
6832 ESTAT_ADD(tx_collide_14times);
6833 ESTAT_ADD(tx_collide_15times);
6834 ESTAT_ADD(tx_ucast_packets);
6835 ESTAT_ADD(tx_mcast_packets);
6836 ESTAT_ADD(tx_bcast_packets);
6837 ESTAT_ADD(tx_carrier_sense_errors);
6838 ESTAT_ADD(tx_discards);
6839 ESTAT_ADD(tx_errors);
6840
6841 ESTAT_ADD(dma_writeq_full);
6842 ESTAT_ADD(dma_write_prioq_full);
6843 ESTAT_ADD(rxbds_empty);
6844 ESTAT_ADD(rx_discards);
6845 ESTAT_ADD(rx_errors);
6846 ESTAT_ADD(rx_threshold_hit);
6847
6848 ESTAT_ADD(dma_readq_full);
6849 ESTAT_ADD(dma_read_prioq_full);
6850 ESTAT_ADD(tx_comp_queue_full);
6851
6852 ESTAT_ADD(ring_set_send_prod_index);
6853 ESTAT_ADD(ring_status_update);
6854 ESTAT_ADD(nic_irqs);
6855 ESTAT_ADD(nic_avoided_irqs);
6856 ESTAT_ADD(nic_tx_threshold_hit);
6857
6858 return estats;
6859}
6860
6861static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6862{
6863 struct tg3 *tp = netdev_priv(dev);
6864 struct net_device_stats *stats = &tp->net_stats;
6865 struct net_device_stats *old_stats = &tp->net_stats_prev;
6866 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6867
6868 if (!hw_stats)
6869 return old_stats;
6870
6871 stats->rx_packets = old_stats->rx_packets +
6872 get_stat64(&hw_stats->rx_ucast_packets) +
6873 get_stat64(&hw_stats->rx_mcast_packets) +
6874 get_stat64(&hw_stats->rx_bcast_packets);
6875
6876 stats->tx_packets = old_stats->tx_packets +
6877 get_stat64(&hw_stats->tx_ucast_packets) +
6878 get_stat64(&hw_stats->tx_mcast_packets) +
6879 get_stat64(&hw_stats->tx_bcast_packets);
6880
6881 stats->rx_bytes = old_stats->rx_bytes +
6882 get_stat64(&hw_stats->rx_octets);
6883 stats->tx_bytes = old_stats->tx_bytes +
6884 get_stat64(&hw_stats->tx_octets);
6885
6886 stats->rx_errors = old_stats->rx_errors +
4f63b877 6887 get_stat64(&hw_stats->rx_errors);
1da177e4
LT
6888 stats->tx_errors = old_stats->tx_errors +
6889 get_stat64(&hw_stats->tx_errors) +
6890 get_stat64(&hw_stats->tx_mac_errors) +
6891 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6892 get_stat64(&hw_stats->tx_discards);
6893
6894 stats->multicast = old_stats->multicast +
6895 get_stat64(&hw_stats->rx_mcast_packets);
6896 stats->collisions = old_stats->collisions +
6897 get_stat64(&hw_stats->tx_collisions);
6898
6899 stats->rx_length_errors = old_stats->rx_length_errors +
6900 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6901 get_stat64(&hw_stats->rx_undersize_packets);
6902
6903 stats->rx_over_errors = old_stats->rx_over_errors +
6904 get_stat64(&hw_stats->rxbds_empty);
6905 stats->rx_frame_errors = old_stats->rx_frame_errors +
6906 get_stat64(&hw_stats->rx_align_errors);
6907 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6908 get_stat64(&hw_stats->tx_discards);
6909 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6910 get_stat64(&hw_stats->tx_carrier_sense_errors);
6911
6912 stats->rx_crc_errors = old_stats->rx_crc_errors +
6913 calc_crc_errors(tp);
6914
4f63b877
JL
6915 stats->rx_missed_errors = old_stats->rx_missed_errors +
6916 get_stat64(&hw_stats->rx_discards);
6917
1da177e4
LT
6918 return stats;
6919}
6920
6921static inline u32 calc_crc(unsigned char *buf, int len)
6922{
6923 u32 reg;
6924 u32 tmp;
6925 int j, k;
6926
6927 reg = 0xffffffff;
6928
6929 for (j = 0; j < len; j++) {
6930 reg ^= buf[j];
6931
6932 for (k = 0; k < 8; k++) {
6933 tmp = reg & 0x01;
6934
6935 reg >>= 1;
6936
6937 if (tmp) {
6938 reg ^= 0xedb88320;
6939 }
6940 }
6941 }
6942
6943 return ~reg;
6944}
6945
6946static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6947{
6948 /* accept or reject all multicast frames */
6949 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6950 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6951 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6952 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6953}
6954
6955static void __tg3_set_rx_mode(struct net_device *dev)
6956{
6957 struct tg3 *tp = netdev_priv(dev);
6958 u32 rx_mode;
6959
6960 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6961 RX_MODE_KEEP_VLAN_TAG);
6962
6963 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6964 * flag clear.
6965 */
6966#if TG3_VLAN_TAG_USED
6967 if (!tp->vlgrp &&
6968 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6969 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6970#else
6971 /* By definition, VLAN is disabled always in this
6972 * case.
6973 */
6974 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6975 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6976#endif
6977
6978 if (dev->flags & IFF_PROMISC) {
6979 /* Promiscuous mode. */
6980 rx_mode |= RX_MODE_PROMISC;
6981 } else if (dev->flags & IFF_ALLMULTI) {
6982 /* Accept all multicast. */
6983 tg3_set_multi (tp, 1);
6984 } else if (dev->mc_count < 1) {
6985 /* Reject all multicast. */
6986 tg3_set_multi (tp, 0);
6987 } else {
6988 /* Accept one or more multicast(s). */
6989 struct dev_mc_list *mclist;
6990 unsigned int i;
6991 u32 mc_filter[4] = { 0, };
6992 u32 regidx;
6993 u32 bit;
6994 u32 crc;
6995
6996 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6997 i++, mclist = mclist->next) {
6998
6999 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7000 bit = ~crc & 0x7f;
7001 regidx = (bit & 0x60) >> 5;
7002 bit &= 0x1f;
7003 mc_filter[regidx] |= (1 << bit);
7004 }
7005
7006 tw32(MAC_HASH_REG_0, mc_filter[0]);
7007 tw32(MAC_HASH_REG_1, mc_filter[1]);
7008 tw32(MAC_HASH_REG_2, mc_filter[2]);
7009 tw32(MAC_HASH_REG_3, mc_filter[3]);
7010 }
7011
7012 if (rx_mode != tp->rx_mode) {
7013 tp->rx_mode = rx_mode;
7014 tw32_f(MAC_RX_MODE, rx_mode);
7015 udelay(10);
7016 }
7017}
7018
/* net_device ->set_rx_mode entry point: take the full lock around the
 * unlocked worker above.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
7027
/* Size of the ethtool register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7034
7035static void tg3_get_regs(struct net_device *dev,
7036 struct ethtool_regs *regs, void *_p)
7037{
7038 u32 *p = _p;
7039 struct tg3 *tp = netdev_priv(dev);
7040 u8 *orig_p = _p;
7041 int i;
7042
7043 regs->version = 0;
7044
7045 memset(p, 0, TG3_REGDUMP_LEN);
7046
f47c11ee 7047 tg3_full_lock(tp, 0);
1da177e4
LT
7048
7049#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7050#define GET_REG32_LOOP(base,len) \
7051do { p = (u32 *)(orig_p + (base)); \
7052 for (i = 0; i < len; i += 4) \
7053 __GET_REG32((base) + i); \
7054} while (0)
7055#define GET_REG32_1(reg) \
7056do { p = (u32 *)(orig_p + (reg)); \
7057 __GET_REG32((reg)); \
7058} while (0)
7059
7060 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7061 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7062 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7063 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7064 GET_REG32_1(SNDDATAC_MODE);
7065 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7066 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7067 GET_REG32_1(SNDBDC_MODE);
7068 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7069 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7070 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7071 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7072 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7073 GET_REG32_1(RCVDCC_MODE);
7074 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7075 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7076 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7077 GET_REG32_1(MBFREE_MODE);
7078 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7079 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7080 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7081 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7082 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7083 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7084 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7085 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7086 GET_REG32_LOOP(FTQ_RESET, 0x120);
7087 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7088 GET_REG32_1(DMAC_MODE);
7089 GET_REG32_LOOP(GRC_MODE, 0x4c);
7090 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7091 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7092
7093#undef __GET_REG32
7094#undef GET_REG32_LOOP
7095#undef GET_REG32_1
7096
f47c11ee 7097 tg3_full_unlock(tp);
1da177e4
LT
7098}
7099
7100static int tg3_get_eeprom_len(struct net_device *dev)
7101{
7102 struct tg3 *tp = netdev_priv(dev);
7103
7104 return tp->nvram_size;
7105}
7106
7107static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7108
7109static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7110{
7111 struct tg3 *tp = netdev_priv(dev);
7112 int ret;
7113 u8 *pd;
7114 u32 i, offset, len, val, b_offset, b_count;
7115
7116 offset = eeprom->offset;
7117 len = eeprom->len;
7118 eeprom->len = 0;
7119
7120 eeprom->magic = TG3_EEPROM_MAGIC;
7121
7122 if (offset & 3) {
7123 /* adjustments to start on required 4 byte boundary */
7124 b_offset = offset & 3;
7125 b_count = 4 - b_offset;
7126 if (b_count > len) {
7127 /* i.e. offset=1 len=2 */
7128 b_count = len;
7129 }
7130 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7131 if (ret)
7132 return ret;
7133 val = cpu_to_le32(val);
7134 memcpy(data, ((char*)&val) + b_offset, b_count);
7135 len -= b_count;
7136 offset += b_count;
7137 eeprom->len += b_count;
7138 }
7139
7140 /* read bytes upto the last 4 byte boundary */
7141 pd = &data[eeprom->len];
7142 for (i = 0; i < (len - (len & 3)); i += 4) {
7143 ret = tg3_nvram_read(tp, offset + i, &val);
7144 if (ret) {
7145 eeprom->len += i;
7146 return ret;
7147 }
7148 val = cpu_to_le32(val);
7149 memcpy(pd + i, &val, 4);
7150 }
7151 eeprom->len += i;
7152
7153 if (len & 3) {
7154 /* read last bytes not ending on 4 byte boundary */
7155 pd = &data[eeprom->len];
7156 b_count = len & 3;
7157 b_offset = offset + len - b_count;
7158 ret = tg3_nvram_read(tp, b_offset, &val);
7159 if (ret)
7160 return ret;
7161 val = cpu_to_le32(val);
7162 memcpy(pd, ((char*)&val), b_count);
7163 eeprom->len += b_count;
7164 }
7165 return 0;
7166}
7167
7168static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7169
7170static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7171{
7172 struct tg3 *tp = netdev_priv(dev);
7173 int ret;
7174 u32 offset, len, b_offset, odd_len, start, end;
7175 u8 *buf;
7176
7177 if (eeprom->magic != TG3_EEPROM_MAGIC)
7178 return -EINVAL;
7179
7180 offset = eeprom->offset;
7181 len = eeprom->len;
7182
7183 if ((b_offset = (offset & 3))) {
7184 /* adjustments to start on required 4 byte boundary */
7185 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7186 if (ret)
7187 return ret;
7188 start = cpu_to_le32(start);
7189 len += b_offset;
7190 offset &= ~3;
1c8594b4
MC
7191 if (len < 4)
7192 len = 4;
1da177e4
LT
7193 }
7194
7195 odd_len = 0;
1c8594b4 7196 if (len & 3) {
1da177e4
LT
7197 /* adjustments to end on required 4 byte boundary */
7198 odd_len = 1;
7199 len = (len + 3) & ~3;
7200 ret = tg3_nvram_read(tp, offset+len-4, &end);
7201 if (ret)
7202 return ret;
7203 end = cpu_to_le32(end);
7204 }
7205
7206 buf = data;
7207 if (b_offset || odd_len) {
7208 buf = kmalloc(len, GFP_KERNEL);
7209 if (buf == 0)
7210 return -ENOMEM;
7211 if (b_offset)
7212 memcpy(buf, &start, 4);
7213 if (odd_len)
7214 memcpy(buf+len-4, &end, 4);
7215 memcpy(buf + b_offset, data, eeprom->len);
7216 }
7217
7218 ret = tg3_nvram_write_block(tp, offset, len, buf);
7219
7220 if (buf != data)
7221 kfree(buf);
7222
7223 return ret;
7224}
7225
7226static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7227{
7228 struct tg3 *tp = netdev_priv(dev);
7229
7230 cmd->supported = (SUPPORTED_Autoneg);
7231
7232 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7233 cmd->supported |= (SUPPORTED_1000baseT_Half |
7234 SUPPORTED_1000baseT_Full);
7235
7236 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
7237 cmd->supported |= (SUPPORTED_100baseT_Half |
7238 SUPPORTED_100baseT_Full |
7239 SUPPORTED_10baseT_Half |
7240 SUPPORTED_10baseT_Full |
7241 SUPPORTED_MII);
7242 else
7243 cmd->supported |= SUPPORTED_FIBRE;
7244
7245 cmd->advertising = tp->link_config.advertising;
7246 if (netif_running(dev)) {
7247 cmd->speed = tp->link_config.active_speed;
7248 cmd->duplex = tp->link_config.active_duplex;
7249 }
7250 cmd->port = 0;
7251 cmd->phy_address = PHY_ADDR;
7252 cmd->transceiver = 0;
7253 cmd->autoneg = tp->link_config.autoneg;
7254 cmd->maxtxpkt = 0;
7255 cmd->maxrxpkt = 0;
7256 return 0;
7257}
7258
7259static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7260{
7261 struct tg3 *tp = netdev_priv(dev);
7262
7263 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7264 /* These are the only valid advertisement bits allowed. */
7265 if (cmd->autoneg == AUTONEG_ENABLE &&
7266 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7267 ADVERTISED_1000baseT_Full |
7268 ADVERTISED_Autoneg |
7269 ADVERTISED_FIBRE)))
7270 return -EINVAL;
7271 }
7272
f47c11ee 7273 tg3_full_lock(tp, 0);
1da177e4
LT
7274
7275 tp->link_config.autoneg = cmd->autoneg;
7276 if (cmd->autoneg == AUTONEG_ENABLE) {
7277 tp->link_config.advertising = cmd->advertising;
7278 tp->link_config.speed = SPEED_INVALID;
7279 tp->link_config.duplex = DUPLEX_INVALID;
7280 } else {
7281 tp->link_config.advertising = 0;
7282 tp->link_config.speed = cmd->speed;
7283 tp->link_config.duplex = cmd->duplex;
7284 }
7285
7286 if (netif_running(dev))
7287 tg3_setup_phy(tp, 1);
7288
f47c11ee 7289 tg3_full_unlock(tp);
1da177e4
LT
7290
7291 return 0;
7292}
7293
7294static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7295{
7296 struct tg3 *tp = netdev_priv(dev);
7297
7298 strcpy(info->driver, DRV_MODULE_NAME);
7299 strcpy(info->version, DRV_MODULE_VERSION);
7300 strcpy(info->bus_info, pci_name(tp->pdev));
7301}
7302
7303static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7304{
7305 struct tg3 *tp = netdev_priv(dev);
7306
7307 wol->supported = WAKE_MAGIC;
7308 wol->wolopts = 0;
7309 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7310 wol->wolopts = WAKE_MAGIC;
7311 memset(&wol->sopass, 0, sizeof(wol->sopass));
7312}
7313
7314static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7315{
7316 struct tg3 *tp = netdev_priv(dev);
7317
7318 if (wol->wolopts & ~WAKE_MAGIC)
7319 return -EINVAL;
7320 if ((wol->wolopts & WAKE_MAGIC) &&
7321 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7322 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7323 return -EINVAL;
7324
f47c11ee 7325 spin_lock_bh(&tp->lock);
1da177e4
LT
7326 if (wol->wolopts & WAKE_MAGIC)
7327 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7328 else
7329 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 7330 spin_unlock_bh(&tp->lock);
1da177e4
LT
7331
7332 return 0;
7333}
7334
7335static u32 tg3_get_msglevel(struct net_device *dev)
7336{
7337 struct tg3 *tp = netdev_priv(dev);
7338 return tp->msg_enable;
7339}
7340
7341static void tg3_set_msglevel(struct net_device *dev, u32 value)
7342{
7343 struct tg3 *tp = netdev_priv(dev);
7344 tp->msg_enable = value;
7345}
7346
#if TG3_TSO_SUPPORT != 0
/* ethtool ->set_tso: allow enabling TSO only on TSO-capable chips;
 * disabling is always permitted.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	return ethtool_op_set_tso(dev, value);
}
#endif
7360
7361static int tg3_nway_reset(struct net_device *dev)
7362{
7363 struct tg3 *tp = netdev_priv(dev);
7364 u32 bmcr;
7365 int r;
7366
7367 if (!netif_running(dev))
7368 return -EAGAIN;
7369
c94e3941
MC
7370 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7371 return -EINVAL;
7372
f47c11ee 7373 spin_lock_bh(&tp->lock);
1da177e4
LT
7374 r = -EINVAL;
7375 tg3_readphy(tp, MII_BMCR, &bmcr);
7376 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
c94e3941
MC
7377 ((bmcr & BMCR_ANENABLE) ||
7378 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7379 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7380 BMCR_ANENABLE);
1da177e4
LT
7381 r = 0;
7382 }
f47c11ee 7383 spin_unlock_bh(&tp->lock);
1da177e4
LT
7384
7385 return r;
7386}
7387
7388static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7389{
7390 struct tg3 *tp = netdev_priv(dev);
7391
7392 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7393 ering->rx_mini_max_pending = 0;
7394 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7395
7396 ering->rx_pending = tp->rx_pending;
7397 ering->rx_mini_pending = 0;
7398 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7399 ering->tx_pending = tp->tx_pending;
7400}
7401
7402static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7403{
7404 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7405 int irq_sync = 0;
1da177e4
LT
7406
7407 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7408 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7409 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7410 return -EINVAL;
7411
bbe832c0 7412 if (netif_running(dev)) {
1da177e4 7413 tg3_netif_stop(tp);
bbe832c0
MC
7414 irq_sync = 1;
7415 }
1da177e4 7416
bbe832c0 7417 tg3_full_lock(tp, irq_sync);
1da177e4
LT
7418
7419 tp->rx_pending = ering->rx_pending;
7420
7421 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7422 tp->rx_pending > 63)
7423 tp->rx_pending = 63;
7424 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7425 tp->tx_pending = ering->tx_pending;
7426
7427 if (netif_running(dev)) {
944d980e 7428 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7429 tg3_init_hw(tp);
7430 tg3_netif_start(tp);
7431 }
7432
f47c11ee 7433 tg3_full_unlock(tp);
1da177e4
LT
7434
7435 return 0;
7436}
7437
7438static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7439{
7440 struct tg3 *tp = netdev_priv(dev);
7441
7442 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7443 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7444 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7445}
7446
7447static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7448{
7449 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7450 int irq_sync = 0;
1da177e4 7451
bbe832c0 7452 if (netif_running(dev)) {
1da177e4 7453 tg3_netif_stop(tp);
bbe832c0
MC
7454 irq_sync = 1;
7455 }
1da177e4 7456
bbe832c0 7457 tg3_full_lock(tp, irq_sync);
f47c11ee 7458
1da177e4
LT
7459 if (epause->autoneg)
7460 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7461 else
7462 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7463 if (epause->rx_pause)
7464 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7465 else
7466 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7467 if (epause->tx_pause)
7468 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7469 else
7470 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7471
7472 if (netif_running(dev)) {
944d980e 7473 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7474 tg3_init_hw(tp);
7475 tg3_netif_start(tp);
7476 }
f47c11ee
DM
7477
7478 tg3_full_unlock(tp);
1da177e4
LT
7479
7480 return 0;
7481}
7482
7483static u32 tg3_get_rx_csum(struct net_device *dev)
7484{
7485 struct tg3 *tp = netdev_priv(dev);
7486 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7487}
7488
7489static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7490{
7491 struct tg3 *tp = netdev_priv(dev);
7492
7493 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7494 if (data != 0)
7495 return -EINVAL;
7496 return 0;
7497 }
7498
f47c11ee 7499 spin_lock_bh(&tp->lock);
1da177e4
LT
7500 if (data)
7501 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7502 else
7503 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 7504 spin_unlock_bh(&tp->lock);
1da177e4
LT
7505
7506 return 0;
7507}
7508
7509static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7510{
7511 struct tg3 *tp = netdev_priv(dev);
7512
7513 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7514 if (data != 0)
7515 return -EINVAL;
7516 return 0;
7517 }
7518
7519 if (data)
7520 dev->features |= NETIF_F_IP_CSUM;
7521 else
7522 dev->features &= ~NETIF_F_IP_CSUM;
7523
7524 return 0;
7525}
7526
7527static int tg3_get_stats_count (struct net_device *dev)
7528{
7529 return TG3_NUM_STATS;
7530}
7531
4cafd3f5
MC
7532static int tg3_get_test_count (struct net_device *dev)
7533{
7534 return TG3_NUM_TEST;
7535}
7536
1da177e4
LT
7537static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7538{
7539 switch (stringset) {
7540 case ETH_SS_STATS:
7541 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7542 break;
4cafd3f5
MC
7543 case ETH_SS_TEST:
7544 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7545 break;
1da177e4
LT
7546 default:
7547 WARN_ON(1); /* we need a WARN() */
7548 break;
7549 }
7550}
7551
4009a93d
MC
7552static int tg3_phys_id(struct net_device *dev, u32 data)
7553{
7554 struct tg3 *tp = netdev_priv(dev);
7555 int i;
7556
7557 if (!netif_running(tp->dev))
7558 return -EAGAIN;
7559
7560 if (data == 0)
7561 data = 2;
7562
7563 for (i = 0; i < (data * 2); i++) {
7564 if ((i % 2) == 0)
7565 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7566 LED_CTRL_1000MBPS_ON |
7567 LED_CTRL_100MBPS_ON |
7568 LED_CTRL_10MBPS_ON |
7569 LED_CTRL_TRAFFIC_OVERRIDE |
7570 LED_CTRL_TRAFFIC_BLINK |
7571 LED_CTRL_TRAFFIC_LED);
7572
7573 else
7574 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7575 LED_CTRL_TRAFFIC_OVERRIDE);
7576
7577 if (msleep_interruptible(500))
7578 break;
7579 }
7580 tw32(MAC_LED_CTRL, tp->led_ctrl);
7581 return 0;
7582}
7583
1da177e4
LT
7584static void tg3_get_ethtool_stats (struct net_device *dev,
7585 struct ethtool_stats *estats, u64 *tmp_stats)
7586{
7587 struct tg3 *tp = netdev_priv(dev);
7588 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7589}
7590
566f86ad
MC
7591#define NVRAM_TEST_SIZE 0x100
7592
7593static int tg3_test_nvram(struct tg3 *tp)
7594{
7595 u32 *buf, csum;
7596 int i, j, err = 0;
7597
7598 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7599 if (buf == NULL)
7600 return -ENOMEM;
7601
7602 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7603 u32 val;
7604
7605 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7606 break;
7607 buf[j] = cpu_to_le32(val);
7608 }
7609 if (i < NVRAM_TEST_SIZE)
7610 goto out;
7611
7612 err = -EIO;
7613 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7614 goto out;
7615
7616 /* Bootstrap checksum at offset 0x10 */
7617 csum = calc_crc((unsigned char *) buf, 0x10);
7618 if(csum != cpu_to_le32(buf[0x10/4]))
7619 goto out;
7620
7621 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7622 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7623 if (csum != cpu_to_le32(buf[0xfc/4]))
7624 goto out;
7625
7626 err = 0;
7627
7628out:
7629 kfree(buf);
7630 return err;
7631}
7632
ca43007a
MC
7633#define TG3_SERDES_TIMEOUT_SEC 2
7634#define TG3_COPPER_TIMEOUT_SEC 6
7635
7636static int tg3_test_link(struct tg3 *tp)
7637{
7638 int i, max;
7639
7640 if (!netif_running(tp->dev))
7641 return -ENODEV;
7642
4c987487 7643 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
ca43007a
MC
7644 max = TG3_SERDES_TIMEOUT_SEC;
7645 else
7646 max = TG3_COPPER_TIMEOUT_SEC;
7647
7648 for (i = 0; i < max; i++) {
7649 if (netif_carrier_ok(tp->dev))
7650 return 0;
7651
7652 if (msleep_interruptible(1000))
7653 break;
7654 }
7655
7656 return -EIO;
7657}
7658
a71116d1
MC
7659/* Only test the commonly used registers */
7660static int tg3_test_registers(struct tg3 *tp)
7661{
7662 int i, is_5705;
7663 u32 offset, read_mask, write_mask, val, save_val, read_val;
7664 static struct {
7665 u16 offset;
7666 u16 flags;
7667#define TG3_FL_5705 0x1
7668#define TG3_FL_NOT_5705 0x2
7669#define TG3_FL_NOT_5788 0x4
7670 u32 read_mask;
7671 u32 write_mask;
7672 } reg_tbl[] = {
7673 /* MAC Control Registers */
7674 { MAC_MODE, TG3_FL_NOT_5705,
7675 0x00000000, 0x00ef6f8c },
7676 { MAC_MODE, TG3_FL_5705,
7677 0x00000000, 0x01ef6b8c },
7678 { MAC_STATUS, TG3_FL_NOT_5705,
7679 0x03800107, 0x00000000 },
7680 { MAC_STATUS, TG3_FL_5705,
7681 0x03800100, 0x00000000 },
7682 { MAC_ADDR_0_HIGH, 0x0000,
7683 0x00000000, 0x0000ffff },
7684 { MAC_ADDR_0_LOW, 0x0000,
7685 0x00000000, 0xffffffff },
7686 { MAC_RX_MTU_SIZE, 0x0000,
7687 0x00000000, 0x0000ffff },
7688 { MAC_TX_MODE, 0x0000,
7689 0x00000000, 0x00000070 },
7690 { MAC_TX_LENGTHS, 0x0000,
7691 0x00000000, 0x00003fff },
7692 { MAC_RX_MODE, TG3_FL_NOT_5705,
7693 0x00000000, 0x000007fc },
7694 { MAC_RX_MODE, TG3_FL_5705,
7695 0x00000000, 0x000007dc },
7696 { MAC_HASH_REG_0, 0x0000,
7697 0x00000000, 0xffffffff },
7698 { MAC_HASH_REG_1, 0x0000,
7699 0x00000000, 0xffffffff },
7700 { MAC_HASH_REG_2, 0x0000,
7701 0x00000000, 0xffffffff },
7702 { MAC_HASH_REG_3, 0x0000,
7703 0x00000000, 0xffffffff },
7704
7705 /* Receive Data and Receive BD Initiator Control Registers. */
7706 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7707 0x00000000, 0xffffffff },
7708 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7709 0x00000000, 0xffffffff },
7710 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7711 0x00000000, 0x00000003 },
7712 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7713 0x00000000, 0xffffffff },
7714 { RCVDBDI_STD_BD+0, 0x0000,
7715 0x00000000, 0xffffffff },
7716 { RCVDBDI_STD_BD+4, 0x0000,
7717 0x00000000, 0xffffffff },
7718 { RCVDBDI_STD_BD+8, 0x0000,
7719 0x00000000, 0xffff0002 },
7720 { RCVDBDI_STD_BD+0xc, 0x0000,
7721 0x00000000, 0xffffffff },
7722
7723 /* Receive BD Initiator Control Registers. */
7724 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7725 0x00000000, 0xffffffff },
7726 { RCVBDI_STD_THRESH, TG3_FL_5705,
7727 0x00000000, 0x000003ff },
7728 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7729 0x00000000, 0xffffffff },
7730
7731 /* Host Coalescing Control Registers. */
7732 { HOSTCC_MODE, TG3_FL_NOT_5705,
7733 0x00000000, 0x00000004 },
7734 { HOSTCC_MODE, TG3_FL_5705,
7735 0x00000000, 0x000000f6 },
7736 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7737 0x00000000, 0xffffffff },
7738 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7739 0x00000000, 0x000003ff },
7740 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7741 0x00000000, 0xffffffff },
7742 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7743 0x00000000, 0x000003ff },
7744 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7745 0x00000000, 0xffffffff },
7746 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7747 0x00000000, 0x000000ff },
7748 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7749 0x00000000, 0xffffffff },
7750 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7751 0x00000000, 0x000000ff },
7752 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7753 0x00000000, 0xffffffff },
7754 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7755 0x00000000, 0xffffffff },
7756 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7757 0x00000000, 0xffffffff },
7758 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7759 0x00000000, 0x000000ff },
7760 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7761 0x00000000, 0xffffffff },
7762 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7763 0x00000000, 0x000000ff },
7764 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7765 0x00000000, 0xffffffff },
7766 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7767 0x00000000, 0xffffffff },
7768 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7769 0x00000000, 0xffffffff },
7770 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7771 0x00000000, 0xffffffff },
7772 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7773 0x00000000, 0xffffffff },
7774 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7775 0xffffffff, 0x00000000 },
7776 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7777 0xffffffff, 0x00000000 },
7778
7779 /* Buffer Manager Control Registers. */
7780 { BUFMGR_MB_POOL_ADDR, 0x0000,
7781 0x00000000, 0x007fff80 },
7782 { BUFMGR_MB_POOL_SIZE, 0x0000,
7783 0x00000000, 0x007fffff },
7784 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7785 0x00000000, 0x0000003f },
7786 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7787 0x00000000, 0x000001ff },
7788 { BUFMGR_MB_HIGH_WATER, 0x0000,
7789 0x00000000, 0x000001ff },
7790 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7791 0xffffffff, 0x00000000 },
7792 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7793 0xffffffff, 0x00000000 },
7794
7795 /* Mailbox Registers */
7796 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7797 0x00000000, 0x000001ff },
7798 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7799 0x00000000, 0x000001ff },
7800 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7801 0x00000000, 0x000007ff },
7802 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7803 0x00000000, 0x000001ff },
7804
7805 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7806 };
7807
7808 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7809 is_5705 = 1;
7810 else
7811 is_5705 = 0;
7812
7813 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7814 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7815 continue;
7816
7817 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7818 continue;
7819
7820 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7821 (reg_tbl[i].flags & TG3_FL_NOT_5788))
7822 continue;
7823
7824 offset = (u32) reg_tbl[i].offset;
7825 read_mask = reg_tbl[i].read_mask;
7826 write_mask = reg_tbl[i].write_mask;
7827
7828 /* Save the original register content */
7829 save_val = tr32(offset);
7830
7831 /* Determine the read-only value. */
7832 read_val = save_val & read_mask;
7833
7834 /* Write zero to the register, then make sure the read-only bits
7835 * are not changed and the read/write bits are all zeros.
7836 */
7837 tw32(offset, 0);
7838
7839 val = tr32(offset);
7840
7841 /* Test the read-only and read/write bits. */
7842 if (((val & read_mask) != read_val) || (val & write_mask))
7843 goto out;
7844
7845 /* Write ones to all the bits defined by RdMask and WrMask, then
7846 * make sure the read-only bits are not changed and the
7847 * read/write bits are all ones.
7848 */
7849 tw32(offset, read_mask | write_mask);
7850
7851 val = tr32(offset);
7852
7853 /* Test the read-only bits. */
7854 if ((val & read_mask) != read_val)
7855 goto out;
7856
7857 /* Test the read/write bits. */
7858 if ((val & write_mask) != write_mask)
7859 goto out;
7860
7861 tw32(offset, save_val);
7862 }
7863
7864 return 0;
7865
7866out:
7867 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7868 tw32(offset, save_val);
7869 return -EIO;
7870}
7871
7942e1db
MC
7872static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7873{
7874 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7875 int i;
7876 u32 j;
7877
7878 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7879 for (j = 0; j < len; j += 4) {
7880 u32 val;
7881
7882 tg3_write_mem(tp, offset + j, test_pattern[i]);
7883 tg3_read_mem(tp, offset + j, &val);
7884 if (val != test_pattern[i])
7885 return -EIO;
7886 }
7887 }
7888 return 0;
7889}
7890
7891static int tg3_test_memory(struct tg3 *tp)
7892{
7893 static struct mem_entry {
7894 u32 offset;
7895 u32 len;
7896 } mem_tbl_570x[] = {
7897 { 0x00000000, 0x01000},
7898 { 0x00002000, 0x1c000},
7899 { 0xffffffff, 0x00000}
7900 }, mem_tbl_5705[] = {
7901 { 0x00000100, 0x0000c},
7902 { 0x00000200, 0x00008},
7903 { 0x00000b50, 0x00400},
7904 { 0x00004000, 0x00800},
7905 { 0x00006000, 0x01000},
7906 { 0x00008000, 0x02000},
7907 { 0x00010000, 0x0e000},
7908 { 0xffffffff, 0x00000}
7909 };
7910 struct mem_entry *mem_tbl;
7911 int err = 0;
7912 int i;
7913
7914 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7915 mem_tbl = mem_tbl_5705;
7916 else
7917 mem_tbl = mem_tbl_570x;
7918
7919 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7920 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7921 mem_tbl[i].len)) != 0)
7922 break;
7923 }
7924
7925 return err;
7926}
7927
9f40dead
MC
7928#define TG3_MAC_LOOPBACK 0
7929#define TG3_PHY_LOOPBACK 1
7930
7931static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
c76949a6 7932{
9f40dead 7933 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
c76949a6
MC
7934 u32 desc_idx;
7935 struct sk_buff *skb, *rx_skb;
7936 u8 *tx_data;
7937 dma_addr_t map;
7938 int num_pkts, tx_len, rx_len, i, err;
7939 struct tg3_rx_buffer_desc *desc;
7940
9f40dead 7941 if (loopback_mode == TG3_MAC_LOOPBACK) {
c94e3941
MC
7942 /* HW errata - mac loopback fails in some cases on 5780.
7943 * Normal traffic and PHY loopback are not affected by
7944 * errata.
7945 */
7946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
7947 return 0;
7948
9f40dead
MC
7949 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7950 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7951 MAC_MODE_PORT_MODE_GMII;
7952 tw32(MAC_MODE, mac_mode);
7953 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
c94e3941
MC
7954 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7955 BMCR_SPEED1000);
7956 udelay(40);
7957 /* reset to prevent losing 1st rx packet intermittently */
7958 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7959 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7960 udelay(10);
7961 tw32_f(MAC_RX_MODE, tp->rx_mode);
7962 }
9f40dead
MC
7963 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7964 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7965 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7966 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7967 tw32(MAC_MODE, mac_mode);
9f40dead
MC
7968 }
7969 else
7970 return -EINVAL;
c76949a6
MC
7971
7972 err = -EIO;
7973
c76949a6
MC
7974 tx_len = 1514;
7975 skb = dev_alloc_skb(tx_len);
7976 tx_data = skb_put(skb, tx_len);
7977 memcpy(tx_data, tp->dev->dev_addr, 6);
7978 memset(tx_data + 6, 0x0, 8);
7979
7980 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7981
7982 for (i = 14; i < tx_len; i++)
7983 tx_data[i] = (u8) (i & 0xff);
7984
7985 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7986
7987 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7988 HOSTCC_MODE_NOW);
7989
7990 udelay(10);
7991
7992 rx_start_idx = tp->hw_status->idx[0].rx_producer;
7993
c76949a6
MC
7994 num_pkts = 0;
7995
9f40dead 7996 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
c76949a6 7997
9f40dead 7998 tp->tx_prod++;
c76949a6
MC
7999 num_pkts++;
8000
9f40dead
MC
8001 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8002 tp->tx_prod);
09ee929c 8003 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
c76949a6
MC
8004
8005 udelay(10);
8006
8007 for (i = 0; i < 10; i++) {
8008 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8009 HOSTCC_MODE_NOW);
8010
8011 udelay(10);
8012
8013 tx_idx = tp->hw_status->idx[0].tx_consumer;
8014 rx_idx = tp->hw_status->idx[0].rx_producer;
9f40dead 8015 if ((tx_idx == tp->tx_prod) &&
c76949a6
MC
8016 (rx_idx == (rx_start_idx + num_pkts)))
8017 break;
8018 }
8019
8020 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8021 dev_kfree_skb(skb);
8022
9f40dead 8023 if (tx_idx != tp->tx_prod)
c76949a6
MC
8024 goto out;
8025
8026 if (rx_idx != rx_start_idx + num_pkts)
8027 goto out;
8028
8029 desc = &tp->rx_rcb[rx_start_idx];
8030 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8031 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8032 if (opaque_key != RXD_OPAQUE_RING_STD)
8033 goto out;
8034
8035 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8036 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8037 goto out;
8038
8039 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8040 if (rx_len != tx_len)
8041 goto out;
8042
8043 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8044
8045 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8046 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8047
8048 for (i = 14; i < tx_len; i++) {
8049 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8050 goto out;
8051 }
8052 err = 0;
8053
8054 /* tg3_free_rings will unmap and free the rx_skb */
8055out:
8056 return err;
8057}
8058
9f40dead
MC
8059#define TG3_MAC_LOOPBACK_FAILED 1
8060#define TG3_PHY_LOOPBACK_FAILED 2
8061#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8062 TG3_PHY_LOOPBACK_FAILED)
8063
8064static int tg3_test_loopback(struct tg3 *tp)
8065{
8066 int err = 0;
8067
8068 if (!netif_running(tp->dev))
8069 return TG3_LOOPBACK_FAILED;
8070
8071 tg3_reset_hw(tp);
8072
8073 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8074 err |= TG3_MAC_LOOPBACK_FAILED;
8075 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8076 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8077 err |= TG3_PHY_LOOPBACK_FAILED;
8078 }
8079
8080 return err;
8081}
8082
4cafd3f5
MC
8083static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8084 u64 *data)
8085{
566f86ad
MC
8086 struct tg3 *tp = netdev_priv(dev);
8087
8088 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8089
8090 if (tg3_test_nvram(tp) != 0) {
8091 etest->flags |= ETH_TEST_FL_FAILED;
8092 data[0] = 1;
8093 }
ca43007a
MC
8094 if (tg3_test_link(tp) != 0) {
8095 etest->flags |= ETH_TEST_FL_FAILED;
8096 data[1] = 1;
8097 }
a71116d1 8098 if (etest->flags & ETH_TEST_FL_OFFLINE) {
bbe832c0
MC
8099 int irq_sync = 0;
8100
8101 if (netif_running(dev)) {
a71116d1 8102 tg3_netif_stop(tp);
bbe832c0
MC
8103 irq_sync = 1;
8104 }
a71116d1 8105
bbe832c0 8106 tg3_full_lock(tp, irq_sync);
a71116d1
MC
8107
8108 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8109 tg3_nvram_lock(tp);
8110 tg3_halt_cpu(tp, RX_CPU_BASE);
8111 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8112 tg3_halt_cpu(tp, TX_CPU_BASE);
8113 tg3_nvram_unlock(tp);
8114
8115 if (tg3_test_registers(tp) != 0) {
8116 etest->flags |= ETH_TEST_FL_FAILED;
8117 data[2] = 1;
8118 }
7942e1db
MC
8119 if (tg3_test_memory(tp) != 0) {
8120 etest->flags |= ETH_TEST_FL_FAILED;
8121 data[3] = 1;
8122 }
9f40dead 8123 if ((data[4] = tg3_test_loopback(tp)) != 0)
c76949a6 8124 etest->flags |= ETH_TEST_FL_FAILED;
a71116d1 8125
f47c11ee
DM
8126 tg3_full_unlock(tp);
8127
d4bc3927
MC
8128 if (tg3_test_interrupt(tp) != 0) {
8129 etest->flags |= ETH_TEST_FL_FAILED;
8130 data[5] = 1;
8131 }
f47c11ee
DM
8132
8133 tg3_full_lock(tp, 0);
d4bc3927 8134
a71116d1
MC
8135 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8136 if (netif_running(dev)) {
8137 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8138 tg3_init_hw(tp);
8139 tg3_netif_start(tp);
8140 }
f47c11ee
DM
8141
8142 tg3_full_unlock(tp);
a71116d1 8143 }
4cafd3f5
MC
8144}
8145
1da177e4
LT
8146static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8147{
8148 struct mii_ioctl_data *data = if_mii(ifr);
8149 struct tg3 *tp = netdev_priv(dev);
8150 int err;
8151
8152 switch(cmd) {
8153 case SIOCGMIIPHY:
8154 data->phy_id = PHY_ADDR;
8155
8156 /* fallthru */
8157 case SIOCGMIIREG: {
8158 u32 mii_regval;
8159
8160 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8161 break; /* We have no PHY */
8162
f47c11ee 8163 spin_lock_bh(&tp->lock);
1da177e4 8164 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 8165 spin_unlock_bh(&tp->lock);
1da177e4
LT
8166
8167 data->val_out = mii_regval;
8168
8169 return err;
8170 }
8171
8172 case SIOCSMIIREG:
8173 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8174 break; /* We have no PHY */
8175
8176 if (!capable(CAP_NET_ADMIN))
8177 return -EPERM;
8178
f47c11ee 8179 spin_lock_bh(&tp->lock);
1da177e4 8180 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 8181 spin_unlock_bh(&tp->lock);
1da177e4
LT
8182
8183 return err;
8184
8185 default:
8186 /* do nothing */
8187 break;
8188 }
8189 return -EOPNOTSUPP;
8190}
8191
#if TG3_VLAN_TAG_USED
/* VLAN acceleration: record the new vlan_group and update the
 * RX_MODE_KEEP_VLAN_TAG setting accordingly.  Runs under the full lock
 * because it touches chip state.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);
}

/* VLAN acceleration: drop the per-vid device pointer when a VLAN id is
 * unregistered.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);
}
#endif
8217
15f9850d
DM
8218static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8219{
8220 struct tg3 *tp = netdev_priv(dev);
8221
8222 memcpy(ec, &tp->coal, sizeof(*ec));
8223 return 0;
8224}
8225
d244c892
MC
8226static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8227{
8228 struct tg3 *tp = netdev_priv(dev);
8229 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8230 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8231
8232 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8233 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8234 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8235 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8236 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8237 }
8238
8239 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8240 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8241 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8242 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8243 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8244 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8245 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8246 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8247 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8248 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8249 return -EINVAL;
8250
8251 /* No rx interrupts will be generated if both are zero */
8252 if ((ec->rx_coalesce_usecs == 0) &&
8253 (ec->rx_max_coalesced_frames == 0))
8254 return -EINVAL;
8255
8256 /* No tx interrupts will be generated if both are zero */
8257 if ((ec->tx_coalesce_usecs == 0) &&
8258 (ec->tx_max_coalesced_frames == 0))
8259 return -EINVAL;
8260
8261 /* Only copy relevant parameters, ignore all others. */
8262 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8263 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8264 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8265 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8266 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8267 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8268 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8269 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8270 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8271
8272 if (netif_running(dev)) {
8273 tg3_full_lock(tp, 0);
8274 __tg3_set_coalesce(tp, &tp->coal);
8275 tg3_full_unlock(tp);
8276 }
8277 return 0;
8278}
8279
1da177e4
LT
8280static struct ethtool_ops tg3_ethtool_ops = {
8281 .get_settings = tg3_get_settings,
8282 .set_settings = tg3_set_settings,
8283 .get_drvinfo = tg3_get_drvinfo,
8284 .get_regs_len = tg3_get_regs_len,
8285 .get_regs = tg3_get_regs,
8286 .get_wol = tg3_get_wol,
8287 .set_wol = tg3_set_wol,
8288 .get_msglevel = tg3_get_msglevel,
8289 .set_msglevel = tg3_set_msglevel,
8290 .nway_reset = tg3_nway_reset,
8291 .get_link = ethtool_op_get_link,
8292 .get_eeprom_len = tg3_get_eeprom_len,
8293 .get_eeprom = tg3_get_eeprom,
8294 .set_eeprom = tg3_set_eeprom,
8295 .get_ringparam = tg3_get_ringparam,
8296 .set_ringparam = tg3_set_ringparam,
8297 .get_pauseparam = tg3_get_pauseparam,
8298 .set_pauseparam = tg3_set_pauseparam,
8299 .get_rx_csum = tg3_get_rx_csum,
8300 .set_rx_csum = tg3_set_rx_csum,
8301 .get_tx_csum = ethtool_op_get_tx_csum,
8302 .set_tx_csum = tg3_set_tx_csum,
8303 .get_sg = ethtool_op_get_sg,
8304 .set_sg = ethtool_op_set_sg,
8305#if TG3_TSO_SUPPORT != 0
8306 .get_tso = ethtool_op_get_tso,
8307 .set_tso = tg3_set_tso,
8308#endif
4cafd3f5
MC
8309 .self_test_count = tg3_get_test_count,
8310 .self_test = tg3_self_test,
1da177e4 8311 .get_strings = tg3_get_strings,
4009a93d 8312 .phys_id = tg3_phys_id,
1da177e4
LT
8313 .get_stats_count = tg3_get_stats_count,
8314 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 8315 .get_coalesce = tg3_get_coalesce,
d244c892 8316 .set_coalesce = tg3_set_coalesce,
2ff43697 8317 .get_perm_addr = ethtool_op_get_perm_addr,
1da177e4
LT
8318};
8319
8320static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8321{
8322 u32 cursize, val;
8323
8324 tp->nvram_size = EEPROM_CHIP_SIZE;
8325
8326 if (tg3_nvram_read(tp, 0, &val) != 0)
8327 return;
8328
8329 if (swab32(val) != TG3_EEPROM_MAGIC)
8330 return;
8331
8332 /*
8333 * Size the chip by reading offsets at increasing powers of two.
8334 * When we encounter our validation signature, we know the addressing
8335 * has wrapped around, and thus have our chip size.
8336 */
8337 cursize = 0x800;
8338
8339 while (cursize < tp->nvram_size) {
8340 if (tg3_nvram_read(tp, cursize, &val) != 0)
8341 return;
8342
8343 if (swab32(val) == TG3_EEPROM_MAGIC)
8344 break;
8345
8346 cursize <<= 1;
8347 }
8348
8349 tp->nvram_size = cursize;
8350}
8351
8352static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8353{
8354 u32 val;
8355
8356 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8357 if (val != 0) {
8358 tp->nvram_size = (val >> 16) * 1024;
8359 return;
8360 }
8361 }
8362 tp->nvram_size = 0x20000;
8363}
8364
8365static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8366{
8367 u32 nvcfg1;
8368
8369 nvcfg1 = tr32(NVRAM_CFG1);
8370 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8371 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8372 }
8373 else {
8374 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8375 tw32(NVRAM_CFG1, nvcfg1);
8376 }
8377
4c987487
MC
8378 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8379 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)) {
1da177e4
LT
8380 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8381 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8382 tp->nvram_jedecnum = JEDEC_ATMEL;
8383 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8384 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8385 break;
8386 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8387 tp->nvram_jedecnum = JEDEC_ATMEL;
8388 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8389 break;
8390 case FLASH_VENDOR_ATMEL_EEPROM:
8391 tp->nvram_jedecnum = JEDEC_ATMEL;
8392 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8393 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8394 break;
8395 case FLASH_VENDOR_ST:
8396 tp->nvram_jedecnum = JEDEC_ST;
8397 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8398 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8399 break;
8400 case FLASH_VENDOR_SAIFUN:
8401 tp->nvram_jedecnum = JEDEC_SAIFUN;
8402 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8403 break;
8404 case FLASH_VENDOR_SST_SMALL:
8405 case FLASH_VENDOR_SST_LARGE:
8406 tp->nvram_jedecnum = JEDEC_SST;
8407 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8408 break;
8409 }
8410 }
8411 else {
8412 tp->nvram_jedecnum = JEDEC_ATMEL;
8413 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8414 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8415 }
8416}
8417
361b4ac2
MC
8418static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8419{
8420 u32 nvcfg1;
8421
8422 nvcfg1 = tr32(NVRAM_CFG1);
8423
e6af301b
MC
8424 /* NVRAM protection for TPM */
8425 if (nvcfg1 & (1 << 27))
8426 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8427
361b4ac2
MC
8428 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8429 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8430 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8431 tp->nvram_jedecnum = JEDEC_ATMEL;
8432 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8433 break;
8434 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8435 tp->nvram_jedecnum = JEDEC_ATMEL;
8436 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8437 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8438 break;
8439 case FLASH_5752VENDOR_ST_M45PE10:
8440 case FLASH_5752VENDOR_ST_M45PE20:
8441 case FLASH_5752VENDOR_ST_M45PE40:
8442 tp->nvram_jedecnum = JEDEC_ST;
8443 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8444 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8445 break;
8446 }
8447
8448 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8449 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8450 case FLASH_5752PAGE_SIZE_256:
8451 tp->nvram_pagesize = 256;
8452 break;
8453 case FLASH_5752PAGE_SIZE_512:
8454 tp->nvram_pagesize = 512;
8455 break;
8456 case FLASH_5752PAGE_SIZE_1K:
8457 tp->nvram_pagesize = 1024;
8458 break;
8459 case FLASH_5752PAGE_SIZE_2K:
8460 tp->nvram_pagesize = 2048;
8461 break;
8462 case FLASH_5752PAGE_SIZE_4K:
8463 tp->nvram_pagesize = 4096;
8464 break;
8465 case FLASH_5752PAGE_SIZE_264:
8466 tp->nvram_pagesize = 264;
8467 break;
8468 }
8469 }
8470 else {
8471 /* For eeprom, set pagesize to maximum eeprom size */
8472 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8473
8474 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8475 tw32(NVRAM_CFG1, nvcfg1);
8476 }
8477}
8478
1da177e4
LT
8479/* Chips other than 5700/5701 use the NVRAM for fetching info. */
8480static void __devinit tg3_nvram_init(struct tg3 *tp)
8481{
8482 int j;
8483
8484 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8485 return;
8486
8487 tw32_f(GRC_EEPROM_ADDR,
8488 (EEPROM_ADDR_FSM_RESET |
8489 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8490 EEPROM_ADDR_CLKPERD_SHIFT)));
8491
8492 /* XXX schedule_timeout() ... */
8493 for (j = 0; j < 100; j++)
8494 udelay(10);
8495
8496 /* Enable seeprom accesses. */
8497 tw32_f(GRC_LOCAL_CTRL,
8498 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8499 udelay(100);
8500
8501 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8502 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8503 tp->tg3_flags |= TG3_FLAG_NVRAM;
8504
e6af301b 8505 tg3_enable_nvram_access(tp);
1da177e4 8506
361b4ac2
MC
8507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8508 tg3_get_5752_nvram_info(tp);
8509 else
8510 tg3_get_nvram_info(tp);
8511
1da177e4
LT
8512 tg3_get_nvram_size(tp);
8513
e6af301b 8514 tg3_disable_nvram_access(tp);
1da177e4
LT
8515
8516 } else {
8517 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8518
8519 tg3_get_eeprom_size(tp);
8520 }
8521}
8522
8523static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8524 u32 offset, u32 *val)
8525{
8526 u32 tmp;
8527 int i;
8528
8529 if (offset > EEPROM_ADDR_ADDR_MASK ||
8530 (offset % 4) != 0)
8531 return -EINVAL;
8532
8533 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8534 EEPROM_ADDR_DEVID_MASK |
8535 EEPROM_ADDR_READ);
8536 tw32(GRC_EEPROM_ADDR,
8537 tmp |
8538 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8539 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8540 EEPROM_ADDR_ADDR_MASK) |
8541 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8542
8543 for (i = 0; i < 10000; i++) {
8544 tmp = tr32(GRC_EEPROM_ADDR);
8545
8546 if (tmp & EEPROM_ADDR_COMPLETE)
8547 break;
8548 udelay(100);
8549 }
8550 if (!(tmp & EEPROM_ADDR_COMPLETE))
8551 return -EBUSY;
8552
8553 *val = tr32(GRC_EEPROM_DATA);
8554 return 0;
8555}
8556
8557#define NVRAM_CMD_TIMEOUT 10000
8558
8559static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8560{
8561 int i;
8562
8563 tw32(NVRAM_CMD, nvram_cmd);
8564 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8565 udelay(10);
8566 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8567 udelay(10);
8568 break;
8569 }
8570 }
8571 if (i == NVRAM_CMD_TIMEOUT) {
8572 return -EBUSY;
8573 }
8574 return 0;
8575}
8576
8577static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8578{
8579 int ret;
8580
8581 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8582 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8583 return -EINVAL;
8584 }
8585
8586 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8587 return tg3_nvram_read_using_eeprom(tp, offset, val);
8588
8589 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8590 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8591 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8592
8593 offset = ((offset / tp->nvram_pagesize) <<
8594 ATMEL_AT45DB0X1B_PAGE_POS) +
8595 (offset % tp->nvram_pagesize);
8596 }
8597
8598 if (offset > NVRAM_ADDR_MSK)
8599 return -EINVAL;
8600
8601 tg3_nvram_lock(tp);
8602
e6af301b 8603 tg3_enable_nvram_access(tp);
1da177e4
LT
8604
8605 tw32(NVRAM_ADDR, offset);
8606 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8607 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8608
8609 if (ret == 0)
8610 *val = swab32(tr32(NVRAM_RDDATA));
8611
8612 tg3_nvram_unlock(tp);
8613
e6af301b 8614 tg3_disable_nvram_access(tp);
1da177e4
LT
8615
8616 return ret;
8617}
8618
8619static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8620 u32 offset, u32 len, u8 *buf)
8621{
8622 int i, j, rc = 0;
8623 u32 val;
8624
8625 for (i = 0; i < len; i += 4) {
8626 u32 addr, data;
8627
8628 addr = offset + i;
8629
8630 memcpy(&data, buf + i, 4);
8631
8632 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8633
8634 val = tr32(GRC_EEPROM_ADDR);
8635 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8636
8637 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8638 EEPROM_ADDR_READ);
8639 tw32(GRC_EEPROM_ADDR, val |
8640 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8641 (addr & EEPROM_ADDR_ADDR_MASK) |
8642 EEPROM_ADDR_START |
8643 EEPROM_ADDR_WRITE);
8644
8645 for (j = 0; j < 10000; j++) {
8646 val = tr32(GRC_EEPROM_ADDR);
8647
8648 if (val & EEPROM_ADDR_COMPLETE)
8649 break;
8650 udelay(100);
8651 }
8652 if (!(val & EEPROM_ADDR_COMPLETE)) {
8653 rc = -EBUSY;
8654 break;
8655 }
8656 }
8657
8658 return rc;
8659}
8660
8661/* offset and length are dword aligned */
8662static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8663 u8 *buf)
8664{
8665 int ret = 0;
8666 u32 pagesize = tp->nvram_pagesize;
8667 u32 pagemask = pagesize - 1;
8668 u32 nvram_cmd;
8669 u8 *tmp;
8670
8671 tmp = kmalloc(pagesize, GFP_KERNEL);
8672 if (tmp == NULL)
8673 return -ENOMEM;
8674
8675 while (len) {
8676 int j;
e6af301b 8677 u32 phy_addr, page_off, size;
1da177e4
LT
8678
8679 phy_addr = offset & ~pagemask;
8680
8681 for (j = 0; j < pagesize; j += 4) {
8682 if ((ret = tg3_nvram_read(tp, phy_addr + j,
8683 (u32 *) (tmp + j))))
8684 break;
8685 }
8686 if (ret)
8687 break;
8688
8689 page_off = offset & pagemask;
8690 size = pagesize;
8691 if (len < size)
8692 size = len;
8693
8694 len -= size;
8695
8696 memcpy(tmp + page_off, buf, size);
8697
8698 offset = offset + (pagesize - page_off);
8699
e6af301b 8700 tg3_enable_nvram_access(tp);
1da177e4
LT
8701
8702 /*
8703 * Before we can erase the flash page, we need
8704 * to issue a special "write enable" command.
8705 */
8706 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8707
8708 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8709 break;
8710
8711 /* Erase the target page */
8712 tw32(NVRAM_ADDR, phy_addr);
8713
8714 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8715 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8716
8717 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8718 break;
8719
8720 /* Issue another write enable to start the write. */
8721 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8722
8723 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8724 break;
8725
8726 for (j = 0; j < pagesize; j += 4) {
8727 u32 data;
8728
8729 data = *((u32 *) (tmp + j));
8730 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8731
8732 tw32(NVRAM_ADDR, phy_addr + j);
8733
8734 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8735 NVRAM_CMD_WR;
8736
8737 if (j == 0)
8738 nvram_cmd |= NVRAM_CMD_FIRST;
8739 else if (j == (pagesize - 4))
8740 nvram_cmd |= NVRAM_CMD_LAST;
8741
8742 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8743 break;
8744 }
8745 if (ret)
8746 break;
8747 }
8748
8749 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8750 tg3_nvram_exec_cmd(tp, nvram_cmd);
8751
8752 kfree(tmp);
8753
8754 return ret;
8755}
8756
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	/* Write one 32-bit word per NVRAM command; the controller's
	 * buffered write path handles page programming as long as the
	 * first/last words of each page are tagged correctly below.
	 */
	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Atmel AT45DB0X1B parts address pages via a shifted page
		 * index plus the in-page offset rather than a flat byte
		 * address.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
			(tp->nvram_jedecnum == JEDEC_ATMEL)) {

			phy_addr = ((offset / tp->nvram_pagesize) <<
				    ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
		}
		else {
			phy_addr = offset;
		}

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Tag page boundaries (and the final word) so the
		 * controller knows when to start/commit a page.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		else if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST flash parts (except when driven by a 5752) need an
		 * explicit write-enable before the first word of a page.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
8813
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Sun onboard 570X parts have no writable NVRAM. */
	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
		return -EINVAL;
	}

	/* GPIO1 drives the eeprom write-protect line on these boards;
	 * drop it for the duration of the write (restored below).
	 */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		tg3_nvram_lock(tp);

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		/* Buffered flash parts and plain eeproms can be written a
		 * word at a time; unbuffered flash needs a full
		 * read-erase-rewrite of each page.
		 */
		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert eeprom write protect. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
8871
/* Mapping from PCI subsystem vendor/device IDs to the PHY fitted on
 * known boards.  Used by tg3_phy_probe() as a fallback when the chip's
 * MII ID registers and the NVRAM both fail to identify the PHY; an
 * entry with phy_id 0 is treated as a serdes (fiber) board there.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
8914
8915static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8916{
8917 int i;
8918
8919 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8920 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8921 tp->pdev->subsystem_vendor) &&
8922 (subsys_id_to_phy_id[i].subsys_devid ==
8923 tp->pdev->subsystem_device))
8924 return &subsys_id_to_phy_id[i];
8925 }
8926 return NULL;
8927}
8928
7d0c41ef
MC
/* Since this function may be called in D3-hot power state during
 * tg3_init_one(), only config cycles are allowed.
 *
 * Decodes the NVRAM shadow in NIC SRAM (PHY id, serdes vs copper, LED
 * mode, eeprom write protect, ASF/WOL capabilities) into tp->phy_id,
 * tp->led_ctrl and the tg3_flags/tg3_flags2 bits.  If the SRAM
 * signature is absent, only the defaults below are set.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Defaults for boards without a valid NVRAM signature. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* The CFG_2 word only exists on newer bootcode versions
		 * and on chips other than 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM phy id words into the driver's
		 * internal PHY ID layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			/* 5780 fiber parts use an MII-attached serdes;
			 * everything else uses the plain serdes path.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* 5750+ chips keep the LED mode in CFG_2 (with extra
		 * Shasta modes); older chips keep it in CFG.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 cannot also drive the PHY LEDs. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards wire their LEDs for PHY_2 mode
		 * regardless of what the NVRAM says.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
9063
/* Identify the PHY fitted on this board and prepare it for use.
 *
 * The PHY ID is taken, in order of preference, from the MII ID
 * registers, the value tg3_get_eeprom_hw_cfg() found in NVRAM, or the
 * hard-coded subsystem-ID table.  For copper parts without ASF the
 * PHY is also reset and autonegotiation advertising is programmed.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal PHY ID layout (matches PHY_ID_* constants).
		 */
		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero phy_id in the table marks a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR link status is latched, so read it twice and skip
		 * the PHY reset when the link is already up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs must run as gigabit link master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): the advertisement registers are rewritten
		 * unconditionally here even when already programmed above —
		 * presumably to make them stick after
		 * tg3_phy_set_wirespeed(); confirm before simplifying.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this runs the 5401 DSP init a second time right
	 * after a successful first pass — looks like a deliberate retry;
	 * confirm before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Serdes boards advertise gigabit/fibre only. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
9186
9187static void __devinit tg3_read_partno(struct tg3 *tp)
9188{
9189 unsigned char vpd_data[256];
9190 int i;
9191
9192 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9193 /* Sun decided not to put the necessary bits in the
9194 * NVRAM of their onboard tg3 parts :(
9195 */
9196 strcpy(tp->board_part_number, "Sun 570X");
9197 return;
9198 }
9199
9200 for (i = 0; i < 256; i += 4) {
9201 u32 tmp;
9202
9203 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9204 goto out_not_found;
9205
9206 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9207 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9208 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9209 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9210 }
9211
9212 /* Now parse and find the part number. */
9213 for (i = 0; i < 256; ) {
9214 unsigned char val = vpd_data[i];
9215 int block_end;
9216
9217 if (val == 0x82 || val == 0x91) {
9218 i = (i + 3 +
9219 (vpd_data[i + 1] +
9220 (vpd_data[i + 2] << 8)));
9221 continue;
9222 }
9223
9224 if (val != 0x90)
9225 goto out_not_found;
9226
9227 block_end = (i + 3 +
9228 (vpd_data[i + 1] +
9229 (vpd_data[i + 2] << 8)));
9230 i += 3;
9231 while (i < block_end) {
9232 if (vpd_data[i + 0] == 'P' &&
9233 vpd_data[i + 1] == 'N') {
9234 int partno_len = vpd_data[i + 2];
9235
9236 if (partno_len > 24)
9237 goto out_not_found;
9238
9239 memcpy(tp->board_part_number,
9240 &vpd_data[i + 3],
9241 partno_len);
9242
9243 /* Success. */
9244 return;
9245 }
9246 }
9247
9248 /* Part number not found. */
9249 goto out_not_found;
9250 }
9251
9252out_not_found:
9253 strcpy(tp->board_part_number, "none");
9254}
9255
#ifdef CONFIG_SPARC64
/* Detect Sun's onboard 570X parts by querying OpenPROM for the device
 * node's subsystem-vendor-id property.  Sun boards need special
 * handling elsewhere (no VPD part number, no NVRAM writes).
 * Returns 1 for a Sun board, 0 otherwise.
 */
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
{
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;
		u32 venid;
		int err;

		err = prom_getproperty(node, "subsystem-vendor-id",
				       (char *) &venid, sizeof(venid));
		/* prom_getproperty() reports failure as 0 or -1. */
		if (err == 0 || err == -1)
			return 0;
		if (venid == PCI_VENDOR_ID_SUN)
			return 1;
	}
	return 0;
}
#endif
9277
9278static int __devinit tg3_get_invariants(struct tg3 *tp)
9279{
9280 static struct pci_device_id write_reorder_chipsets[] = {
1da177e4
LT
9281 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9282 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
1cbf0747
DM
9283 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9284 PCI_DEVICE_ID_AMD_K8_NB) },
1da177e4
LT
9285 { },
9286 };
9287 u32 misc_ctrl_reg;
9288 u32 cacheline_sz_reg;
9289 u32 pci_state_reg, grc_misc_cfg;
9290 u32 val;
9291 u16 pci_cmd;
9292 int err;
9293
9294#ifdef CONFIG_SPARC64
9295 if (tg3_is_sun_570X(tp))
9296 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9297#endif
9298
1cbf0747 9299 /* If we have an AMD 762 or K8 chipset, write
1da177e4
LT
9300 * reordering to the mailbox registers done by the host
9301 * controller can cause major troubles. We read back from
9302 * every mailbox register write to force the writes to be
9303 * posted to the chip in order.
9304 */
9305 if (pci_dev_present(write_reorder_chipsets))
9306 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9307
9308 /* Force memory write invalidate off. If we leave it on,
9309 * then on 5700_BX chips we have to enable a workaround.
9310 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9311 * to match the cacheline size. The Broadcom driver have this
9312 * workaround but turns MWI off all the times so never uses
9313 * it. This seems to suggest that the workaround is insufficient.
9314 */
9315 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9316 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9317 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9318
9319 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9320 * has the register indirect write enable bit set before
9321 * we try to access any of the MMIO registers. It is also
9322 * critical that the PCI-X hw workaround situation is decided
9323 * before that as well.
9324 */
9325 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9326 &misc_ctrl_reg);
9327
9328 tp->pci_chip_rev_id = (misc_ctrl_reg >>
9329 MISC_HOST_CTRL_CHIPREV_SHIFT);
9330
ff645bec
MC
9331 /* Wrong chip ID in 5752 A0. This code can be removed later
9332 * as A0 is not in production.
9333 */
9334 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9335 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9336
6892914f
MC
9337 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9338 * we need to disable memory and use config. cycles
9339 * only to access all registers. The 5702/03 chips
9340 * can mistakenly decode the special cycles from the
9341 * ICH chipsets as memory write cycles, causing corruption
9342 * of register and memory space. Only certain ICH bridges
9343 * will drive special cycles with non-zero data during the
9344 * address phase which can fall within the 5703's address
9345 * range. This is not an ICH bug as the PCI spec allows
9346 * non-zero address during special cycles. However, only
9347 * these ICH bridges are known to drive non-zero addresses
9348 * during special cycles.
9349 *
9350 * Since special cycles do not cross PCI bridges, we only
9351 * enable this workaround if the 5703 is on the secondary
9352 * bus of these ICH bridges.
9353 */
9354 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9355 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9356 static struct tg3_dev_id {
9357 u32 vendor;
9358 u32 device;
9359 u32 rev;
9360 } ich_chipsets[] = {
9361 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9362 PCI_ANY_ID },
9363 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9364 PCI_ANY_ID },
9365 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9366 0xa },
9367 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9368 PCI_ANY_ID },
9369 { },
9370 };
9371 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9372 struct pci_dev *bridge = NULL;
9373
9374 while (pci_id->vendor != 0) {
9375 bridge = pci_get_device(pci_id->vendor, pci_id->device,
9376 bridge);
9377 if (!bridge) {
9378 pci_id++;
9379 continue;
9380 }
9381 if (pci_id->rev != PCI_ANY_ID) {
9382 u8 rev;
9383
9384 pci_read_config_byte(bridge, PCI_REVISION_ID,
9385 &rev);
9386 if (rev > pci_id->rev)
9387 continue;
9388 }
9389 if (bridge->subordinate &&
9390 (bridge->subordinate->number ==
9391 tp->pdev->bus->number)) {
9392
9393 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9394 pci_dev_put(bridge);
9395 break;
9396 }
9397 }
9398 }
9399
4cf78e4f
MC
9400 /* Find msi capability. */
9401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9402 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9403
1da177e4
LT
9404 /* Initialize misc host control in PCI block. */
9405 tp->misc_host_ctrl |= (misc_ctrl_reg &
9406 MISC_HOST_CTRL_CHIPREV);
9407 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9408 tp->misc_host_ctrl);
9409
9410 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9411 &cacheline_sz_reg);
9412
9413 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
9414 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
9415 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
9416 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
9417
6708e5cc 9418 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f
MC
9419 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
6708e5cc
JL
9421 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9422
1b440c56
JL
9423 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9424 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9425 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9426
bb7064dc 9427 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9428 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9429
0f893dc6
MC
9430 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9431 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9432 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9433 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9434
1da177e4
LT
9435 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9436 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9437
9438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9439 tp->pci_lat_timer < 64) {
9440 tp->pci_lat_timer = 64;
9441
9442 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
9443 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
9444 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
9445 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
9446
9447 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9448 cacheline_sz_reg);
9449 }
9450
9451 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9452 &pci_state_reg);
9453
9454 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9455 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9456
9457 /* If this is a 5700 BX chipset, and we are in PCI-X
9458 * mode, enable register write workaround.
9459 *
9460 * The workaround is to use indirect register accesses
9461 * for all chip writes not to mailbox registers.
9462 */
9463 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9464 u32 pm_reg;
9465 u16 pci_cmd;
9466
9467 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9468
9469 /* The chip can have it's power management PCI config
9470 * space registers clobbered due to this bug.
9471 * So explicitly force the chip into D0 here.
9472 */
9473 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9474 &pm_reg);
9475 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9476 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9477 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9478 pm_reg);
9479
9480 /* Also, force SERR#/PERR# in PCI command. */
9481 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9482 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9483 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9484 }
9485 }
9486
087fe256
MC
9487 /* 5700 BX chips need to have their TX producer index mailboxes
9488 * written twice to workaround a bug.
9489 */
9490 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9491 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9492
1da177e4
LT
9493 /* Back to back register writes can cause problems on this chip,
9494 * the workaround is to read back all reg writes except those to
9495 * mailbox regs. See tg3_write_indirect_reg32().
9496 *
9497 * PCI Express 5750_A0 rev chips need this workaround too.
9498 */
9499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9500 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9501 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9502 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9503
9504 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9505 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9506 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9507 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9508
9509 /* Chip-specific fixup from Broadcom driver */
9510 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9511 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9512 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9513 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9514 }
9515
1ee582d8 9516 /* Default fast path register access methods */
20094930 9517 tp->read32 = tg3_read32;
1ee582d8 9518 tp->write32 = tg3_write32;
09ee929c 9519 tp->read32_mbox = tg3_read32;
20094930 9520 tp->write32_mbox = tg3_write32;
1ee582d8
MC
9521 tp->write32_tx_mbox = tg3_write32;
9522 tp->write32_rx_mbox = tg3_write32;
9523
9524 /* Various workaround register access methods */
9525 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9526 tp->write32 = tg3_write_indirect_reg32;
9527 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9528 tp->write32 = tg3_write_flush_reg32;
9529
9530 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9531 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9532 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9533 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9534 tp->write32_rx_mbox = tg3_write_flush_reg32;
9535 }
20094930 9536
6892914f
MC
9537 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9538 tp->read32 = tg3_read_indirect_reg32;
9539 tp->write32 = tg3_write_indirect_reg32;
9540 tp->read32_mbox = tg3_read_indirect_mbox;
9541 tp->write32_mbox = tg3_write_indirect_mbox;
9542 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9543 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9544
9545 iounmap(tp->regs);
22abe310 9546 tp->regs = NULL;
6892914f
MC
9547
9548 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9549 pci_cmd &= ~PCI_COMMAND_MEMORY;
9550 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9551 }
9552
7d0c41ef
MC
9553 /* Get eeprom hw config before calling tg3_set_power_state().
9554 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9555 * determined before calling tg3_set_power_state() so that
9556 * we know whether or not to switch out of Vaux power.
9557 * When the flag is set, it means that GPIO1 is used for eeprom
9558 * write protect and also implies that it is a LOM where GPIOs
9559 * are not used to switch power.
9560 */
9561 tg3_get_eeprom_hw_cfg(tp);
9562
314fba34
MC
9563 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9564 * GPIO1 driven high will bring 5700's external PHY out of reset.
9565 * It is also used as eeprom write protect on LOMs.
9566 */
9567 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9568 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9569 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9570 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9571 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
9572 /* Unused GPIO3 must be driven as output on 5752 because there
9573 * are no pull-up resistors on unused GPIO pins.
9574 */
9575 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9576 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 9577
1da177e4
LT
9578 /* Force the chip into D0. */
9579 err = tg3_set_power_state(tp, 0);
9580 if (err) {
9581 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9582 pci_name(tp->pdev));
9583 return err;
9584 }
9585
9586 /* 5700 B0 chips do not support checksumming correctly due
9587 * to hardware bugs.
9588 */
9589 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9590 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9591
9592 /* Pseudo-header checksum is done by hardware logic and not
9593 * the offload processers, so make the chip do the pseudo-
9594 * header checksums on receive. For transmit it is more
9595 * convenient to do the pseudo-header checksum in software
9596 * as Linux does that on transmit for us in all cases.
9597 */
9598 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9599 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9600
9601 /* Derive initial jumbo mode from MTU assigned in
9602 * ether_setup() via the alloc_etherdev() call
9603 */
0f893dc6
MC
9604 if (tp->dev->mtu > ETH_DATA_LEN &&
9605 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9606 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
9607
9608 /* Determine WakeOnLan speed to use. */
9609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9610 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9611 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9612 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9613 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9614 } else {
9615 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9616 }
9617
9618 /* A few boards don't want Ethernet@WireSpeed phy feature */
9619 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9620 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9621 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b
MC
9622 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9623 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
9624 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9625
9626 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9627 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9628 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9629 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9630 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9631
bb7064dc 9632 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
1da177e4
LT
9633 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9634
1da177e4 9635 tp->coalesce_mode = 0;
1da177e4
LT
9636 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9637 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9638 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9639
9640 /* Initialize MAC MI mode, polling disabled. */
9641 tw32_f(MAC_MI_MODE, tp->mi_mode);
9642 udelay(80);
9643
9644 /* Initialize data/descriptor byte/word swapping. */
9645 val = tr32(GRC_MODE);
9646 val &= GRC_MODE_HOST_STACKUP;
9647 tw32(GRC_MODE, val | tp->grc_mode);
9648
9649 tg3_switch_clocks(tp);
9650
9651 /* Clear this out for sanity. */
9652 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9653
9654 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9655 &pci_state_reg);
9656 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9657 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9658 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9659
9660 if (chiprevid == CHIPREV_ID_5701_A0 ||
9661 chiprevid == CHIPREV_ID_5701_B0 ||
9662 chiprevid == CHIPREV_ID_5701_B2 ||
9663 chiprevid == CHIPREV_ID_5701_B5) {
9664 void __iomem *sram_base;
9665
9666 /* Write some dummy words into the SRAM status block
9667 * area, see if it reads back correctly. If the return
9668 * value is bad, force enable the PCIX workaround.
9669 */
9670 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9671
9672 writel(0x00000000, sram_base);
9673 writel(0x00000000, sram_base + 4);
9674 writel(0xffffffff, sram_base + 4);
9675 if (readl(sram_base) != 0x00000000)
9676 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9677 }
9678 }
9679
9680 udelay(50);
9681 tg3_nvram_init(tp);
9682
9683 grc_misc_cfg = tr32(GRC_MISC_CFG);
9684 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9685
9686 /* Broadcom's driver says that CIOBE multisplit has a bug */
9687#if 0
9688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9689 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9690 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9691 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9692 }
9693#endif
9694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9695 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9696 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9697 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9698
fac9b83e
DM
9699 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9700 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9701 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9702 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9703 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9704 HOSTCC_MODE_CLRTICK_TXBD);
9705
9706 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9707 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9708 tp->misc_host_ctrl);
9709 }
9710
1da177e4
LT
9711 /* these are limited to 10/100 only */
9712 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9713 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9714 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9715 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9716 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9717 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9718 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9719 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9720 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9721 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9722 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9723
9724 err = tg3_phy_probe(tp);
9725 if (err) {
9726 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9727 pci_name(tp->pdev), err);
9728 /* ... but do not return immediately ... */
9729 }
9730
9731 tg3_read_partno(tp);
9732
9733 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9734 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9735 } else {
9736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9737 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9738 else
9739 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9740 }
9741
9742 /* 5700 {AX,BX} chips have a broken status block link
9743 * change bit implementation, so we must use the
9744 * status register in those cases.
9745 */
9746 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9747 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9748 else
9749 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9750
9751 /* The led_ctrl is set during tg3_phy_probe, here we might
9752 * have to force the link status polling mechanism based
9753 * upon subsystem IDs.
9754 */
9755 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9756 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9757 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9758 TG3_FLAG_USE_LINKCHG_REG);
9759 }
9760
9761 /* For all SERDES we poll the MAC status register. */
9762 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9763 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9764 else
9765 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9766
1da177e4
LT
9767 /* It seems all chips can get confused if TX buffers
9768 * straddle the 4GB address boundary in some cases.
9769 */
9770 tp->dev->hard_start_xmit = tg3_start_xmit;
9771
9772 tp->rx_offset = 2;
9773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9774 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9775 tp->rx_offset = 0;
9776
9777 /* By default, disable wake-on-lan. User can change this
9778 * using ETHTOOL_SWOL.
9779 */
9780 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9781
9782 return err;
9783}
9784
9785#ifdef CONFIG_SPARC64
9786static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9787{
9788 struct net_device *dev = tp->dev;
9789 struct pci_dev *pdev = tp->pdev;
9790 struct pcidev_cookie *pcp = pdev->sysdata;
9791
9792 if (pcp != NULL) {
9793 int node = pcp->prom_node;
9794
9795 if (prom_getproplen(node, "local-mac-address") == 6) {
9796 prom_getproperty(node, "local-mac-address",
9797 dev->dev_addr, 6);
2ff43697 9798 memcpy(dev->perm_addr, dev->dev_addr, 6);
1da177e4
LT
9799 return 0;
9800 }
9801 }
9802 return -ENODEV;
9803}
9804
9805static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9806{
9807 struct net_device *dev = tp->dev;
9808
9809 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 9810 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
9811 return 0;
9812}
9813#endif
9814
9815static int __devinit tg3_get_device_address(struct tg3 *tp)
9816{
9817 struct net_device *dev = tp->dev;
9818 u32 hi, lo, mac_offset;
9819
9820#ifdef CONFIG_SPARC64
9821 if (!tg3_get_macaddr_sparc(tp))
9822 return 0;
9823#endif
9824
9825 mac_offset = 0x7c;
4cf78e4f
MC
9826 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9827 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
9828 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1da177e4
LT
9829 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9830 mac_offset = 0xcc;
9831 if (tg3_nvram_lock(tp))
9832 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9833 else
9834 tg3_nvram_unlock(tp);
9835 }
9836
9837 /* First try to get it from MAC address mailbox. */
9838 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9839 if ((hi >> 16) == 0x484b) {
9840 dev->dev_addr[0] = (hi >> 8) & 0xff;
9841 dev->dev_addr[1] = (hi >> 0) & 0xff;
9842
9843 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9844 dev->dev_addr[2] = (lo >> 24) & 0xff;
9845 dev->dev_addr[3] = (lo >> 16) & 0xff;
9846 dev->dev_addr[4] = (lo >> 8) & 0xff;
9847 dev->dev_addr[5] = (lo >> 0) & 0xff;
9848 }
9849 /* Next, try NVRAM. */
9850 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
9851 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9852 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9853 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9854 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9855 dev->dev_addr[2] = ((lo >> 0) & 0xff);
9856 dev->dev_addr[3] = ((lo >> 8) & 0xff);
9857 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9858 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9859 }
9860 /* Finally just fetch it out of the MAC control regs. */
9861 else {
9862 hi = tr32(MAC_ADDR_0_HIGH);
9863 lo = tr32(MAC_ADDR_0_LOW);
9864
9865 dev->dev_addr[5] = lo & 0xff;
9866 dev->dev_addr[4] = (lo >> 8) & 0xff;
9867 dev->dev_addr[3] = (lo >> 16) & 0xff;
9868 dev->dev_addr[2] = (lo >> 24) & 0xff;
9869 dev->dev_addr[1] = hi & 0xff;
9870 dev->dev_addr[0] = (hi >> 8) & 0xff;
9871 }
9872
9873 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9874#ifdef CONFIG_SPARC64
9875 if (!tg3_get_default_macaddr_sparc(tp))
9876 return 0;
9877#endif
9878 return -EINVAL;
9879 }
2ff43697 9880 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
9881 return 0;
9882}
9883
59e6b434
DM
9884#define BOUNDARY_SINGLE_CACHELINE 1
9885#define BOUNDARY_MULTI_CACHELINE 2
9886
9887static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9888{
9889 int cacheline_size;
9890 u8 byte;
9891 int goal;
9892
9893 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9894 if (byte == 0)
9895 cacheline_size = 1024;
9896 else
9897 cacheline_size = (int) byte * 4;
9898
9899 /* On 5703 and later chips, the boundary bits have no
9900 * effect.
9901 */
9902 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9903 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9904 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9905 goto out;
9906
9907#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9908 goal = BOUNDARY_MULTI_CACHELINE;
9909#else
9910#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9911 goal = BOUNDARY_SINGLE_CACHELINE;
9912#else
9913 goal = 0;
9914#endif
9915#endif
9916
9917 if (!goal)
9918 goto out;
9919
9920 /* PCI controllers on most RISC systems tend to disconnect
9921 * when a device tries to burst across a cache-line boundary.
9922 * Therefore, letting tg3 do so just wastes PCI bandwidth.
9923 *
9924 * Unfortunately, for PCI-E there are only limited
9925 * write-side controls for this, and thus for reads
9926 * we will still get the disconnects. We'll also waste
9927 * these PCI cycles for both read and write for chips
9928 * other than 5700 and 5701 which do not implement the
9929 * boundary bits.
9930 */
9931 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9932 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9933 switch (cacheline_size) {
9934 case 16:
9935 case 32:
9936 case 64:
9937 case 128:
9938 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9939 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9940 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9941 } else {
9942 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9943 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9944 }
9945 break;
9946
9947 case 256:
9948 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9949 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9950 break;
9951
9952 default:
9953 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9954 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9955 break;
9956 };
9957 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9958 switch (cacheline_size) {
9959 case 16:
9960 case 32:
9961 case 64:
9962 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9963 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9964 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9965 break;
9966 }
9967 /* fallthrough */
9968 case 128:
9969 default:
9970 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9971 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9972 break;
9973 };
9974 } else {
9975 switch (cacheline_size) {
9976 case 16:
9977 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9978 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9979 DMA_RWCTRL_WRITE_BNDRY_16);
9980 break;
9981 }
9982 /* fallthrough */
9983 case 32:
9984 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9985 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9986 DMA_RWCTRL_WRITE_BNDRY_32);
9987 break;
9988 }
9989 /* fallthrough */
9990 case 64:
9991 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9992 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9993 DMA_RWCTRL_WRITE_BNDRY_64);
9994 break;
9995 }
9996 /* fallthrough */
9997 case 128:
9998 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9999 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10000 DMA_RWCTRL_WRITE_BNDRY_128);
10001 break;
10002 }
10003 /* fallthrough */
10004 case 256:
10005 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10006 DMA_RWCTRL_WRITE_BNDRY_256);
10007 break;
10008 case 512:
10009 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10010 DMA_RWCTRL_WRITE_BNDRY_512);
10011 break;
10012 case 1024:
10013 default:
10014 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10015 DMA_RWCTRL_WRITE_BNDRY_1024);
10016 break;
10017 };
10018 }
10019
10020out:
10021 return val;
10022}
10023
1da177e4
LT
10024static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10025{
10026 struct tg3_internal_buffer_desc test_desc;
10027 u32 sram_dma_descs;
10028 int i, ret;
10029
10030 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10031
10032 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10033 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10034 tw32(RDMAC_STATUS, 0);
10035 tw32(WDMAC_STATUS, 0);
10036
10037 tw32(BUFMGR_MODE, 0);
10038 tw32(FTQ_RESET, 0);
10039
10040 test_desc.addr_hi = ((u64) buf_dma) >> 32;
10041 test_desc.addr_lo = buf_dma & 0xffffffff;
10042 test_desc.nic_mbuf = 0x00002100;
10043 test_desc.len = size;
10044
10045 /*
10046 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10047 * the *second* time the tg3 driver was getting loaded after an
10048 * initial scan.
10049 *
10050 * Broadcom tells me:
10051 * ...the DMA engine is connected to the GRC block and a DMA
10052 * reset may affect the GRC block in some unpredictable way...
10053 * The behavior of resets to individual blocks has not been tested.
10054 *
10055 * Broadcom noted the GRC reset will also reset all sub-components.
10056 */
10057 if (to_device) {
10058 test_desc.cqid_sqid = (13 << 8) | 2;
10059
10060 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10061 udelay(40);
10062 } else {
10063 test_desc.cqid_sqid = (16 << 8) | 7;
10064
10065 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10066 udelay(40);
10067 }
10068 test_desc.flags = 0x00000005;
10069
10070 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10071 u32 val;
10072
10073 val = *(((u32 *)&test_desc) + i);
10074 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10075 sram_dma_descs + (i * sizeof(u32)));
10076 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10077 }
10078 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10079
10080 if (to_device) {
10081 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10082 } else {
10083 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10084 }
10085
10086 ret = -ENODEV;
10087 for (i = 0; i < 40; i++) {
10088 u32 val;
10089
10090 if (to_device)
10091 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10092 else
10093 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10094 if ((val & 0xffff) == sram_dma_descs) {
10095 ret = 0;
10096 break;
10097 }
10098
10099 udelay(100);
10100 }
10101
10102 return ret;
10103}
10104
ded7340d 10105#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
10106
10107static int __devinit tg3_test_dma(struct tg3 *tp)
10108{
10109 dma_addr_t buf_dma;
59e6b434 10110 u32 *buf, saved_dma_rwctrl;
1da177e4
LT
10111 int ret;
10112
10113 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10114 if (!buf) {
10115 ret = -ENOMEM;
10116 goto out_nofree;
10117 }
10118
10119 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10120 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10121
59e6b434 10122 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
1da177e4
LT
10123
10124 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10125 /* DMA read watermark not used on PCIE */
10126 tp->dma_rwctrl |= 0x00180000;
10127 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
85e94ced
MC
10128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
1da177e4
LT
10130 tp->dma_rwctrl |= 0x003f0000;
10131 else
10132 tp->dma_rwctrl |= 0x003f000f;
10133 } else {
10134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10136 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10137
10138 if (ccval == 0x6 || ccval == 0x7)
10139 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10140
59e6b434 10141 /* Set bit 23 to enable PCIX hw bug fix */
1da177e4 10142 tp->dma_rwctrl |= 0x009f0000;
4cf78e4f
MC
10143 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10144 /* 5780 always in PCIX mode */
10145 tp->dma_rwctrl |= 0x00144000;
1da177e4
LT
10146 } else {
10147 tp->dma_rwctrl |= 0x001b000f;
10148 }
10149 }
10150
10151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10153 tp->dma_rwctrl &= 0xfffffff0;
10154
10155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10157 /* Remove this if it causes problems for some boards. */
10158 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10159
10160 /* On 5700/5701 chips, we need to set this bit.
10161 * Otherwise the chip will issue cacheline transactions
10162 * to streamable DMA memory with not all the byte
10163 * enables turned on. This is an error on several
10164 * RISC PCI controllers, in particular sparc64.
10165 *
10166 * On 5703/5704 chips, this bit has been reassigned
10167 * a different meaning. In particular, it is used
10168 * on those chips to enable a PCI-X workaround.
10169 */
10170 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10171 }
10172
10173 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10174
10175#if 0
10176 /* Unneeded, already done by tg3_get_invariants. */
10177 tg3_switch_clocks(tp);
10178#endif
10179
10180 ret = 0;
10181 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10182 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10183 goto out;
10184
59e6b434
DM
10185 /* It is best to perform DMA test with maximum write burst size
10186 * to expose the 5700/5701 write DMA bug.
10187 */
10188 saved_dma_rwctrl = tp->dma_rwctrl;
10189 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10190 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10191
1da177e4
LT
10192 while (1) {
10193 u32 *p = buf, i;
10194
10195 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10196 p[i] = i;
10197
10198 /* Send the buffer to the chip. */
10199 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10200 if (ret) {
10201 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10202 break;
10203 }
10204
10205#if 0
10206 /* validate data reached card RAM correctly. */
10207 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10208 u32 val;
10209 tg3_read_mem(tp, 0x2100 + (i*4), &val);
10210 if (le32_to_cpu(val) != p[i]) {
10211 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
10212 /* ret = -ENODEV here? */
10213 }
10214 p[i] = 0;
10215 }
10216#endif
10217 /* Now read it back. */
10218 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10219 if (ret) {
10220 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10221
10222 break;
10223 }
10224
10225 /* Verify it. */
10226 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10227 if (p[i] == i)
10228 continue;
10229
59e6b434
DM
10230 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10231 DMA_RWCTRL_WRITE_BNDRY_16) {
10232 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
1da177e4
LT
10233 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10234 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10235 break;
10236 } else {
10237 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10238 ret = -ENODEV;
10239 goto out;
10240 }
10241 }
10242
10243 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10244 /* Success. */
10245 ret = 0;
10246 break;
10247 }
10248 }
59e6b434
DM
10249 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10250 DMA_RWCTRL_WRITE_BNDRY_16) {
6d1cfbab
MC
10251 static struct pci_device_id dma_wait_state_chipsets[] = {
10252 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10253 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10254 { },
10255 };
10256
59e6b434 10257 /* DMA test passed without adjusting DMA boundary,
6d1cfbab
MC
10258 * now look for chipsets that are known to expose the
10259 * DMA bug without failing the test.
59e6b434 10260 */
6d1cfbab
MC
10261 if (pci_dev_present(dma_wait_state_chipsets)) {
10262 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10263 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10264 }
10265 else
10266 /* Safe to use the calculated DMA boundary. */
10267 tp->dma_rwctrl = saved_dma_rwctrl;
10268
59e6b434
DM
10269 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10270 }
1da177e4
LT
10271
10272out:
10273 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10274out_nofree:
10275 return ret;
10276}
10277
10278static void __devinit tg3_init_link_config(struct tg3 *tp)
10279{
10280 tp->link_config.advertising =
10281 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10282 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10283 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10284 ADVERTISED_Autoneg | ADVERTISED_MII);
10285 tp->link_config.speed = SPEED_INVALID;
10286 tp->link_config.duplex = DUPLEX_INVALID;
10287 tp->link_config.autoneg = AUTONEG_ENABLE;
10288 netif_carrier_off(tp->dev);
10289 tp->link_config.active_speed = SPEED_INVALID;
10290 tp->link_config.active_duplex = DUPLEX_INVALID;
10291 tp->link_config.phy_is_low_power = 0;
10292 tp->link_config.orig_speed = SPEED_INVALID;
10293 tp->link_config.orig_duplex = DUPLEX_INVALID;
10294 tp->link_config.orig_autoneg = AUTONEG_INVALID;
10295}
10296
10297static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10298{
fdfec172
MC
10299 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10300 tp->bufmgr_config.mbuf_read_dma_low_water =
10301 DEFAULT_MB_RDMA_LOW_WATER_5705;
10302 tp->bufmgr_config.mbuf_mac_rx_low_water =
10303 DEFAULT_MB_MACRX_LOW_WATER_5705;
10304 tp->bufmgr_config.mbuf_high_water =
10305 DEFAULT_MB_HIGH_WATER_5705;
10306
10307 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10308 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10309 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10310 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10311 tp->bufmgr_config.mbuf_high_water_jumbo =
10312 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10313 } else {
10314 tp->bufmgr_config.mbuf_read_dma_low_water =
10315 DEFAULT_MB_RDMA_LOW_WATER;
10316 tp->bufmgr_config.mbuf_mac_rx_low_water =
10317 DEFAULT_MB_MACRX_LOW_WATER;
10318 tp->bufmgr_config.mbuf_high_water =
10319 DEFAULT_MB_HIGH_WATER;
10320
10321 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10322 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10323 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10324 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10325 tp->bufmgr_config.mbuf_high_water_jumbo =
10326 DEFAULT_MB_HIGH_WATER_JUMBO;
10327 }
1da177e4
LT
10328
10329 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10330 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10331}
10332
10333static char * __devinit tg3_phy_string(struct tg3 *tp)
10334{
10335 switch (tp->phy_id & PHY_ID_MASK) {
10336 case PHY_ID_BCM5400: return "5400";
10337 case PHY_ID_BCM5401: return "5401";
10338 case PHY_ID_BCM5411: return "5411";
10339 case PHY_ID_BCM5701: return "5701";
10340 case PHY_ID_BCM5703: return "5703";
10341 case PHY_ID_BCM5704: return "5704";
10342 case PHY_ID_BCM5705: return "5705";
10343 case PHY_ID_BCM5750: return "5750";
85e94ced 10344 case PHY_ID_BCM5752: return "5752";
4cf78e4f 10345 case PHY_ID_BCM5780: return "5780";
1da177e4
LT
10346 case PHY_ID_BCM8002: return "8002/serdes";
10347 case 0: return "serdes";
10348 default: return "unknown";
10349 };
10350}
10351
10352static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10353{
10354 struct pci_dev *peer;
10355 unsigned int func, devnr = tp->pdev->devfn & ~7;
10356
10357 for (func = 0; func < 8; func++) {
10358 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10359 if (peer && peer != tp->pdev)
10360 break;
10361 pci_dev_put(peer);
10362 }
10363 if (!peer || peer == tp->pdev)
10364 BUG();
10365
10366 /*
10367 * We don't need to keep the refcount elevated; there's no way
10368 * to remove one half of this device without removing the other
10369 */
10370 pci_dev_put(peer);
10371
10372 return peer;
10373}
10374
15f9850d
DM
10375static void __devinit tg3_init_coal(struct tg3 *tp)
10376{
10377 struct ethtool_coalesce *ec = &tp->coal;
10378
10379 memset(ec, 0, sizeof(*ec));
10380 ec->cmd = ETHTOOL_GCOALESCE;
10381 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10382 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10383 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10384 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10385 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10386 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10387 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10388 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10389 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10390
10391 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10392 HOSTCC_MODE_CLRTICK_TXBD)) {
10393 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10394 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10395 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10396 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10397 }
d244c892
MC
10398
10399 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10400 ec->rx_coalesce_usecs_irq = 0;
10401 ec->tx_coalesce_usecs_irq = 0;
10402 ec->stats_block_coalesce_usecs = 0;
10403 }
15f9850d
DM
10404}
10405
1da177e4
LT
10406static int __devinit tg3_init_one(struct pci_dev *pdev,
10407 const struct pci_device_id *ent)
10408{
10409 static int tg3_version_printed = 0;
10410 unsigned long tg3reg_base, tg3reg_len;
10411 struct net_device *dev;
10412 struct tg3 *tp;
10413 int i, err, pci_using_dac, pm_cap;
10414
10415 if (tg3_version_printed++ == 0)
10416 printk(KERN_INFO "%s", version);
10417
10418 err = pci_enable_device(pdev);
10419 if (err) {
10420 printk(KERN_ERR PFX "Cannot enable PCI device, "
10421 "aborting.\n");
10422 return err;
10423 }
10424
10425 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10426 printk(KERN_ERR PFX "Cannot find proper PCI device "
10427 "base address, aborting.\n");
10428 err = -ENODEV;
10429 goto err_out_disable_pdev;
10430 }
10431
10432 err = pci_request_regions(pdev, DRV_MODULE_NAME);
10433 if (err) {
10434 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10435 "aborting.\n");
10436 goto err_out_disable_pdev;
10437 }
10438
10439 pci_set_master(pdev);
10440
10441 /* Find power-management capability. */
10442 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10443 if (pm_cap == 0) {
10444 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10445 "aborting.\n");
10446 err = -EIO;
10447 goto err_out_free_res;
10448 }
10449
10450 /* Configure DMA attributes. */
10451 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10452 if (!err) {
10453 pci_using_dac = 1;
10454 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10455 if (err < 0) {
10456 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10457 "for consistent allocations\n");
10458 goto err_out_free_res;
10459 }
10460 } else {
10461 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10462 if (err) {
10463 printk(KERN_ERR PFX "No usable DMA configuration, "
10464 "aborting.\n");
10465 goto err_out_free_res;
10466 }
10467 pci_using_dac = 0;
10468 }
10469
10470 tg3reg_base = pci_resource_start(pdev, 0);
10471 tg3reg_len = pci_resource_len(pdev, 0);
10472
10473 dev = alloc_etherdev(sizeof(*tp));
10474 if (!dev) {
10475 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10476 err = -ENOMEM;
10477 goto err_out_free_res;
10478 }
10479
10480 SET_MODULE_OWNER(dev);
10481 SET_NETDEV_DEV(dev, &pdev->dev);
10482
10483 if (pci_using_dac)
10484 dev->features |= NETIF_F_HIGHDMA;
10485 dev->features |= NETIF_F_LLTX;
10486#if TG3_VLAN_TAG_USED
10487 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10488 dev->vlan_rx_register = tg3_vlan_rx_register;
10489 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10490#endif
10491
10492 tp = netdev_priv(dev);
10493 tp->pdev = pdev;
10494 tp->dev = dev;
10495 tp->pm_cap = pm_cap;
10496 tp->mac_mode = TG3_DEF_MAC_MODE;
10497 tp->rx_mode = TG3_DEF_RX_MODE;
10498 tp->tx_mode = TG3_DEF_TX_MODE;
10499 tp->mi_mode = MAC_MI_MODE_BASE;
10500 if (tg3_debug > 0)
10501 tp->msg_enable = tg3_debug;
10502 else
10503 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10504
10505 /* The word/byte swap controls here control register access byte
10506 * swapping. DMA data byte swapping is controlled in the GRC_MODE
10507 * setting below.
10508 */
10509 tp->misc_host_ctrl =
10510 MISC_HOST_CTRL_MASK_PCI_INT |
10511 MISC_HOST_CTRL_WORD_SWAP |
10512 MISC_HOST_CTRL_INDIR_ACCESS |
10513 MISC_HOST_CTRL_PCISTATE_RW;
10514
10515 /* The NONFRM (non-frame) byte/word swap controls take effect
10516 * on descriptor entries, anything which isn't packet data.
10517 *
10518 * The StrongARM chips on the board (one for tx, one for rx)
10519 * are running in big-endian mode.
10520 */
10521 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10522 GRC_MODE_WSWAP_NONFRM_DATA);
10523#ifdef __BIG_ENDIAN
10524 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10525#endif
10526 spin_lock_init(&tp->lock);
10527 spin_lock_init(&tp->tx_lock);
10528 spin_lock_init(&tp->indirect_lock);
10529 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10530
10531 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10532 if (tp->regs == 0UL) {
10533 printk(KERN_ERR PFX "Cannot map device registers, "
10534 "aborting.\n");
10535 err = -ENOMEM;
10536 goto err_out_free_dev;
10537 }
10538
10539 tg3_init_link_config(tp);
10540
1da177e4
LT
10541 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10542 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10543 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10544
10545 dev->open = tg3_open;
10546 dev->stop = tg3_close;
10547 dev->get_stats = tg3_get_stats;
10548 dev->set_multicast_list = tg3_set_rx_mode;
10549 dev->set_mac_address = tg3_set_mac_addr;
10550 dev->do_ioctl = tg3_ioctl;
10551 dev->tx_timeout = tg3_tx_timeout;
10552 dev->poll = tg3_poll;
10553 dev->ethtool_ops = &tg3_ethtool_ops;
10554 dev->weight = 64;
10555 dev->watchdog_timeo = TG3_TX_TIMEOUT;
10556 dev->change_mtu = tg3_change_mtu;
10557 dev->irq = pdev->irq;
10558#ifdef CONFIG_NET_POLL_CONTROLLER
10559 dev->poll_controller = tg3_poll_controller;
10560#endif
10561
10562 err = tg3_get_invariants(tp);
10563 if (err) {
10564 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10565 "aborting.\n");
10566 goto err_out_iounmap;
10567 }
10568
fdfec172 10569 tg3_init_bufmgr_config(tp);
1da177e4
LT
10570
10571#if TG3_TSO_SUPPORT != 0
10572 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10573 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10574 }
10575 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10577 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10578 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10579 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10580 } else {
10581 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10582 }
10583
10584 /* TSO is off by default, user can enable using ethtool. */
10585#if 0
10586 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10587 dev->features |= NETIF_F_TSO;
10588#endif
10589
10590#endif
10591
10592 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10593 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10594 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10595 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10596 tp->rx_pending = 63;
10597 }
10598
10599 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10600 tp->pdev_peer = tg3_find_5704_peer(tp);
10601
10602 err = tg3_get_device_address(tp);
10603 if (err) {
10604 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10605 "aborting.\n");
10606 goto err_out_iounmap;
10607 }
10608
10609 /*
10610 * Reset chip in case UNDI or EFI driver did not shutdown
10611 * DMA self test will enable WDMAC and we'll see (spurious)
10612 * pending DMA on the PCI bus at that point.
10613 */
10614 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10615 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10616 pci_save_state(tp->pdev);
10617 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 10618 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
10619 }
10620
10621 err = tg3_test_dma(tp);
10622 if (err) {
10623 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10624 goto err_out_iounmap;
10625 }
10626
10627 /* Tigon3 can do ipv4 only... and some chips have buggy
10628 * checksumming.
10629 */
10630 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10631 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10632 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10633 } else
10634 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10635
10636 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10637 dev->features &= ~NETIF_F_HIGHDMA;
10638
10639 /* flow control autonegotiation is default behavior */
10640 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10641
15f9850d
DM
10642 tg3_init_coal(tp);
10643
7d3f4c97
DM
10644 /* Now that we have fully setup the chip, save away a snapshot
10645 * of the PCI config space. We need to restore this after
10646 * GRC_MISC_CFG core clock resets and some resume events.
10647 */
10648 pci_save_state(tp->pdev);
10649
1da177e4
LT
10650 err = register_netdev(dev);
10651 if (err) {
10652 printk(KERN_ERR PFX "Cannot register net device, "
10653 "aborting.\n");
10654 goto err_out_iounmap;
10655 }
10656
10657 pci_set_drvdata(pdev, dev);
10658
1da177e4
LT
10659 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
10660 dev->name,
10661 tp->board_part_number,
10662 tp->pci_chip_rev_id,
10663 tg3_phy_string(tp),
10664 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
10665 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10666 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10667 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10668 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10669 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10670
10671 for (i = 0; i < 6; i++)
10672 printk("%2.2x%c", dev->dev_addr[i],
10673 i == 5 ? '\n' : ':');
10674
10675 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10676 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10677 "TSOcap[%d] \n",
10678 dev->name,
10679 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10680 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10681 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10682 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10683 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10684 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10685 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
59e6b434
DM
10686 printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10687 dev->name, tp->dma_rwctrl);
1da177e4
LT
10688
10689 return 0;
10690
10691err_out_iounmap:
6892914f
MC
10692 if (tp->regs) {
10693 iounmap(tp->regs);
22abe310 10694 tp->regs = NULL;
6892914f 10695 }
1da177e4
LT
10696
10697err_out_free_dev:
10698 free_netdev(dev);
10699
10700err_out_free_res:
10701 pci_release_regions(pdev);
10702
10703err_out_disable_pdev:
10704 pci_disable_device(pdev);
10705 pci_set_drvdata(pdev, NULL);
10706 return err;
10707}
10708
10709static void __devexit tg3_remove_one(struct pci_dev *pdev)
10710{
10711 struct net_device *dev = pci_get_drvdata(pdev);
10712
10713 if (dev) {
10714 struct tg3 *tp = netdev_priv(dev);
10715
10716 unregister_netdev(dev);
6892914f
MC
10717 if (tp->regs) {
10718 iounmap(tp->regs);
22abe310 10719 tp->regs = NULL;
6892914f 10720 }
1da177e4
LT
10721 free_netdev(dev);
10722 pci_release_regions(pdev);
10723 pci_disable_device(pdev);
10724 pci_set_drvdata(pdev, NULL);
10725 }
10726}
10727
/* PCI power-management suspend hook: quiesce the chip and put it into
 * the low-power state chosen for @state.  If the power transition
 * fails, the device is fully re-initialized and restarted so the
 * interface is not left dead, and the error is propagated.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface is down: nothing is running, nothing to quiesce. */
	if (!netif_running(dev))
		return 0;

	/* Stop TX queue and poll processing before touching the chip. */
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* irq_sync=1: also wait out any in-flight interrupt handler
	 * before disabling interrupts.
	 */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Chip is quiet now; issue the shutdown reset under the lock. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the hardware back up and
		 * resume normal operation rather than leaving the device
		 * halted, then report the failure to the PM core.
		 */
		tg3_full_lock(tp, 0);

		tg3_init_hw(tp);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}
10768
/* PCI power-management resume hook: restore PCI config space, power
 * the chip back up, and restart the interface if it was running when
 * tg3_suspend() was called.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down across suspend: nothing to restart. */
	if (!netif_running(dev))
		return 0;

	/* Config space was saved earlier via pci_save_state(). */
	pci_restore_state(tp->pdev);

	/* Second argument 0 — presumably full-power (D0) state; verify
	 * against tg3_set_power_state()'s definition.
	 */
	err = tg3_set_power_state(tp, 0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_init_hw(tp);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
10799
/* PCI driver glue: binds tg3_pci_tbl device IDs to the probe/remove
 * and power-management entry points above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
10808
10809static int __init tg3_init(void)
10810{
10811 return pci_module_init(&tg3_driver);
10812}
10813
/* Module exit point: detach from all bound devices and unregister the
 * driver from the PCI core.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);