]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Add various register methods
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
39
40#include <net/checksum.h>
41
42#include <asm/system.h>
43#include <asm/io.h>
44#include <asm/byteorder.h>
45#include <asm/uaccess.h>
46
47#ifdef CONFIG_SPARC64
48#include <asm/idprom.h>
49#include <asm/oplib.h>
50#include <asm/pbm.h>
51#endif
52
53#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54#define TG3_VLAN_TAG_USED 1
55#else
56#define TG3_VLAN_TAG_USED 0
57#endif
58
59#ifdef NETIF_F_TSO
60#define TG3_TSO_SUPPORT 1
61#else
62#define TG3_TSO_SUPPORT 0
63#endif
64
65#include "tg3.h"
66
67#define DRV_MODULE_NAME "tg3"
68#define PFX DRV_MODULE_NAME ": "
d4ef1608
MC
69#define DRV_MODULE_VERSION "3.37"
70#define DRV_MODULE_RELDATE "August 25, 2005"
1da177e4
LT
71
72#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0
74#define TG3_DEF_TX_MODE 0
75#define TG3_DEF_MSG_ENABLE \
76 (NETIF_MSG_DRV | \
77 NETIF_MSG_PROBE | \
78 NETIF_MSG_LINK | \
79 NETIF_MSG_TIMER | \
80 NETIF_MSG_IFDOWN | \
81 NETIF_MSG_IFUP | \
82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR)
84
85/* length of time before we decide the hardware is borked,
86 * and dev->tx_timeout() should be called to fix the problem
87 */
88#define TG3_TX_TIMEOUT (5 * HZ)
89
90/* hardware minimum and maximum for a single frame's data payload */
91#define TG3_MIN_MTU 60
92#define TG3_MAX_MTU(tp) \
0f893dc6 93 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
94
95/* These numbers seem to be hard coded in the NIC firmware somehow.
96 * You can't change the ring sizes, but you can change where you place
97 * them in the NIC onboard memory.
98 */
99#define TG3_RX_RING_SIZE 512
100#define TG3_DEF_RX_RING_PENDING 200
101#define TG3_RX_JUMBO_RING_SIZE 256
102#define TG3_DEF_RX_JUMBO_RING_PENDING 100
103
104/* Do not place this n-ring entries value into the tp struct itself,
105 * we really want to expose these constants to GCC so that modulo et
106 * al. operations are done with shifts and masks instead of with
107 * hw multiply/modulo instructions. Another solution would be to
108 * replace things like '% foo' with '& (foo - 1)'.
109 */
110#define TG3_RX_RCB_RING_SIZE(tp) \
111 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
112
113#define TG3_TX_RING_SIZE 512
114#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
115
116#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_RING_SIZE)
118#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_JUMBO_RING_SIZE)
120#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_RCB_RING_SIZE(tp))
122#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
123 TG3_TX_RING_SIZE)
124#define TX_RING_GAP(TP) \
125 (TG3_TX_RING_SIZE - (TP)->tx_pending)
126#define TX_BUFFS_AVAIL(TP) \
127 (((TP)->tx_cons <= (TP)->tx_prod) ? \
128 (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
129 (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134
135/* minimum number of free TX descriptors required to wake up TX process */
136#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137
138/* number of ETHTOOL_GSTATS u64's */
139#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
4cafd3f5
MC
141#define TG3_NUM_TEST 6
142
1da177e4
LT
143static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148MODULE_LICENSE("GPL");
149MODULE_VERSION(DRV_MODULE_VERSION);
150
151static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152module_param(tg3_debug, int, 0);
153MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155static struct pci_device_id tg3_pci_tbl[] = {
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
6e9017a7 214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
af2bcd97 215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d8659255
XVP
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1da177e4
LT
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
4cf78e4f
MC
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1da177e4
LT
228 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244 { 0, }
245};
246
247MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
248
249static struct {
250 const char string[ETH_GSTRING_LEN];
251} ethtool_stats_keys[TG3_NUM_STATS] = {
252 { "rx_octets" },
253 { "rx_fragments" },
254 { "rx_ucast_packets" },
255 { "rx_mcast_packets" },
256 { "rx_bcast_packets" },
257 { "rx_fcs_errors" },
258 { "rx_align_errors" },
259 { "rx_xon_pause_rcvd" },
260 { "rx_xoff_pause_rcvd" },
261 { "rx_mac_ctrl_rcvd" },
262 { "rx_xoff_entered" },
263 { "rx_frame_too_long_errors" },
264 { "rx_jabbers" },
265 { "rx_undersize_packets" },
266 { "rx_in_length_errors" },
267 { "rx_out_length_errors" },
268 { "rx_64_or_less_octet_packets" },
269 { "rx_65_to_127_octet_packets" },
270 { "rx_128_to_255_octet_packets" },
271 { "rx_256_to_511_octet_packets" },
272 { "rx_512_to_1023_octet_packets" },
273 { "rx_1024_to_1522_octet_packets" },
274 { "rx_1523_to_2047_octet_packets" },
275 { "rx_2048_to_4095_octet_packets" },
276 { "rx_4096_to_8191_octet_packets" },
277 { "rx_8192_to_9022_octet_packets" },
278
279 { "tx_octets" },
280 { "tx_collisions" },
281
282 { "tx_xon_sent" },
283 { "tx_xoff_sent" },
284 { "tx_flow_control" },
285 { "tx_mac_errors" },
286 { "tx_single_collisions" },
287 { "tx_mult_collisions" },
288 { "tx_deferred" },
289 { "tx_excessive_collisions" },
290 { "tx_late_collisions" },
291 { "tx_collide_2times" },
292 { "tx_collide_3times" },
293 { "tx_collide_4times" },
294 { "tx_collide_5times" },
295 { "tx_collide_6times" },
296 { "tx_collide_7times" },
297 { "tx_collide_8times" },
298 { "tx_collide_9times" },
299 { "tx_collide_10times" },
300 { "tx_collide_11times" },
301 { "tx_collide_12times" },
302 { "tx_collide_13times" },
303 { "tx_collide_14times" },
304 { "tx_collide_15times" },
305 { "tx_ucast_packets" },
306 { "tx_mcast_packets" },
307 { "tx_bcast_packets" },
308 { "tx_carrier_sense_errors" },
309 { "tx_discards" },
310 { "tx_errors" },
311
312 { "dma_writeq_full" },
313 { "dma_write_prioq_full" },
314 { "rxbds_empty" },
315 { "rx_discards" },
316 { "rx_errors" },
317 { "rx_threshold_hit" },
318
319 { "dma_readq_full" },
320 { "dma_read_prioq_full" },
321 { "tx_comp_queue_full" },
322
323 { "ring_set_send_prod_index" },
324 { "ring_status_update" },
325 { "nic_irqs" },
326 { "nic_avoided_irqs" },
327 { "nic_tx_threshold_hit" }
328};
329
4cafd3f5
MC
/* Self-test names reported via ethtool (ETH_SS_TEST).  Presumably the
 * order must match the order the driver's self-test fills in its
 * results array — verify against the ethtool ops before reordering.
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};
340
1da177e4
LT
341static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342{
1ee582d8
MC
343 spin_lock_bh(&tp->indirect_lock);
344 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
345 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
346 spin_unlock_bh(&tp->indirect_lock);
347}
348
349static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
350{
351 writel(val, tp->regs + off);
352 readl(tp->regs + off);
1da177e4
LT
353}
354
355static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356{
357 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
f47c11ee 358 spin_lock_bh(&tp->indirect_lock);
1da177e4
LT
359 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
f47c11ee 361 spin_unlock_bh(&tp->indirect_lock);
1da177e4
LT
362 } else {
363 void __iomem *dest = tp->regs + off;
364 writel(val, dest);
365 readl(dest); /* always flush PCI write */
366 }
367}
368
/* Post a value to a TX mailbox register, applying chip workarounds:
 * TXD_MBOX_HWBUG parts need the value written twice, and parts that
 * may reorder mailbox writes need a read-back to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
378
20094930
MC
/* Plain MMIO register write; no post-write flush. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
1da177e4 383
20094930
MC
384static u32 tg3_read32(struct tg3 *tp, u32 off)
385{
386 return (readl(tp->regs + off));
387}
388
389#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
390#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
391#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
392
393#define tw32(reg,val) tp->write32(tp, reg, val)
1da177e4 394#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
20094930 395#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
396
/* Write one 32-bit word of NIC on-board SRAM at offset @off through
 * the PCI memory window; serialized by indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	spin_lock_bh(&tp->indirect_lock);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_bh(&tp->indirect_lock);
}
407
/* Read one 32-bit word of NIC on-board SRAM at offset @off into *val
 * through the PCI memory window; serialized by indirect_lock.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	spin_lock_bh(&tp->indirect_lock);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_bh(&tp->indirect_lock);
}
418
/* Mask chip interrupts: set the PCI INT mask bit, raise the interrupt
 * mailbox so the chip holds off, then flush with a read-back.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}
426
/* If the status block shows an update pending, ask the chip to raise
 * an interrupt so the pending work is not lost.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (tp->hw_status->status & SD_STATUS_UPDATED)
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
432
/* Re-enable chip interrupts.  irq_sync is cleared first with a write
 * barrier — presumably so other code observing irq_sync sees it drop
 * before an interrupt can be delivered (TODO confirm against the irq
 * handler).  The mailbox write acks work up to last_tag, the read-back
 * flushes it, and tg3_cond_int() fires a manual interrupt if work was
 * already pending while interrupts were off.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     (tp->last_tag << 24));
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
	tg3_cond_int(tp);
}
445
04237ddd
MC
446static inline unsigned int tg3_has_work(struct tg3 *tp)
447{
448 struct tg3_hw_status *sblk = tp->hw_status;
449 unsigned int work_exists = 0;
450
451 /* check for phy events */
452 if (!(tp->tg3_flags &
453 (TG3_FLAG_USE_LINKCHG_REG |
454 TG3_FLAG_POLL_SERDES))) {
455 if (sblk->status & SD_STATUS_LINK_CHG)
456 work_exists = 1;
457 }
458 /* check for RX/TX work to do */
459 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
460 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
461 work_exists = 1;
462
463 return work_exists;
464}
465
/* tg3_restart_ints
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
488
/* Quiesce the data path: stop NAPI polling and the TX queue.
 * trans_start is refreshed first so the netdev watchdog does not fire
 * a spurious tx_timeout while the device is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
495
/* Counterpart to tg3_netif_stop(): restart the TX queue and polling,
 * then mark the status block updated and re-enable interrupts so any
 * events that arrived while stopped get processed.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
507
/* Step the chip's core clock selection down to the base rate.
 * 5705+ parts switch away from the 625 MHz core clock in one write;
 * older parts must go through the 44 MHz/ALTCLK intermediate step.
 * 5780 must not be touched here.  Each write is flushed and followed
 * by a 40 us settle delay — do not reorder this sequence.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN controls and the low divider bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_f(TG3PCI_CLOCK_CTRL,
			       clock_ctrl | CLOCK_CTRL_625_CORE);
			udelay(40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_f(TG3PCI_CLOCK_CTRL,
		       clock_ctrl |
		       (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
		udelay(40);
		tw32_f(TG3PCI_CLOCK_CTRL,
		       clock_ctrl | (CLOCK_CTRL_ALTCLK));
		udelay(40);
	}
	tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
	udelay(40);
}
540
541#define PHY_BUSY_LOOPS 5000
542
/* Read PHY register @reg over MDIO via the MAC_MI_COM frame register.
 * Hardware auto-polling is temporarily disabled so our frame does not
 * collide with the MAC's own polling, then restored afterwards.
 * Returns 0 with *val filled in, or -EBUSY if the MDIO transaction
 * does not complete within PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the BUSY bit to clear; re-read once more after it
	 * does so we pick up the final data value.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
591
/* Write @val to PHY register @reg over MDIO via MAC_MI_COM, with the
 * same auto-poll suspend/restore dance as tg3_readphy().
 * Returns 0 on completion or -EBUSY on MDIO timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write cmd. */
	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the transaction to complete. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
636
637static void tg3_phy_set_wirespeed(struct tg3 *tp)
638{
639 u32 val;
640
641 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
642 return;
643
644 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
645 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
646 tg3_writephy(tp, MII_TG3_AUX_CTRL,
647 (val | (1 << 15) | (1 << 4)));
648}
649
650static int tg3_bmcr_reset(struct tg3 *tp)
651{
652 u32 phy_control;
653 int limit, err;
654
655 /* OK, reset it, and poll the BMCR_RESET bit until it
656 * clears or we time out.
657 */
658 phy_control = BMCR_RESET;
659 err = tg3_writephy(tp, MII_BMCR, phy_control);
660 if (err != 0)
661 return -EBUSY;
662
663 limit = 5000;
664 while (limit--) {
665 err = tg3_readphy(tp, MII_BMCR, &phy_control);
666 if (err != 0)
667 return -EBUSY;
668
669 if ((phy_control & BMCR_RESET) == 0) {
670 udelay(40);
671 break;
672 }
673 udelay(10);
674 }
675 if (limit <= 0)
676 return -EBUSY;
677
678 return 0;
679}
680
681static int tg3_wait_macro_done(struct tg3 *tp)
682{
683 int limit = 100;
684
685 while (limit--) {
686 u32 tmp32;
687
688 if (!tg3_readphy(tp, 0x16, &tmp32)) {
689 if ((tmp32 & 0x1000) == 0)
690 break;
691 }
692 }
693 if (limit <= 0)
694 return -EBUSY;
695
696 return 0;
697}
698
/* Write a fixed test pattern into each of the PHY's four DSP channels
 * and read it back to verify the DSP memory.  On any mismatch or
 * macro timeout, *resetp is set so the caller resets the PHY and
 * retries.  Returns 0 if all channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and enter write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to read-back mode for the same channel. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Entries come back as (low, high) word pairs. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: leave the DSP pointing at a
				 * known state before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
764
765static int tg3_phy_reset_chanpat(struct tg3 *tp)
766{
767 int chan;
768
769 for (chan = 0; chan < 4; chan++) {
770 int i;
771
772 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
773 (chan * 0x2000) | 0x0200);
774 tg3_writephy(tp, 0x16, 0x0002);
775 for (i = 0; i < 6; i++)
776 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
777 tg3_writephy(tp, 0x16, 0x0202);
778 if (tg3_wait_macro_done(tp))
779 return -EBUSY;
780 }
781
782 return 0;
783}
784
785static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
786{
787 u32 reg32, phy9_orig;
788 int retries, do_phy_reset, err;
789
790 retries = 10;
791 do_phy_reset = 1;
792 do {
793 if (do_phy_reset) {
794 err = tg3_bmcr_reset(tp);
795 if (err)
796 return err;
797 do_phy_reset = 0;
798 }
799
800 /* Disable transmitter and interrupt. */
801 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
802 continue;
803
804 reg32 |= 0x3000;
805 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
806
807 /* Set full-duplex, 1000 mbps. */
808 tg3_writephy(tp, MII_BMCR,
809 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
810
811 /* Set to master mode. */
812 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
813 continue;
814
815 tg3_writephy(tp, MII_TG3_CTRL,
816 (MII_TG3_CTRL_AS_MASTER |
817 MII_TG3_CTRL_ENABLE_AS_MASTER));
818
819 /* Enable SM_DSP_CLOCK and 6dB. */
820 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
821
822 /* Block the PHY control access. */
823 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
824 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
825
826 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
827 if (!err)
828 break;
829 } while (--retries);
830
831 err = tg3_phy_reset_chanpat(tp);
832 if (err)
833 return err;
834
835 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
836 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
837
838 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
839 tg3_writephy(tp, 0x16, 0x0000);
840
841 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
842 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
843 /* Set Extended packet length bit for jumbo frames */
844 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
845 }
846 else {
847 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
848 }
849
850 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
851
852 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
853 reg32 &= ~0x3000;
854 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
855 } else if (!err)
856 err = -EBUSY;
857
858 return err;
859}
860
/* Reset the tigon3 PHY and apply per-chip PHY bug workarounds, then
 * re-enable wirespeed.  (An older comment mentioned a FORCE argument;
 * there is none — the reset is unconditional.)
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* Read BMSR twice; status bits are latched, so the second read
	 * reflects current state.
	 */
	err = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* 5703/5704/5705 need the full DSP verification sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* ADC bug workaround: magic DSP writes per errata. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* 5704 A0 workaround: the double write is intentional. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	/* Bit-error-rate bug workaround: magic DSP writes per errata. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
938
939static void tg3_frob_aux_power(struct tg3 *tp)
940{
941 struct tg3 *tp_peer = tp;
942
943 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
944 return;
945
946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
947 tp_peer = pci_get_drvdata(tp->pdev_peer);
948 if (!tp_peer)
949 BUG();
950 }
951
952
953 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
954 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
957 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
958 (GRC_LCLCTRL_GPIO_OE0 |
959 GRC_LCLCTRL_GPIO_OE1 |
960 GRC_LCLCTRL_GPIO_OE2 |
961 GRC_LCLCTRL_GPIO_OUTPUT0 |
962 GRC_LCLCTRL_GPIO_OUTPUT1));
963 udelay(100);
964 } else {
965 u32 no_gpio2;
966 u32 grc_local_ctrl;
967
968 if (tp_peer != tp &&
969 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
970 return;
971
972 /* On 5753 and variants, GPIO2 cannot be used. */
973 no_gpio2 = tp->nic_sram_data_cfg &
974 NIC_SRAM_DATA_CFG_NO_GPIO2;
975
976 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
977 GRC_LCLCTRL_GPIO_OE1 |
978 GRC_LCLCTRL_GPIO_OE2 |
979 GRC_LCLCTRL_GPIO_OUTPUT1 |
980 GRC_LCLCTRL_GPIO_OUTPUT2;
981 if (no_gpio2) {
982 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
983 GRC_LCLCTRL_GPIO_OUTPUT2);
984 }
985 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
986 grc_local_ctrl);
987 udelay(100);
988
989 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
990
991 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
992 grc_local_ctrl);
993 udelay(100);
994
995 if (!no_gpio2) {
996 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
997 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
998 grc_local_ctrl);
999 udelay(100);
1000 }
1001 }
1002 } else {
1003 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1004 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1005 if (tp_peer != tp &&
1006 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1007 return;
1008
1009 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1010 (GRC_LCLCTRL_GPIO_OE1 |
1011 GRC_LCLCTRL_GPIO_OUTPUT1));
1012 udelay(100);
1013
1014 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1015 (GRC_LCLCTRL_GPIO_OE1));
1016 udelay(100);
1017
1018 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1019 (GRC_LCLCTRL_GPIO_OE1 |
1020 GRC_LCLCTRL_GPIO_OUTPUT1));
1021 udelay(100);
1022 }
1023 }
1024}
1025
1026static int tg3_setup_phy(struct tg3 *, int);
1027
1028#define RESET_KIND_SHUTDOWN 0
1029#define RESET_KIND_INIT 1
1030#define RESET_KIND_SUSPEND 2
1031
1032static void tg3_write_sig_post_reset(struct tg3 *, int);
1033static int tg3_halt_cpu(struct tg3 *, u32);
1034
/* Transition the NIC into the requested PCI power state.
 *
 * @tp:    device private state
 * @state: 0 = D0 (full power); 1, 2, 3 = D1/D2/D3 low-power states
 *
 * Returns 0 on success, -EINVAL for an unrecognized state.  For the
 * low-power states this also saves the current link configuration,
 * drops copper links to 10/half, arms WOL if enabled, and gates chip
 * clocks as aggressively as the ASIC revision allows.
 */
static int tg3_set_power_state(struct tg3 *tp, int state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear any pending PME status and the current power-state bits. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case 0:
		/* D0: write the new state and return immediately — no
		 * WOL arming or clock gating is needed at full power.
		 */
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
			udelay(100);
		}

		return 0;

	case 1:
		power_control |= 1;
		break;

	case 2:
		power_control |= 2;
		break;

	case 3:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is in a low-power state. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Remember the link configuration so it can be restored when
	 * the device comes back to full power.
	 */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper PHYs are dropped to 10/half to minimize power draw. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Arm magic-packet wakeup only when the device can raise
		 * PME from D3cold.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Leave the receiver running so WOL frames are seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate chip clocks; the exact recipe depends on the ASIC rev. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_f(TG3PCI_CLOCK_CTRL, base_val |
		       CLOCK_CTRL_ALTCLK |
		       CLOCK_CTRL_PWRDOWN_PLL133);
		udelay(40);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write: switch clock source, then slow the core. */
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
		udelay(40);

		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
		udelay(40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_f(TG3PCI_CLOCK_CTRL,
			       tp->pci_clock_ctrl | newbits3);
			udelay(40);
		}
	}

	/* Re-evaluate who supplies auxiliary (Vaux) power. */
	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
			tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
1218
1219static void tg3_link_report(struct tg3 *tp)
1220{
1221 if (!netif_carrier_ok(tp->dev)) {
1222 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1223 } else {
1224 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1225 tp->dev->name,
1226 (tp->link_config.active_speed == SPEED_1000 ?
1227 1000 :
1228 (tp->link_config.active_speed == SPEED_100 ?
1229 100 : 10)),
1230 (tp->link_config.active_duplex == DUPLEX_FULL ?
1231 "full" : "half"));
1232
1233 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1234 "%s for RX.\n",
1235 tp->dev->name,
1236 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1237 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1238 }
1239}
1240
1241static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1242{
1243 u32 new_tg3_flags = 0;
1244 u32 old_rx_mode = tp->rx_mode;
1245 u32 old_tx_mode = tp->tx_mode;
1246
1247 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
747e8f8b
MC
1248
1249 /* Convert 1000BaseX flow control bits to 1000BaseT
1250 * bits before resolving flow control.
1251 */
1252 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1253 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1254 ADVERTISE_PAUSE_ASYM);
1255 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1256
1257 if (local_adv & ADVERTISE_1000XPAUSE)
1258 local_adv |= ADVERTISE_PAUSE_CAP;
1259 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1260 local_adv |= ADVERTISE_PAUSE_ASYM;
1261 if (remote_adv & LPA_1000XPAUSE)
1262 remote_adv |= LPA_PAUSE_CAP;
1263 if (remote_adv & LPA_1000XPAUSE_ASYM)
1264 remote_adv |= LPA_PAUSE_ASYM;
1265 }
1266
1da177e4
LT
1267 if (local_adv & ADVERTISE_PAUSE_CAP) {
1268 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1269 if (remote_adv & LPA_PAUSE_CAP)
1270 new_tg3_flags |=
1271 (TG3_FLAG_RX_PAUSE |
1272 TG3_FLAG_TX_PAUSE);
1273 else if (remote_adv & LPA_PAUSE_ASYM)
1274 new_tg3_flags |=
1275 (TG3_FLAG_RX_PAUSE);
1276 } else {
1277 if (remote_adv & LPA_PAUSE_CAP)
1278 new_tg3_flags |=
1279 (TG3_FLAG_RX_PAUSE |
1280 TG3_FLAG_TX_PAUSE);
1281 }
1282 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1283 if ((remote_adv & LPA_PAUSE_CAP) &&
1284 (remote_adv & LPA_PAUSE_ASYM))
1285 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1286 }
1287
1288 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1289 tp->tg3_flags |= new_tg3_flags;
1290 } else {
1291 new_tg3_flags = tp->tg3_flags;
1292 }
1293
1294 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1295 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1296 else
1297 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1298
1299 if (old_rx_mode != tp->rx_mode) {
1300 tw32_f(MAC_RX_MODE, tp->rx_mode);
1301 }
1302
1303 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1304 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1305 else
1306 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1307
1308 if (old_tx_mode != tp->tx_mode) {
1309 tw32_f(MAC_TX_MODE, tp->tx_mode);
1310 }
1311}
1312
1313static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1314{
1315 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1316 case MII_TG3_AUX_STAT_10HALF:
1317 *speed = SPEED_10;
1318 *duplex = DUPLEX_HALF;
1319 break;
1320
1321 case MII_TG3_AUX_STAT_10FULL:
1322 *speed = SPEED_10;
1323 *duplex = DUPLEX_FULL;
1324 break;
1325
1326 case MII_TG3_AUX_STAT_100HALF:
1327 *speed = SPEED_100;
1328 *duplex = DUPLEX_HALF;
1329 break;
1330
1331 case MII_TG3_AUX_STAT_100FULL:
1332 *speed = SPEED_100;
1333 *duplex = DUPLEX_FULL;
1334 break;
1335
1336 case MII_TG3_AUX_STAT_1000HALF:
1337 *speed = SPEED_1000;
1338 *duplex = DUPLEX_HALF;
1339 break;
1340
1341 case MII_TG3_AUX_STAT_1000FULL:
1342 *speed = SPEED_1000;
1343 *duplex = DUPLEX_FULL;
1344 break;
1345
1346 default:
1347 *speed = SPEED_INVALID;
1348 *duplex = DUPLEX_INVALID;
1349 break;
1350 };
1351}
1352
/* Program the copper PHY's advertisement (or forced-mode) registers
 * according to tp->link_config, then either force the mode via BMCR or
 * kick off autonegotiation.  Three configurations are handled: low-power
 * (10Mb, optionally 100Mb for WOL), full autoneg with everything
 * advertised, and a user-requested specific speed/duplex.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode. Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb available when WOL needs it. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything the
		 * hardware supports.
		 */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 parts prefer to be gigabit master. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and wait for the link to
			 * drop before writing the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice — the second read is
				 * the one trusted for latched status bits.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1491
1492static int tg3_init_5401phy_dsp(struct tg3 *tp)
1493{
1494 int err;
1495
1496 /* Turn off tap power management. */
1497 /* Set Extended packet length bit */
1498 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1499
1500 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1501 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1502
1503 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1504 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1505
1506 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1507 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1508
1509 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1510 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1511
1512 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1513 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1514
1515 udelay(40);
1516
1517 return err;
1518}
1519
1520static int tg3_copper_is_advertising_all(struct tg3 *tp)
1521{
1522 u32 adv_reg, all_mask;
1523
1524 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1525 return 0;
1526
1527 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1528 ADVERTISE_100HALF | ADVERTISE_100FULL);
1529 if ((adv_reg & all_mask) != all_mask)
1530 return 0;
1531 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1532 u32 tg3_ctrl;
1533
1534 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1535 return 0;
1536
1537 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1538 MII_TG3_CTRL_ADV_1000_FULL);
1539 if ((tg3_ctrl & all_mask) != all_mask)
1540 return 0;
1541 }
1542 return 1;
1543}
1544
/* Evaluate and (re)establish the link on a copper PHY.
 *
 * @tp:          device private state
 * @force_reset: non-zero forces a PHY reset before link evaluation
 *
 * Applies per-chip PHY workarounds, determines the current link state,
 * resolves flow control, reprograms the MAC mode to match, and updates
 * the carrier state (reporting changes via tg3_link_report).  Returns 0,
 * or a negative error from the 5401 DSP init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any latched link/config-change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is read twice throughout this function: the second
		 * read is the trusted one for latched status bits.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down on a 5401: reload the DSP coefficients
			 * and wait up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a full reset plus
			 * another DSP reload if the link stayed down.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Make sure bit 10 of aux control is set; if it was not,
		 * set it and skip straight to re-linking.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link up (up to 100 tries). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode speed/duplex. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane BMCR value (0x7fff looks like a bad read). */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link counts only if the PHY matches
			 * the requested speed/duplex with autoneg off.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong. Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode / duplex to match the link. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Notify on-chip firmware of a 1G link on 5700 in PCI-X or
	 * high-speed PCI mode via the firmware mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
1823
/* Software state for the fiber autonegotiation state machine driven by
 * tg3_fiber_aneg_smachine().  One instance lives on the stack for the
 * duration of fiber_autoneg().
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the state machine */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;	/* MR_* control/status bits, including link-partner ability */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Timestamps in state-machine ticks (one tick per invocation). */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last stable received config word */
	int ability_match_count;	/* consecutive identical config words seen */

	char ability_match, idle_match, ack_match;

	/* Config words sent to / received from the link partner. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080	/* next page */
#define ANEG_CFG_ACK		0x00000040	/* acknowledge */
#define ANEG_CFG_RF2		0x00000020	/* remote fault 2 */
#define ANEG_CFG_RF1		0x00000010	/* remote fault 1 */
#define ANEG_CFG_PS2		0x00000001	/* pause, bit 2 */
#define ANEG_CFG_PS1		0x00008000	/* pause, bit 1 */
#define ANEG_CFG_HD		0x00004000	/* half duplex */
#define ANEG_CFG_FD		0x00002000	/* full duplex */
#define ANEG_CFG_INVAL		0x00001f06	/* bits that must never be set */

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
1887
/* Advance the software fiber autoneg state machine by one tick.
 *
 * @tp: device private state
 * @ap: state machine instance (see struct tg3_fiber_aneginfo)
 *
 * Samples the received config word from the MAC, updates the
 * ability/ack/idle match trackers, then runs one step of the state
 * machine.  Returns ANEG_OK (keep stepping), ANEG_TIMER_ENAB (keep
 * stepping, settle timer running), ANEG_DONE, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick: start from a clean slate. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* Ability match requires the same non-idle config word
		 * to be seen on consecutive ticks.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Send an all-zero config word to restart negotiation. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Wait out the settle time before detecting abilities. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* ACKed word must match the ability we detected
			 * (ignoring the ACK bit itself) or we start over.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the link partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * implemented (see below).
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; wait for idles to settle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2135
2136static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2137{
2138 int res = 0;
2139 struct tg3_fiber_aneginfo aninfo;
2140 int status = ANEG_FAILED;
2141 unsigned int tick;
2142 u32 tmp;
2143
2144 tw32_f(MAC_TX_AUTO_NEG, 0);
2145
2146 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2147 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2148 udelay(40);
2149
2150 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2151 udelay(40);
2152
2153 memset(&aninfo, 0, sizeof(aninfo));
2154 aninfo.flags |= MR_AN_ENABLE;
2155 aninfo.state = ANEG_STATE_UNKNOWN;
2156 aninfo.cur_time = 0;
2157 tick = 0;
2158 while (++tick < 195000) {
2159 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2160 if (status == ANEG_DONE || status == ANEG_FAILED)
2161 break;
2162
2163 udelay(1);
2164 }
2165
2166 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2167 tw32_f(MAC_MODE, tp->mac_mode);
2168 udelay(40);
2169
2170 *flags = aninfo.flags;
2171
2172 if (status == ANEG_DONE &&
2173 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2174 MR_LP_ADV_FULL_DUPLEX)))
2175 res = 1;
2176
2177 return res;
2178}
2179
2180static void tg3_init_bcm8002(struct tg3 *tp)
2181{
2182 u32 mac_status = tr32(MAC_STATUS);
2183 int i;
2184
2185 /* Reset when initting first time or we have a link. */
2186 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2187 !(mac_status & MAC_STATUS_PCS_SYNCED))
2188 return;
2189
2190 /* Set PLL lock range. */
2191 tg3_writephy(tp, 0x16, 0x8007);
2192
2193 /* SW reset */
2194 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2195
2196 /* Wait for reset to complete. */
2197 /* XXX schedule_timeout() ... */
2198 for (i = 0; i < 500; i++)
2199 udelay(10);
2200
2201 /* Config mode; select PMA/Ch 1 regs. */
2202 tg3_writephy(tp, 0x10, 0x8411);
2203
2204 /* Enable auto-lock and comdet, select txclk for tx. */
2205 tg3_writephy(tp, 0x11, 0x0a10);
2206
2207 tg3_writephy(tp, 0x18, 0x00a0);
2208 tg3_writephy(tp, 0x16, 0x41ff);
2209
2210 /* Assert and deassert POR. */
2211 tg3_writephy(tp, 0x13, 0x0400);
2212 udelay(40);
2213 tg3_writephy(tp, 0x13, 0x0000);
2214
2215 tg3_writephy(tp, 0x11, 0x0a50);
2216 udelay(40);
2217 tg3_writephy(tp, 0x11, 0x0a10);
2218
2219 /* Wait for signal to stabilize */
2220 /* XXX schedule_timeout() ... */
2221 for (i = 0; i < 15000; i++)
2222 udelay(10);
2223
2224 /* Deselect the channel register so we can read the PHYID
2225 * later.
2226 */
2227 tg3_writephy(tp, 0x10, 0x8011);
2228}
2229
2230static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2231{
2232 u32 sg_dig_ctrl, sg_dig_status;
2233 u32 serdes_cfg, expected_sg_dig_ctrl;
2234 int workaround, port_a;
2235 int current_link_up;
2236
2237 serdes_cfg = 0;
2238 expected_sg_dig_ctrl = 0;
2239 workaround = 0;
2240 port_a = 1;
2241 current_link_up = 0;
2242
2243 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2244 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2245 workaround = 1;
2246 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2247 port_a = 0;
2248
2249 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2250 /* preserve bits 20-23 for voltage regulator */
2251 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2252 }
2253
2254 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2255
2256 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2257 if (sg_dig_ctrl & (1 << 31)) {
2258 if (workaround) {
2259 u32 val = serdes_cfg;
2260
2261 if (port_a)
2262 val |= 0xc010000;
2263 else
2264 val |= 0x4010000;
2265 tw32_f(MAC_SERDES_CFG, val);
2266 }
2267 tw32_f(SG_DIG_CTRL, 0x01388400);
2268 }
2269 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2270 tg3_setup_flow_control(tp, 0, 0);
2271 current_link_up = 1;
2272 }
2273 goto out;
2274 }
2275
2276 /* Want auto-negotiation. */
2277 expected_sg_dig_ctrl = 0x81388400;
2278
2279 /* Pause capability */
2280 expected_sg_dig_ctrl |= (1 << 11);
2281
2282 /* Asymettric pause */
2283 expected_sg_dig_ctrl |= (1 << 12);
2284
2285 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2286 if (workaround)
2287 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2288 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2289 udelay(5);
2290 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2291
2292 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2293 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2294 MAC_STATUS_SIGNAL_DET)) {
2295 int i;
2296
2297 /* Giver time to negotiate (~200ms) */
2298 for (i = 0; i < 40000; i++) {
2299 sg_dig_status = tr32(SG_DIG_STATUS);
2300 if (sg_dig_status & (0x3))
2301 break;
2302 udelay(5);
2303 }
2304 mac_status = tr32(MAC_STATUS);
2305
2306 if ((sg_dig_status & (1 << 1)) &&
2307 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2308 u32 local_adv, remote_adv;
2309
2310 local_adv = ADVERTISE_PAUSE_CAP;
2311 remote_adv = 0;
2312 if (sg_dig_status & (1 << 19))
2313 remote_adv |= LPA_PAUSE_CAP;
2314 if (sg_dig_status & (1 << 20))
2315 remote_adv |= LPA_PAUSE_ASYM;
2316
2317 tg3_setup_flow_control(tp, local_adv, remote_adv);
2318 current_link_up = 1;
2319 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2320 } else if (!(sg_dig_status & (1 << 1))) {
2321 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2322 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2323 else {
2324 if (workaround) {
2325 u32 val = serdes_cfg;
2326
2327 if (port_a)
2328 val |= 0xc010000;
2329 else
2330 val |= 0x4010000;
2331
2332 tw32_f(MAC_SERDES_CFG, val);
2333 }
2334
2335 tw32_f(SG_DIG_CTRL, 0x01388400);
2336 udelay(40);
2337
2338 /* Link parallel detection - link is up */
2339 /* only if we have PCS_SYNC and not */
2340 /* receiving config code words */
2341 mac_status = tr32(MAC_STATUS);
2342 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2343 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2344 tg3_setup_flow_control(tp, 0, 0);
2345 current_link_up = 1;
2346 }
2347 }
2348 }
2349 }
2350
2351out:
2352 return current_link_up;
2353}
2354
2355static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2356{
2357 int current_link_up = 0;
2358
2359 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2360 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2361 goto out;
2362 }
2363
2364 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2365 u32 flags;
2366 int i;
2367
2368 if (fiber_autoneg(tp, &flags)) {
2369 u32 local_adv, remote_adv;
2370
2371 local_adv = ADVERTISE_PAUSE_CAP;
2372 remote_adv = 0;
2373 if (flags & MR_LP_ADV_SYM_PAUSE)
2374 remote_adv |= LPA_PAUSE_CAP;
2375 if (flags & MR_LP_ADV_ASYM_PAUSE)
2376 remote_adv |= LPA_PAUSE_ASYM;
2377
2378 tg3_setup_flow_control(tp, local_adv, remote_adv);
2379
2380 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2381 current_link_up = 1;
2382 }
2383 for (i = 0; i < 30; i++) {
2384 udelay(20);
2385 tw32_f(MAC_STATUS,
2386 (MAC_STATUS_SYNC_CHANGED |
2387 MAC_STATUS_CFG_CHANGED));
2388 udelay(40);
2389 if ((tr32(MAC_STATUS) &
2390 (MAC_STATUS_SYNC_CHANGED |
2391 MAC_STATUS_CFG_CHANGED)) == 0)
2392 break;
2393 }
2394
2395 mac_status = tr32(MAC_STATUS);
2396 if (current_link_up == 0 &&
2397 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2398 !(mac_status & MAC_STATUS_RCVD_CFG))
2399 current_link_up = 1;
2400 } else {
2401 /* Forcing 1000FD link up. */
2402 current_link_up = 1;
2403 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2404
2405 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2406 udelay(40);
2407 }
2408
2409out:
2410 return current_link_up;
2411}
2412
2413static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2414{
2415 u32 orig_pause_cfg;
2416 u16 orig_active_speed;
2417 u8 orig_active_duplex;
2418 u32 mac_status;
2419 int current_link_up;
2420 int i;
2421
2422 orig_pause_cfg =
2423 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2424 TG3_FLAG_TX_PAUSE));
2425 orig_active_speed = tp->link_config.active_speed;
2426 orig_active_duplex = tp->link_config.active_duplex;
2427
2428 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2429 netif_carrier_ok(tp->dev) &&
2430 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2431 mac_status = tr32(MAC_STATUS);
2432 mac_status &= (MAC_STATUS_PCS_SYNCED |
2433 MAC_STATUS_SIGNAL_DET |
2434 MAC_STATUS_CFG_CHANGED |
2435 MAC_STATUS_RCVD_CFG);
2436 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2437 MAC_STATUS_SIGNAL_DET)) {
2438 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2439 MAC_STATUS_CFG_CHANGED));
2440 return 0;
2441 }
2442 }
2443
2444 tw32_f(MAC_TX_AUTO_NEG, 0);
2445
2446 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2447 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2448 tw32_f(MAC_MODE, tp->mac_mode);
2449 udelay(40);
2450
2451 if (tp->phy_id == PHY_ID_BCM8002)
2452 tg3_init_bcm8002(tp);
2453
2454 /* Enable link change event even when serdes polling. */
2455 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2456 udelay(40);
2457
2458 current_link_up = 0;
2459 mac_status = tr32(MAC_STATUS);
2460
2461 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2462 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2463 else
2464 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2465
2466 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2467 tw32_f(MAC_MODE, tp->mac_mode);
2468 udelay(40);
2469
2470 tp->hw_status->status =
2471 (SD_STATUS_UPDATED |
2472 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2473
2474 for (i = 0; i < 100; i++) {
2475 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2476 MAC_STATUS_CFG_CHANGED));
2477 udelay(5);
2478 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2479 MAC_STATUS_CFG_CHANGED)) == 0)
2480 break;
2481 }
2482
2483 mac_status = tr32(MAC_STATUS);
2484 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2485 current_link_up = 0;
2486 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2487 tw32_f(MAC_MODE, (tp->mac_mode |
2488 MAC_MODE_SEND_CONFIGS));
2489 udelay(1);
2490 tw32_f(MAC_MODE, tp->mac_mode);
2491 }
2492 }
2493
2494 if (current_link_up == 1) {
2495 tp->link_config.active_speed = SPEED_1000;
2496 tp->link_config.active_duplex = DUPLEX_FULL;
2497 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2498 LED_CTRL_LNKLED_OVERRIDE |
2499 LED_CTRL_1000MBPS_ON));
2500 } else {
2501 tp->link_config.active_speed = SPEED_INVALID;
2502 tp->link_config.active_duplex = DUPLEX_INVALID;
2503 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2504 LED_CTRL_LNKLED_OVERRIDE |
2505 LED_CTRL_TRAFFIC_OVERRIDE));
2506 }
2507
2508 if (current_link_up != netif_carrier_ok(tp->dev)) {
2509 if (current_link_up)
2510 netif_carrier_on(tp->dev);
2511 else
2512 netif_carrier_off(tp->dev);
2513 tg3_link_report(tp);
2514 } else {
2515 u32 now_pause_cfg =
2516 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2517 TG3_FLAG_TX_PAUSE);
2518 if (orig_pause_cfg != now_pause_cfg ||
2519 orig_active_speed != tp->link_config.active_speed ||
2520 orig_active_duplex != tp->link_config.active_duplex)
2521 tg3_link_report(tp);
2522 }
2523
2524 return 0;
2525}
2526
747e8f8b
MC
2527static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2528{
2529 int current_link_up, err = 0;
2530 u32 bmsr, bmcr;
2531 u16 current_speed;
2532 u8 current_duplex;
2533
2534 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2535 tw32_f(MAC_MODE, tp->mac_mode);
2536 udelay(40);
2537
2538 tw32(MAC_EVENT, 0);
2539
2540 tw32_f(MAC_STATUS,
2541 (MAC_STATUS_SYNC_CHANGED |
2542 MAC_STATUS_CFG_CHANGED |
2543 MAC_STATUS_MI_COMPLETION |
2544 MAC_STATUS_LNKSTATE_CHANGED));
2545 udelay(40);
2546
2547 if (force_reset)
2548 tg3_phy_reset(tp);
2549
2550 current_link_up = 0;
2551 current_speed = SPEED_INVALID;
2552 current_duplex = DUPLEX_INVALID;
2553
2554 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2555 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2556
2557 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2558
2559 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2560 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2561 /* do nothing, just check for link up at the end */
2562 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2563 u32 adv, new_adv;
2564
2565 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2566 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2567 ADVERTISE_1000XPAUSE |
2568 ADVERTISE_1000XPSE_ASYM |
2569 ADVERTISE_SLCT);
2570
2571 /* Always advertise symmetric PAUSE just like copper */
2572 new_adv |= ADVERTISE_1000XPAUSE;
2573
2574 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2575 new_adv |= ADVERTISE_1000XHALF;
2576 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2577 new_adv |= ADVERTISE_1000XFULL;
2578
2579 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2580 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2581 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2582 tg3_writephy(tp, MII_BMCR, bmcr);
2583
2584 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2585 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2586 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2587
2588 return err;
2589 }
2590 } else {
2591 u32 new_bmcr;
2592
2593 bmcr &= ~BMCR_SPEED1000;
2594 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2595
2596 if (tp->link_config.duplex == DUPLEX_FULL)
2597 new_bmcr |= BMCR_FULLDPLX;
2598
2599 if (new_bmcr != bmcr) {
2600 /* BMCR_SPEED1000 is a reserved bit that needs
2601 * to be set on write.
2602 */
2603 new_bmcr |= BMCR_SPEED1000;
2604
2605 /* Force a linkdown */
2606 if (netif_carrier_ok(tp->dev)) {
2607 u32 adv;
2608
2609 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2610 adv &= ~(ADVERTISE_1000XFULL |
2611 ADVERTISE_1000XHALF |
2612 ADVERTISE_SLCT);
2613 tg3_writephy(tp, MII_ADVERTISE, adv);
2614 tg3_writephy(tp, MII_BMCR, bmcr |
2615 BMCR_ANRESTART |
2616 BMCR_ANENABLE);
2617 udelay(10);
2618 netif_carrier_off(tp->dev);
2619 }
2620 tg3_writephy(tp, MII_BMCR, new_bmcr);
2621 bmcr = new_bmcr;
2622 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2623 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2624 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2625 }
2626 }
2627
2628 if (bmsr & BMSR_LSTATUS) {
2629 current_speed = SPEED_1000;
2630 current_link_up = 1;
2631 if (bmcr & BMCR_FULLDPLX)
2632 current_duplex = DUPLEX_FULL;
2633 else
2634 current_duplex = DUPLEX_HALF;
2635
2636 if (bmcr & BMCR_ANENABLE) {
2637 u32 local_adv, remote_adv, common;
2638
2639 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2640 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2641 common = local_adv & remote_adv;
2642 if (common & (ADVERTISE_1000XHALF |
2643 ADVERTISE_1000XFULL)) {
2644 if (common & ADVERTISE_1000XFULL)
2645 current_duplex = DUPLEX_FULL;
2646 else
2647 current_duplex = DUPLEX_HALF;
2648
2649 tg3_setup_flow_control(tp, local_adv,
2650 remote_adv);
2651 }
2652 else
2653 current_link_up = 0;
2654 }
2655 }
2656
2657 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2658 if (tp->link_config.active_duplex == DUPLEX_HALF)
2659 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2660
2661 tw32_f(MAC_MODE, tp->mac_mode);
2662 udelay(40);
2663
2664 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2665
2666 tp->link_config.active_speed = current_speed;
2667 tp->link_config.active_duplex = current_duplex;
2668
2669 if (current_link_up != netif_carrier_ok(tp->dev)) {
2670 if (current_link_up)
2671 netif_carrier_on(tp->dev);
2672 else {
2673 netif_carrier_off(tp->dev);
2674 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2675 }
2676 tg3_link_report(tp);
2677 }
2678 return err;
2679}
2680
2681static void tg3_serdes_parallel_detect(struct tg3 *tp)
2682{
2683 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2684 /* Give autoneg time to complete. */
2685 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2686 return;
2687 }
2688 if (!netif_carrier_ok(tp->dev) &&
2689 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2690 u32 bmcr;
2691
2692 tg3_readphy(tp, MII_BMCR, &bmcr);
2693 if (bmcr & BMCR_ANENABLE) {
2694 u32 phy1, phy2;
2695
2696 /* Select shadow register 0x1f */
2697 tg3_writephy(tp, 0x1c, 0x7c00);
2698 tg3_readphy(tp, 0x1c, &phy1);
2699
2700 /* Select expansion interrupt status register */
2701 tg3_writephy(tp, 0x17, 0x0f01);
2702 tg3_readphy(tp, 0x15, &phy2);
2703 tg3_readphy(tp, 0x15, &phy2);
2704
2705 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2706 /* We have signal detect and not receiving
2707 * config code words, link is up by parallel
2708 * detection.
2709 */
2710
2711 bmcr &= ~BMCR_ANENABLE;
2712 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2713 tg3_writephy(tp, MII_BMCR, bmcr);
2714 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2715 }
2716 }
2717 }
2718 else if (netif_carrier_ok(tp->dev) &&
2719 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2720 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2721 u32 phy2;
2722
2723 /* Select expansion interrupt status register */
2724 tg3_writephy(tp, 0x17, 0x0f01);
2725 tg3_readphy(tp, 0x15, &phy2);
2726 if (phy2 & 0x20) {
2727 u32 bmcr;
2728
2729 /* Config code words received, turn on autoneg. */
2730 tg3_readphy(tp, MII_BMCR, &bmcr);
2731 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2732
2733 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2734
2735 }
2736 }
2737}
2738
1da177e4
LT
2739static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2740{
2741 int err;
2742
2743 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2744 err = tg3_setup_fiber_phy(tp, force_reset);
747e8f8b
MC
2745 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2746 err = tg3_setup_fiber_mii_phy(tp, force_reset);
1da177e4
LT
2747 } else {
2748 err = tg3_setup_copper_phy(tp, force_reset);
2749 }
2750
2751 if (tp->link_config.active_speed == SPEED_1000 &&
2752 tp->link_config.active_duplex == DUPLEX_HALF)
2753 tw32(MAC_TX_LENGTHS,
2754 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2755 (6 << TX_LENGTHS_IPG_SHIFT) |
2756 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2757 else
2758 tw32(MAC_TX_LENGTHS,
2759 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2760 (6 << TX_LENGTHS_IPG_SHIFT) |
2761 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2762
2763 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2764 if (netif_carrier_ok(tp->dev)) {
2765 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 2766 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
2767 } else {
2768 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2769 }
2770 }
2771
2772 return err;
2773}
2774
2775/* Tigon3 never reports partial packet sends. So we do not
2776 * need special logic to handle SKBs that have not had all
2777 * of their frags sent yet, like SunGEM does.
2778 */
2779static void tg3_tx(struct tg3 *tp)
2780{
2781 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2782 u32 sw_idx = tp->tx_cons;
2783
2784 while (sw_idx != hw_idx) {
2785 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2786 struct sk_buff *skb = ri->skb;
2787 int i;
2788
2789 if (unlikely(skb == NULL))
2790 BUG();
2791
2792 pci_unmap_single(tp->pdev,
2793 pci_unmap_addr(ri, mapping),
2794 skb_headlen(skb),
2795 PCI_DMA_TODEVICE);
2796
2797 ri->skb = NULL;
2798
2799 sw_idx = NEXT_TX(sw_idx);
2800
2801 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2802 if (unlikely(sw_idx == hw_idx))
2803 BUG();
2804
2805 ri = &tp->tx_buffers[sw_idx];
2806 if (unlikely(ri->skb != NULL))
2807 BUG();
2808
2809 pci_unmap_page(tp->pdev,
2810 pci_unmap_addr(ri, mapping),
2811 skb_shinfo(skb)->frags[i].size,
2812 PCI_DMA_TODEVICE);
2813
2814 sw_idx = NEXT_TX(sw_idx);
2815 }
2816
f47c11ee 2817 dev_kfree_skb(skb);
1da177e4
LT
2818 }
2819
2820 tp->tx_cons = sw_idx;
2821
2822 if (netif_queue_stopped(tp->dev) &&
2823 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2824 netif_wake_queue(tp->dev);
2825}
2826
2827/* Returns size of skb allocated or < 0 on error.
2828 *
2829 * We only need to fill in the address because the other members
2830 * of the RX descriptor are invariant, see tg3_init_rings.
2831 *
2832 * Note the purposeful assymetry of cpu vs. chip accesses. For
2833 * posting buffers we only dirty the first cache line of the RX
2834 * descriptor (containing the address). Whereas for the RX status
2835 * buffers the cpu only reads the last cacheline of the RX descriptor
2836 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2837 */
2838static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2839 int src_idx, u32 dest_idx_unmasked)
2840{
2841 struct tg3_rx_buffer_desc *desc;
2842 struct ring_info *map, *src_map;
2843 struct sk_buff *skb;
2844 dma_addr_t mapping;
2845 int skb_size, dest_idx;
2846
2847 src_map = NULL;
2848 switch (opaque_key) {
2849 case RXD_OPAQUE_RING_STD:
2850 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2851 desc = &tp->rx_std[dest_idx];
2852 map = &tp->rx_std_buffers[dest_idx];
2853 if (src_idx >= 0)
2854 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 2855 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
2856 break;
2857
2858 case RXD_OPAQUE_RING_JUMBO:
2859 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2860 desc = &tp->rx_jumbo[dest_idx];
2861 map = &tp->rx_jumbo_buffers[dest_idx];
2862 if (src_idx >= 0)
2863 src_map = &tp->rx_jumbo_buffers[src_idx];
2864 skb_size = RX_JUMBO_PKT_BUF_SZ;
2865 break;
2866
2867 default:
2868 return -EINVAL;
2869 };
2870
2871 /* Do not overwrite any of the map or rp information
2872 * until we are sure we can commit to a new buffer.
2873 *
2874 * Callers depend upon this behavior and assume that
2875 * we leave everything unchanged if we fail.
2876 */
2877 skb = dev_alloc_skb(skb_size);
2878 if (skb == NULL)
2879 return -ENOMEM;
2880
2881 skb->dev = tp->dev;
2882 skb_reserve(skb, tp->rx_offset);
2883
2884 mapping = pci_map_single(tp->pdev, skb->data,
2885 skb_size - tp->rx_offset,
2886 PCI_DMA_FROMDEVICE);
2887
2888 map->skb = skb;
2889 pci_unmap_addr_set(map, mapping, mapping);
2890
2891 if (src_map != NULL)
2892 src_map->skb = NULL;
2893
2894 desc->addr_hi = ((u64)mapping >> 32);
2895 desc->addr_lo = ((u64)mapping & 0xffffffff);
2896
2897 return skb_size;
2898}
2899
2900/* We only need to move over in the address because the other
2901 * members of the RX descriptor are invariant. See notes above
2902 * tg3_alloc_rx_skb for full details.
2903 */
2904static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2905 int src_idx, u32 dest_idx_unmasked)
2906{
2907 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2908 struct ring_info *src_map, *dest_map;
2909 int dest_idx;
2910
2911 switch (opaque_key) {
2912 case RXD_OPAQUE_RING_STD:
2913 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2914 dest_desc = &tp->rx_std[dest_idx];
2915 dest_map = &tp->rx_std_buffers[dest_idx];
2916 src_desc = &tp->rx_std[src_idx];
2917 src_map = &tp->rx_std_buffers[src_idx];
2918 break;
2919
2920 case RXD_OPAQUE_RING_JUMBO:
2921 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2922 dest_desc = &tp->rx_jumbo[dest_idx];
2923 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2924 src_desc = &tp->rx_jumbo[src_idx];
2925 src_map = &tp->rx_jumbo_buffers[src_idx];
2926 break;
2927
2928 default:
2929 return;
2930 };
2931
2932 dest_map->skb = src_map->skb;
2933 pci_unmap_addr_set(dest_map, mapping,
2934 pci_unmap_addr(src_map, mapping));
2935 dest_desc->addr_hi = src_desc->addr_hi;
2936 dest_desc->addr_lo = src_desc->addr_lo;
2937
2938 src_map->skb = NULL;
2939}
2940
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged receive up through the VLAN acceleration path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
2947
2948/* The RX ring scheme is composed of multiple rings which post fresh
2949 * buffers to the chip, and one special ring the chip uses to report
2950 * status back to the host.
2951 *
2952 * The special ring reports the status of received packets to the
2953 * host. The chip does not write into the original descriptor the
2954 * RX buffer was obtained from. The chip simply takes the original
2955 * descriptor as provided by the host, updates the status and length
2956 * field, then writes this into the next status ring entry.
2957 *
2958 * Each ring the host uses to post buffers to the chip is described
2959 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
2960 * it is first placed into the on-chip ram. When the packet's length
2961 * is known, it walks down the TG3_BDINFO entries to select the ring.
2962 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2963 * which is within the range of the new packet's length is chosen.
2964 *
2965 * The "separate ring for rx status" scheme may sound queer, but it makes
2966 * sense from a cache coherency perspective. If only the host writes
2967 * to the buffer post rings, and only the chip writes to the rx status
2968 * rings, then cache lines never move beyond shared-modified state.
2969 * If both the host and chip were to write into the same ring, cache line
2970 * eviction could occur since both entities want it in an exclusive state.
2971 */
2972static int tg3_rx(struct tg3 *tp, int budget)
2973{
2974 u32 work_mask;
483ba50b
MC
2975 u32 sw_idx = tp->rx_rcb_ptr;
2976 u16 hw_idx;
1da177e4
LT
2977 int received;
2978
2979 hw_idx = tp->hw_status->idx[0].rx_producer;
2980 /*
2981 * We need to order the read of hw_idx and the read of
2982 * the opaque cookie.
2983 */
2984 rmb();
1da177e4
LT
2985 work_mask = 0;
2986 received = 0;
2987 while (sw_idx != hw_idx && budget > 0) {
2988 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2989 unsigned int len;
2990 struct sk_buff *skb;
2991 dma_addr_t dma_addr;
2992 u32 opaque_key, desc_idx, *post_ptr;
2993
2994 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2995 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2996 if (opaque_key == RXD_OPAQUE_RING_STD) {
2997 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2998 mapping);
2999 skb = tp->rx_std_buffers[desc_idx].skb;
3000 post_ptr = &tp->rx_std_ptr;
3001 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3002 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3003 mapping);
3004 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3005 post_ptr = &tp->rx_jumbo_ptr;
3006 }
3007 else {
3008 goto next_pkt_nopost;
3009 }
3010
3011 work_mask |= opaque_key;
3012
3013 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3014 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3015 drop_it:
3016 tg3_recycle_rx(tp, opaque_key,
3017 desc_idx, *post_ptr);
3018 drop_it_no_recycle:
3019 /* Other statistics kept track of by card. */
3020 tp->net_stats.rx_dropped++;
3021 goto next_pkt;
3022 }
3023
3024 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3025
3026 if (len > RX_COPY_THRESHOLD
3027 && tp->rx_offset == 2
3028 /* rx_offset != 2 iff this is a 5701 card running
3029 * in PCI-X mode [see tg3_get_invariants()] */
3030 ) {
3031 int skb_size;
3032
3033 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3034 desc_idx, *post_ptr);
3035 if (skb_size < 0)
3036 goto drop_it;
3037
3038 pci_unmap_single(tp->pdev, dma_addr,
3039 skb_size - tp->rx_offset,
3040 PCI_DMA_FROMDEVICE);
3041
3042 skb_put(skb, len);
3043 } else {
3044 struct sk_buff *copy_skb;
3045
3046 tg3_recycle_rx(tp, opaque_key,
3047 desc_idx, *post_ptr);
3048
3049 copy_skb = dev_alloc_skb(len + 2);
3050 if (copy_skb == NULL)
3051 goto drop_it_no_recycle;
3052
3053 copy_skb->dev = tp->dev;
3054 skb_reserve(copy_skb, 2);
3055 skb_put(copy_skb, len);
3056 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3057 memcpy(copy_skb->data, skb->data, len);
3058 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3059
3060 /* We'll reuse the original ring buffer. */
3061 skb = copy_skb;
3062 }
3063
3064 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3065 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3066 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3067 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3068 skb->ip_summed = CHECKSUM_UNNECESSARY;
3069 else
3070 skb->ip_summed = CHECKSUM_NONE;
3071
3072 skb->protocol = eth_type_trans(skb, tp->dev);
3073#if TG3_VLAN_TAG_USED
3074 if (tp->vlgrp != NULL &&
3075 desc->type_flags & RXD_FLAG_VLAN) {
3076 tg3_vlan_rx(tp, skb,
3077 desc->err_vlan & RXD_VLAN_MASK);
3078 } else
3079#endif
3080 netif_receive_skb(skb);
3081
3082 tp->dev->last_rx = jiffies;
3083 received++;
3084 budget--;
3085
3086next_pkt:
3087 (*post_ptr)++;
3088next_pkt_nopost:
483ba50b
MC
3089 sw_idx++;
3090 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
52f6d697
MC
3091
3092 /* Refresh hw_idx to see if there is new work */
3093 if (sw_idx == hw_idx) {
3094 hw_idx = tp->hw_status->idx[0].rx_producer;
3095 rmb();
3096 }
1da177e4
LT
3097 }
3098
3099 /* ACK the status ring. */
483ba50b
MC
3100 tp->rx_rcb_ptr = sw_idx;
3101 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
1da177e4
LT
3102
3103 /* Refill RX ring(s). */
3104 if (work_mask & RXD_OPAQUE_RING_STD) {
3105 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3106 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3107 sw_idx);
3108 }
3109 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3110 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3111 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3112 sw_idx);
3113 }
3114 mmiowb();
3115
3116 return received;
3117}
3118
3119static int tg3_poll(struct net_device *netdev, int *budget)
3120{
3121 struct tg3 *tp = netdev_priv(netdev);
3122 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3123 int done;
3124
1da177e4
LT
3125 /* handle link change and other phy events */
3126 if (!(tp->tg3_flags &
3127 (TG3_FLAG_USE_LINKCHG_REG |
3128 TG3_FLAG_POLL_SERDES))) {
3129 if (sblk->status & SD_STATUS_LINK_CHG) {
3130 sblk->status = SD_STATUS_UPDATED |
3131 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 3132 spin_lock(&tp->lock);
1da177e4 3133 tg3_setup_phy(tp, 0);
f47c11ee 3134 spin_unlock(&tp->lock);
1da177e4
LT
3135 }
3136 }
3137
3138 /* run TX completion thread */
3139 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3140 spin_lock(&tp->tx_lock);
3141 tg3_tx(tp);
3142 spin_unlock(&tp->tx_lock);
3143 }
3144
1da177e4
LT
3145 /* run RX thread, within the bounds set by NAPI.
3146 * All RX "locking" is done by ensuring outside
3147 * code synchronizes with dev->poll()
3148 */
1da177e4
LT
3149 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3150 int orig_budget = *budget;
3151 int work_done;
3152
3153 if (orig_budget > netdev->quota)
3154 orig_budget = netdev->quota;
3155
3156 work_done = tg3_rx(tp, orig_budget);
3157
3158 *budget -= work_done;
3159 netdev->quota -= work_done;
1da177e4
LT
3160 }
3161
f7383c22
DM
3162 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3163 tp->last_tag = sblk->status_tag;
3164 rmb();
cd024c8b 3165 sblk->status &= ~SD_STATUS_UPDATED;
f7383c22 3166
1da177e4 3167 /* if no more work, tell net stack and NIC we're done */
f7383c22 3168 done = !tg3_has_work(tp);
1da177e4 3169 if (done) {
f47c11ee
DM
3170 spin_lock(&tp->lock);
3171 netif_rx_complete(netdev);
1da177e4 3172 tg3_restart_ints(tp);
f47c11ee 3173 spin_unlock(&tp->lock);
1da177e4
LT
3174 }
3175
3176 return (done ? 0 : 1);
3177}
3178
f47c11ee
DM
3179static void tg3_irq_quiesce(struct tg3 *tp)
3180{
3181 BUG_ON(tp->irq_sync);
3182
3183 tp->irq_sync = 1;
3184 smp_mb();
3185
3186 synchronize_irq(tp->pdev->irq);
3187}
3188
3189static inline int tg3_irq_sync(struct tg3 *tp)
3190{
3191 return tp->irq_sync;
3192}
3193
3194/* Fully shutdown all tg3 driver activity elsewhere in the system.
3195 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3196 * with as well. Most of the time, this is not necessary except when
3197 * shutting down the device.
3198 */
3199static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3200{
3201 if (irq_sync)
3202 tg3_irq_quiesce(tp);
3203 spin_lock_bh(&tp->lock);
3204 spin_lock(&tp->tx_lock);
3205}
3206
3207static inline void tg3_full_unlock(struct tg3 *tp)
3208{
3209 spin_unlock(&tp->tx_lock);
3210 spin_unlock_bh(&tp->lock);
3211}
3212
88b06bc2
MC
3213/* MSI ISR - No need to check for interrupt sharing and no need to
3214 * flush status block and interrupt mailbox. PCI ordering rules
3215 * guarantee that MSI will arrive after the status block.
3216 */
3217static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3218{
3219 struct net_device *dev = dev_id;
3220 struct tg3 *tp = netdev_priv(dev);
3221 struct tg3_hw_status *sblk = tp->hw_status;
88b06bc2
MC
3222
3223 /*
fac9b83e 3224 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 3225 * chip-internal interrupt pending events.
fac9b83e 3226 * Writing non-zero to intr-mbox-0 additional tells the
88b06bc2
MC
3227 * NIC to stop sending us irqs, engaging "in-intr-handler"
3228 * event coalescing.
3229 */
3230 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
fac9b83e 3231 tp->last_tag = sblk->status_tag;
cd024c8b 3232 rmb();
f47c11ee
DM
3233 if (tg3_irq_sync(tp))
3234 goto out;
88b06bc2 3235 sblk->status &= ~SD_STATUS_UPDATED;
04237ddd 3236 if (likely(tg3_has_work(tp)))
88b06bc2
MC
3237 netif_rx_schedule(dev); /* schedule NAPI poll */
3238 else {
fac9b83e 3239 /* No work, re-enable interrupts. */
88b06bc2 3240 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
fac9b83e 3241 tp->last_tag << 24);
88b06bc2 3242 }
f47c11ee 3243out:
88b06bc2
MC
3244 return IRQ_RETVAL(1);
3245}
3246
1da177e4
LT
3247static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3248{
3249 struct net_device *dev = dev_id;
3250 struct tg3 *tp = netdev_priv(dev);
3251 struct tg3_hw_status *sblk = tp->hw_status;
1da177e4
LT
3252 unsigned int handled = 1;
3253
1da177e4
LT
3254 /* In INTx mode, it is possible for the interrupt to arrive at
3255 * the CPU before the status block posted prior to the interrupt.
3256 * Reading the PCI State register will confirm whether the
3257 * interrupt is ours and will flush the status block.
3258 */
3259 if ((sblk->status & SD_STATUS_UPDATED) ||
3260 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3261 /*
fac9b83e 3262 * Writing any value to intr-mbox-0 clears PCI INTA# and
1da177e4 3263 * chip-internal interrupt pending events.
fac9b83e 3264 * Writing non-zero to intr-mbox-0 additional tells the
1da177e4
LT
3265 * NIC to stop sending us irqs, engaging "in-intr-handler"
3266 * event coalescing.
3267 */
3268 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3269 0x00000001);
f47c11ee
DM
3270 if (tg3_irq_sync(tp))
3271 goto out;
fac9b83e
DM
3272 sblk->status &= ~SD_STATUS_UPDATED;
3273 if (likely(tg3_has_work(tp)))
3274 netif_rx_schedule(dev); /* schedule NAPI poll */
3275 else {
3276 /* No work, shared interrupt perhaps? re-enable
3277 * interrupts, and flush that PCI write
3278 */
3279 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3280 0x00000000);
3281 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3282 }
3283 } else { /* shared interrupt */
3284 handled = 0;
3285 }
f47c11ee 3286out:
fac9b83e
DM
3287 return IRQ_RETVAL(handled);
3288}
3289
3290static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3291{
3292 struct net_device *dev = dev_id;
3293 struct tg3 *tp = netdev_priv(dev);
3294 struct tg3_hw_status *sblk = tp->hw_status;
fac9b83e
DM
3295 unsigned int handled = 1;
3296
fac9b83e
DM
3297 /* In INTx mode, it is possible for the interrupt to arrive at
3298 * the CPU before the status block posted prior to the interrupt.
3299 * Reading the PCI State register will confirm whether the
3300 * interrupt is ours and will flush the status block.
3301 */
3302 if ((sblk->status & SD_STATUS_UPDATED) ||
3303 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
1da177e4 3304 /*
fac9b83e
DM
3305 * writing any value to intr-mbox-0 clears PCI INTA# and
3306 * chip-internal interrupt pending events.
3307 * writing non-zero to intr-mbox-0 additional tells the
3308 * NIC to stop sending us irqs, engaging "in-intr-handler"
3309 * event coalescing.
1da177e4 3310 */
fac9b83e
DM
3311 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3312 0x00000001);
3313 tp->last_tag = sblk->status_tag;
cd024c8b 3314 rmb();
f47c11ee
DM
3315 if (tg3_irq_sync(tp))
3316 goto out;
1da177e4 3317 sblk->status &= ~SD_STATUS_UPDATED;
04237ddd 3318 if (likely(tg3_has_work(tp)))
1da177e4
LT
3319 netif_rx_schedule(dev); /* schedule NAPI poll */
3320 else {
3321 /* no work, shared interrupt perhaps? re-enable
3322 * interrupts, and flush that PCI write
3323 */
3324 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
fac9b83e 3325 tp->last_tag << 24);
1da177e4
LT
3326 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3327 }
3328 } else { /* shared interrupt */
3329 handled = 0;
3330 }
f47c11ee 3331out:
1da177e4
LT
3332 return IRQ_RETVAL(handled);
3333}
3334
7938109f
MC
3335/* ISR for interrupt test */
3336static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3337 struct pt_regs *regs)
3338{
3339 struct net_device *dev = dev_id;
3340 struct tg3 *tp = netdev_priv(dev);
3341 struct tg3_hw_status *sblk = tp->hw_status;
3342
3343 if (sblk->status & SD_STATUS_UPDATED) {
3344 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3345 0x00000001);
3346 return IRQ_RETVAL(1);
3347 }
3348 return IRQ_RETVAL(0);
3349}
3350
1da177e4 3351static int tg3_init_hw(struct tg3 *);
944d980e 3352static int tg3_halt(struct tg3 *, int, int);
1da177e4
LT
3353
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: fake an interrupt so netconsole et al. can make
 * progress with normal interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3362
3363static void tg3_reset_task(void *_data)
3364{
3365 struct tg3 *tp = _data;
3366 unsigned int restart_timer;
3367
3368 tg3_netif_stop(tp);
3369
f47c11ee 3370 tg3_full_lock(tp, 1);
1da177e4
LT
3371
3372 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3373 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3374
944d980e 3375 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
1da177e4
LT
3376 tg3_init_hw(tp);
3377
3378 tg3_netif_start(tp);
3379
f47c11ee 3380 tg3_full_unlock(tp);
1da177e4
LT
3381
3382 if (restart_timer)
3383 mod_timer(&tp->timer, jiffies + 1);
3384}
3385
3386static void tg3_tx_timeout(struct net_device *dev)
3387{
3388 struct tg3 *tp = netdev_priv(dev);
3389
3390 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3391 dev->name);
3392
3393 schedule_work(&tp->reset_task);
3394}
3395
3396static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3397
3398static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3399 u32 guilty_entry, int guilty_len,
3400 u32 last_plus_one, u32 *start, u32 mss)
3401{
3402 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3403 dma_addr_t new_addr;
3404 u32 entry = *start;
3405 int i;
3406
3407 if (!new_skb) {
3408 dev_kfree_skb(skb);
3409 return -1;
3410 }
3411
3412 /* New SKB is guaranteed to be linear. */
3413 entry = *start;
3414 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3415 PCI_DMA_TODEVICE);
3416 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3417 (skb->ip_summed == CHECKSUM_HW) ?
3418 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3419 *start = NEXT_TX(entry);
3420
3421 /* Now clean up the sw ring entries. */
3422 i = 0;
3423 while (entry != last_plus_one) {
3424 int len;
3425
3426 if (i == 0)
3427 len = skb_headlen(skb);
3428 else
3429 len = skb_shinfo(skb)->frags[i-1].size;
3430 pci_unmap_single(tp->pdev,
3431 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3432 len, PCI_DMA_TODEVICE);
3433 if (i == 0) {
3434 tp->tx_buffers[entry].skb = new_skb;
3435 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3436 } else {
3437 tp->tx_buffers[entry].skb = NULL;
3438 }
3439 entry = NEXT_TX(entry);
3440 i++;
3441 }
3442
3443 dev_kfree_skb(skb);
3444
3445 return 0;
3446}
3447
3448static void tg3_set_txd(struct tg3 *tp, int entry,
3449 dma_addr_t mapping, int len, u32 flags,
3450 u32 mss_and_is_end)
3451{
3452 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3453 int is_end = (mss_and_is_end & 0x1);
3454 u32 mss = (mss_and_is_end >> 1);
3455 u32 vlan_tag = 0;
3456
3457 if (is_end)
3458 flags |= TXD_FLAG_END;
3459 if (flags & TXD_FLAG_VLAN) {
3460 vlan_tag = flags >> 16;
3461 flags &= 0xffff;
3462 }
3463 vlan_tag |= (mss << TXD_MSS_SHIFT);
3464
3465 txd->addr_hi = ((u64) mapping >> 32);
3466 txd->addr_lo = ((u64) mapping & 0xffffffff);
3467 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3468 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3469}
3470
3471static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3472{
3473 u32 base = (u32) mapping & 0xffffffff;
3474
3475 return ((base > 0xffffdcc0) &&
3476 (base + len + 8 < base));
3477}
3478
3479static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3480{
3481 struct tg3 *tp = netdev_priv(dev);
3482 dma_addr_t mapping;
3483 unsigned int i;
3484 u32 len, entry, base_flags, mss;
3485 int would_hit_hwbug;
1da177e4
LT
3486
3487 len = skb_headlen(skb);
3488
3489 /* No BH disabling for tx_lock here. We are running in BH disabled
3490 * context and TX reclaim runs via tp->poll inside of a software
f47c11ee
DM
3491 * interrupt. Furthermore, IRQ processing runs lockless so we have
3492 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 3493 */
f47c11ee 3494 if (!spin_trylock(&tp->tx_lock))
1da177e4 3495 return NETDEV_TX_LOCKED;
1da177e4
LT
3496
3497 /* This is a hard error, log it. */
3498 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3499 netif_stop_queue(dev);
f47c11ee 3500 spin_unlock(&tp->tx_lock);
1da177e4
LT
3501 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3502 dev->name);
3503 return NETDEV_TX_BUSY;
3504 }
3505
3506 entry = tp->tx_prod;
3507 base_flags = 0;
3508 if (skb->ip_summed == CHECKSUM_HW)
3509 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3510#if TG3_TSO_SUPPORT != 0
3511 mss = 0;
3512 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3513 (mss = skb_shinfo(skb)->tso_size) != 0) {
3514 int tcp_opt_len, ip_tcp_len;
3515
3516 if (skb_header_cloned(skb) &&
3517 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3518 dev_kfree_skb(skb);
3519 goto out_unlock;
3520 }
3521
3522 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3523 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3524
3525 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3526 TXD_FLAG_CPU_POST_DMA);
3527
3528 skb->nh.iph->check = 0;
3529 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3530 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3531 skb->h.th->check = 0;
3532 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3533 }
3534 else {
3535 skb->h.th->check =
3536 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3537 skb->nh.iph->daddr,
3538 0, IPPROTO_TCP, 0);
3539 }
3540
3541 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3542 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3543 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3544 int tsflags;
3545
3546 tsflags = ((skb->nh.iph->ihl - 5) +
3547 (tcp_opt_len >> 2));
3548 mss |= (tsflags << 11);
3549 }
3550 } else {
3551 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3552 int tsflags;
3553
3554 tsflags = ((skb->nh.iph->ihl - 5) +
3555 (tcp_opt_len >> 2));
3556 base_flags |= tsflags << 12;
3557 }
3558 }
3559 }
3560#else
3561 mss = 0;
3562#endif
3563#if TG3_VLAN_TAG_USED
3564 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3565 base_flags |= (TXD_FLAG_VLAN |
3566 (vlan_tx_tag_get(skb) << 16));
3567#endif
3568
3569 /* Queue skb data, a.k.a. the main skb fragment. */
3570 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3571
3572 tp->tx_buffers[entry].skb = skb;
3573 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3574
3575 would_hit_hwbug = 0;
3576
3577 if (tg3_4g_overflow_test(mapping, len))
3578 would_hit_hwbug = entry + 1;
3579
3580 tg3_set_txd(tp, entry, mapping, len, base_flags,
3581 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3582
3583 entry = NEXT_TX(entry);
3584
3585 /* Now loop through additional data fragments, and queue them. */
3586 if (skb_shinfo(skb)->nr_frags > 0) {
3587 unsigned int i, last;
3588
3589 last = skb_shinfo(skb)->nr_frags - 1;
3590 for (i = 0; i <= last; i++) {
3591 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3592
3593 len = frag->size;
3594 mapping = pci_map_page(tp->pdev,
3595 frag->page,
3596 frag->page_offset,
3597 len, PCI_DMA_TODEVICE);
3598
3599 tp->tx_buffers[entry].skb = NULL;
3600 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3601
3602 if (tg3_4g_overflow_test(mapping, len)) {
3603 /* Only one should match. */
3604 if (would_hit_hwbug)
3605 BUG();
3606 would_hit_hwbug = entry + 1;
3607 }
3608
3609 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3610 tg3_set_txd(tp, entry, mapping, len,
3611 base_flags, (i == last)|(mss << 1));
3612 else
3613 tg3_set_txd(tp, entry, mapping, len,
3614 base_flags, (i == last));
3615
3616 entry = NEXT_TX(entry);
3617 }
3618 }
3619
3620 if (would_hit_hwbug) {
3621 u32 last_plus_one = entry;
3622 u32 start;
3623 unsigned int len = 0;
3624
3625 would_hit_hwbug -= 1;
3626 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3627 entry &= (TG3_TX_RING_SIZE - 1);
3628 start = entry;
3629 i = 0;
3630 while (entry != last_plus_one) {
3631 if (i == 0)
3632 len = skb_headlen(skb);
3633 else
3634 len = skb_shinfo(skb)->frags[i-1].size;
3635
3636 if (entry == would_hit_hwbug)
3637 break;
3638
3639 i++;
3640 entry = NEXT_TX(entry);
3641
3642 }
3643
3644 /* If the workaround fails due to memory/mapping
3645 * failure, silently drop this packet.
3646 */
3647 if (tigon3_4gb_hwbug_workaround(tp, skb,
3648 entry, len,
3649 last_plus_one,
3650 &start, mss))
3651 goto out_unlock;
3652
3653 entry = start;
3654 }
3655
3656 /* Packets are ready, update Tx producer idx local and on card. */
3657 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3658
3659 tp->tx_prod = entry;
3660 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3661 netif_stop_queue(dev);
3662
3663out_unlock:
3664 mmiowb();
f47c11ee 3665 spin_unlock(&tp->tx_lock);
1da177e4
LT
3666
3667 dev->trans_start = jiffies;
3668
3669 return NETDEV_TX_OK;
3670}
3671
3672static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3673 int new_mtu)
3674{
3675 dev->mtu = new_mtu;
3676
ef7f5ec0
MC
3677 if (new_mtu > ETH_DATA_LEN) {
3678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
3679 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3680 ethtool_op_set_tso(dev, 0);
3681 }
3682 else
3683 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3684 } else {
3685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
3686 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 3687 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 3688 }
1da177e4
LT
3689}
3690
3691static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3692{
3693 struct tg3 *tp = netdev_priv(dev);
3694
3695 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3696 return -EINVAL;
3697
3698 if (!netif_running(dev)) {
3699 /* We'll just catch it later when the
3700 * device is up'd.
3701 */
3702 tg3_set_mtu(dev, tp, new_mtu);
3703 return 0;
3704 }
3705
3706 tg3_netif_stop(tp);
f47c11ee
DM
3707
3708 tg3_full_lock(tp, 1);
1da177e4 3709
944d980e 3710 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
3711
3712 tg3_set_mtu(dev, tp, new_mtu);
3713
3714 tg3_init_hw(tp);
3715
3716 tg3_netif_start(tp);
3717
f47c11ee 3718 tg3_full_unlock(tp);
1da177e4
LT
3719
3720 return 0;
3721}
3722
3723/* Free up pending packets in all rx/tx rings.
3724 *
3725 * The chip has been shut down and the driver detached from
3726 * the networking, so no interrupts or new tx packets will
3727 * end up in the driver. tp->{tx,}lock is not held and we are not
3728 * in an interrupt context and thus may sleep.
3729 */
3730static void tg3_free_rings(struct tg3 *tp)
3731{
3732 struct ring_info *rxp;
3733 int i;
3734
3735 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3736 rxp = &tp->rx_std_buffers[i];
3737
3738 if (rxp->skb == NULL)
3739 continue;
3740 pci_unmap_single(tp->pdev,
3741 pci_unmap_addr(rxp, mapping),
7e72aad4 3742 tp->rx_pkt_buf_sz - tp->rx_offset,
1da177e4
LT
3743 PCI_DMA_FROMDEVICE);
3744 dev_kfree_skb_any(rxp->skb);
3745 rxp->skb = NULL;
3746 }
3747
3748 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3749 rxp = &tp->rx_jumbo_buffers[i];
3750
3751 if (rxp->skb == NULL)
3752 continue;
3753 pci_unmap_single(tp->pdev,
3754 pci_unmap_addr(rxp, mapping),
3755 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3756 PCI_DMA_FROMDEVICE);
3757 dev_kfree_skb_any(rxp->skb);
3758 rxp->skb = NULL;
3759 }
3760
3761 for (i = 0; i < TG3_TX_RING_SIZE; ) {
3762 struct tx_ring_info *txp;
3763 struct sk_buff *skb;
3764 int j;
3765
3766 txp = &tp->tx_buffers[i];
3767 skb = txp->skb;
3768
3769 if (skb == NULL) {
3770 i++;
3771 continue;
3772 }
3773
3774 pci_unmap_single(tp->pdev,
3775 pci_unmap_addr(txp, mapping),
3776 skb_headlen(skb),
3777 PCI_DMA_TODEVICE);
3778 txp->skb = NULL;
3779
3780 i++;
3781
3782 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3783 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3784 pci_unmap_page(tp->pdev,
3785 pci_unmap_addr(txp, mapping),
3786 skb_shinfo(skb)->frags[j].size,
3787 PCI_DMA_TODEVICE);
3788 i++;
3789 }
3790
3791 dev_kfree_skb_any(skb);
3792 }
3793}
3794
3795/* Initialize tx/rx rings for packet processing.
3796 *
3797 * The chip has been shut down and the driver detached from
3798 * the networking, so no interrupts or new tx packets will
3799 * end up in the driver. tp->{tx,}lock are held and thus
3800 * we may not sleep.
3801 */
3802static void tg3_init_rings(struct tg3 *tp)
3803{
3804 u32 i;
3805
3806 /* Free up all the SKBs. */
3807 tg3_free_rings(tp);
3808
3809 /* Zero out all descriptors. */
3810 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3811 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3812 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3813 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3814
7e72aad4
MC
3815 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3816 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) &&
3817 (tp->dev->mtu > ETH_DATA_LEN))
3818 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3819
1da177e4
LT
3820 /* Initialize invariants of the rings, we only set this
3821 * stuff once. This works because the card does not
3822 * write into the rx buffer posting rings.
3823 */
3824 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3825 struct tg3_rx_buffer_desc *rxd;
3826
3827 rxd = &tp->rx_std[i];
7e72aad4 3828 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
1da177e4
LT
3829 << RXD_LEN_SHIFT;
3830 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3831 rxd->opaque = (RXD_OPAQUE_RING_STD |
3832 (i << RXD_OPAQUE_INDEX_SHIFT));
3833 }
3834
0f893dc6 3835 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
3836 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3837 struct tg3_rx_buffer_desc *rxd;
3838
3839 rxd = &tp->rx_jumbo[i];
3840 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3841 << RXD_LEN_SHIFT;
3842 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3843 RXD_FLAG_JUMBO;
3844 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3845 (i << RXD_OPAQUE_INDEX_SHIFT));
3846 }
3847 }
3848
3849 /* Now allocate fresh SKBs for each rx ring. */
3850 for (i = 0; i < tp->rx_pending; i++) {
3851 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3852 -1, i) < 0)
3853 break;
3854 }
3855
0f893dc6 3856 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
3857 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3858 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3859 -1, i) < 0)
3860 break;
3861 }
3862 }
3863}
3864
3865/*
3866 * Must not be invoked with interrupt sources disabled and
3867 * the hardware shutdown down.
3868 */
3869static void tg3_free_consistent(struct tg3 *tp)
3870{
3871 if (tp->rx_std_buffers) {
3872 kfree(tp->rx_std_buffers);
3873 tp->rx_std_buffers = NULL;
3874 }
3875 if (tp->rx_std) {
3876 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3877 tp->rx_std, tp->rx_std_mapping);
3878 tp->rx_std = NULL;
3879 }
3880 if (tp->rx_jumbo) {
3881 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3882 tp->rx_jumbo, tp->rx_jumbo_mapping);
3883 tp->rx_jumbo = NULL;
3884 }
3885 if (tp->rx_rcb) {
3886 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3887 tp->rx_rcb, tp->rx_rcb_mapping);
3888 tp->rx_rcb = NULL;
3889 }
3890 if (tp->tx_ring) {
3891 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3892 tp->tx_ring, tp->tx_desc_mapping);
3893 tp->tx_ring = NULL;
3894 }
3895 if (tp->hw_status) {
3896 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3897 tp->hw_status, tp->status_mapping);
3898 tp->hw_status = NULL;
3899 }
3900 if (tp->hw_stats) {
3901 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3902 tp->hw_stats, tp->stats_mapping);
3903 tp->hw_stats = NULL;
3904 }
3905}
3906
3907/*
3908 * Must not be invoked with interrupt sources disabled and
3909 * the hardware shutdown down. Can sleep.
3910 */
3911static int tg3_alloc_consistent(struct tg3 *tp)
3912{
3913 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3914 (TG3_RX_RING_SIZE +
3915 TG3_RX_JUMBO_RING_SIZE)) +
3916 (sizeof(struct tx_ring_info) *
3917 TG3_TX_RING_SIZE),
3918 GFP_KERNEL);
3919 if (!tp->rx_std_buffers)
3920 return -ENOMEM;
3921
3922 memset(tp->rx_std_buffers, 0,
3923 (sizeof(struct ring_info) *
3924 (TG3_RX_RING_SIZE +
3925 TG3_RX_JUMBO_RING_SIZE)) +
3926 (sizeof(struct tx_ring_info) *
3927 TG3_TX_RING_SIZE));
3928
3929 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3930 tp->tx_buffers = (struct tx_ring_info *)
3931 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3932
3933 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3934 &tp->rx_std_mapping);
3935 if (!tp->rx_std)
3936 goto err_out;
3937
3938 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3939 &tp->rx_jumbo_mapping);
3940
3941 if (!tp->rx_jumbo)
3942 goto err_out;
3943
3944 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3945 &tp->rx_rcb_mapping);
3946 if (!tp->rx_rcb)
3947 goto err_out;
3948
3949 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3950 &tp->tx_desc_mapping);
3951 if (!tp->tx_ring)
3952 goto err_out;
3953
3954 tp->hw_status = pci_alloc_consistent(tp->pdev,
3955 TG3_HW_STATUS_SIZE,
3956 &tp->status_mapping);
3957 if (!tp->hw_status)
3958 goto err_out;
3959
3960 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3961 sizeof(struct tg3_hw_stats),
3962 &tp->stats_mapping);
3963 if (!tp->hw_stats)
3964 goto err_out;
3965
3966 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3967 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3968
3969 return 0;
3970
3971err_out:
3972 tg3_free_consistent(tp);
3973 return -ENOMEM;
3974}
3975
3976#define MAX_WAIT_CNT 1000
3977
3978/* To stop a block, clear the enable bit and poll till it
3979 * clears. tp->lock is held.
3980 */
b3b7d6be 3981static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
3982{
3983 unsigned int i;
3984 u32 val;
3985
3986 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3987 switch (ofs) {
3988 case RCVLSC_MODE:
3989 case DMAC_MODE:
3990 case MBFREE_MODE:
3991 case BUFMGR_MODE:
3992 case MEMARB_MODE:
3993 /* We can't enable/disable these bits of the
3994 * 5705/5750, just say success.
3995 */
3996 return 0;
3997
3998 default:
3999 break;
4000 };
4001 }
4002
4003 val = tr32(ofs);
4004 val &= ~enable_bit;
4005 tw32_f(ofs, val);
4006
4007 for (i = 0; i < MAX_WAIT_CNT; i++) {
4008 udelay(100);
4009 val = tr32(ofs);
4010 if ((val & enable_bit) == 0)
4011 break;
4012 }
4013
b3b7d6be 4014 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4015 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4016 "ofs=%lx enable_bit=%x\n",
4017 ofs, enable_bit);
4018 return -ENODEV;
4019 }
4020
4021 return 0;
4022}
4023
4024/* tp->lock is held. */
b3b7d6be 4025static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
4026{
4027 int i, err;
4028
4029 tg3_disable_ints(tp);
4030
4031 tp->rx_mode &= ~RX_MODE_ENABLE;
4032 tw32_f(MAC_RX_MODE, tp->rx_mode);
4033 udelay(10);
4034
b3b7d6be
DM
4035 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4036 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4037 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4038 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4039 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4040 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4041
4042 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4043 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4044 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4045 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4046 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4047 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4048 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
4049
4050 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4051 tw32_f(MAC_MODE, tp->mac_mode);
4052 udelay(40);
4053
4054 tp->tx_mode &= ~TX_MODE_ENABLE;
4055 tw32_f(MAC_TX_MODE, tp->tx_mode);
4056
4057 for (i = 0; i < MAX_WAIT_CNT; i++) {
4058 udelay(100);
4059 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4060 break;
4061 }
4062 if (i >= MAX_WAIT_CNT) {
4063 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4064 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4065 tp->dev->name, tr32(MAC_TX_MODE));
e6de8ad1 4066 err |= -ENODEV;
1da177e4
LT
4067 }
4068
e6de8ad1 4069 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
4070 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4071 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
4072
4073 tw32(FTQ_RESET, 0xffffffff);
4074 tw32(FTQ_RESET, 0x00000000);
4075
b3b7d6be
DM
4076 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4077 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4
LT
4078
4079 if (tp->hw_status)
4080 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4081 if (tp->hw_stats)
4082 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4083
1da177e4
LT
4084 return err;
4085}
4086
4087/* tp->lock is held. */
4088static int tg3_nvram_lock(struct tg3 *tp)
4089{
4090 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4091 int i;
4092
4093 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4094 for (i = 0; i < 8000; i++) {
4095 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4096 break;
4097 udelay(20);
4098 }
4099 if (i == 8000)
4100 return -ENODEV;
4101 }
4102 return 0;
4103}
4104
4105/* tp->lock is held. */
4106static void tg3_nvram_unlock(struct tg3 *tp)
4107{
4108 if (tp->tg3_flags & TG3_FLAG_NVRAM)
4109 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4110}
4111
e6af301b
MC
4112/* tp->lock is held. */
4113static void tg3_enable_nvram_access(struct tg3 *tp)
4114{
4115 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4116 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4117 u32 nvaccess = tr32(NVRAM_ACCESS);
4118
4119 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4120 }
4121}
4122
4123/* tp->lock is held. */
4124static void tg3_disable_nvram_access(struct tg3 *tp)
4125{
4126 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4127 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4128 u32 nvaccess = tr32(NVRAM_ACCESS);
4129
4130 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4131 }
4132}
4133
1da177e4
LT
4134/* tp->lock is held. */
4135static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4136{
4137 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4138 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4139 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4140
4141 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4142 switch (kind) {
4143 case RESET_KIND_INIT:
4144 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4145 DRV_STATE_START);
4146 break;
4147
4148 case RESET_KIND_SHUTDOWN:
4149 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4150 DRV_STATE_UNLOAD);
4151 break;
4152
4153 case RESET_KIND_SUSPEND:
4154 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4155 DRV_STATE_SUSPEND);
4156 break;
4157
4158 default:
4159 break;
4160 };
4161 }
4162}
4163
4164/* tp->lock is held. */
4165static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4166{
4167 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4168 switch (kind) {
4169 case RESET_KIND_INIT:
4170 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4171 DRV_STATE_START_DONE);
4172 break;
4173
4174 case RESET_KIND_SHUTDOWN:
4175 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4176 DRV_STATE_UNLOAD_DONE);
4177 break;
4178
4179 default:
4180 break;
4181 };
4182 }
4183}
4184
4185/* tp->lock is held. */
4186static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4187{
4188 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4189 switch (kind) {
4190 case RESET_KIND_INIT:
4191 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4192 DRV_STATE_START);
4193 break;
4194
4195 case RESET_KIND_SHUTDOWN:
4196 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4197 DRV_STATE_UNLOAD);
4198 break;
4199
4200 case RESET_KIND_SUSPEND:
4201 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4202 DRV_STATE_SUSPEND);
4203 break;
4204
4205 default:
4206 break;
4207 };
4208 }
4209}
4210
4211static void tg3_stop_fw(struct tg3 *);
4212
4213/* tp->lock is held. */
4214static int tg3_chip_reset(struct tg3 *tp)
4215{
4216 u32 val;
1ee582d8 4217 void (*write_op)(struct tg3 *, u32, u32);
1da177e4
LT
4218 int i;
4219
4220 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4221 tg3_nvram_lock(tp);
4222
4223 /*
4224 * We must avoid the readl() that normally takes place.
4225 * It locks machines, causes machine checks, and other
4226 * fun things. So, temporarily disable the 5701
4227 * hardware workaround, while we do the reset.
4228 */
1ee582d8
MC
4229 write_op = tp->write32;
4230 if (write_op == tg3_write_flush_reg32)
4231 tp->write32 = tg3_write32;
1da177e4
LT
4232
4233 /* do the reset */
4234 val = GRC_MISC_CFG_CORECLK_RESET;
4235
4236 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4237 if (tr32(0x7e2c) == 0x60) {
4238 tw32(0x7e2c, 0x20);
4239 }
4240 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4241 tw32(GRC_MISC_CFG, (1 << 29));
4242 val |= (1 << 29);
4243 }
4244 }
4245
4246 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4247 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4248 tw32(GRC_MISC_CFG, val);
4249
1ee582d8
MC
4250 /* restore 5701 hardware bug workaround write method */
4251 tp->write32 = write_op;
1da177e4
LT
4252
4253 /* Unfortunately, we have to delay before the PCI read back.
4254 * Some 575X chips even will not respond to a PCI cfg access
4255 * when the reset command is given to the chip.
4256 *
4257 * How do these hardware designers expect things to work
4258 * properly if the PCI write is posted for a long period
4259 * of time? It is always necessary to have some method by
4260 * which a register read back can occur to push the write
4261 * out which does the reset.
4262 *
4263 * For most tg3 variants the trick below was working.
4264 * Ho hum...
4265 */
4266 udelay(120);
4267
4268 /* Flush PCI posted writes. The normal MMIO registers
4269 * are inaccessible at this time so this is the only
4270 * way to make this reliably (actually, this is no longer
4271 * the case, see above). I tried to use indirect
4272 * register read/write but this upset some 5701 variants.
4273 */
4274 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4275
4276 udelay(120);
4277
4278 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4279 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4280 int i;
4281 u32 cfg_val;
4282
4283 /* Wait for link training to complete. */
4284 for (i = 0; i < 5000; i++)
4285 udelay(100);
4286
4287 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4288 pci_write_config_dword(tp->pdev, 0xc4,
4289 cfg_val | (1 << 15));
4290 }
4291 /* Set PCIE max payload size and clear error status. */
4292 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4293 }
4294
4295 /* Re-enable indirect register accesses. */
4296 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4297 tp->misc_host_ctrl);
4298
4299 /* Set MAX PCI retry to zero. */
4300 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4301 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4302 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4303 val |= PCISTATE_RETRY_SAME_DMA;
4304 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4305
4306 pci_restore_state(tp->pdev);
4307
4308 /* Make sure PCI-X relaxed ordering bit is clear. */
4309 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4310 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4311 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4312
4cf78e4f
MC
4313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4314 u32 val;
4315
4316 /* Chip reset on 5780 will reset MSI enable bit,
4317 * so need to restore it.
4318 */
4319 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4320 u16 ctrl;
4321
4322 pci_read_config_word(tp->pdev,
4323 tp->msi_cap + PCI_MSI_FLAGS,
4324 &ctrl);
4325 pci_write_config_word(tp->pdev,
4326 tp->msi_cap + PCI_MSI_FLAGS,
4327 ctrl | PCI_MSI_FLAGS_ENABLE);
4328 val = tr32(MSGINT_MODE);
4329 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4330 }
4331
4332 val = tr32(MEMARB_MODE);
4333 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4334
4335 } else
4336 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
1da177e4
LT
4337
4338 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4339 tg3_stop_fw(tp);
4340 tw32(0x5000, 0x400);
4341 }
4342
4343 tw32(GRC_MODE, tp->grc_mode);
4344
4345 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4346 u32 val = tr32(0xc4);
4347
4348 tw32(0xc4, val | (1 << 15));
4349 }
4350
4351 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4352 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4353 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4354 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4355 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4356 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4357 }
4358
4359 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4360 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4361 tw32_f(MAC_MODE, tp->mac_mode);
747e8f8b
MC
4362 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4363 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4364 tw32_f(MAC_MODE, tp->mac_mode);
1da177e4
LT
4365 } else
4366 tw32_f(MAC_MODE, 0);
4367 udelay(40);
4368
4369 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4370 /* Wait for firmware initialization to complete. */
4371 for (i = 0; i < 100000; i++) {
4372 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4373 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4374 break;
4375 udelay(10);
4376 }
4377 if (i >= 100000) {
4378 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4379 "firmware will not restart magic=%08x\n",
4380 tp->dev->name, val);
4381 return -ENODEV;
4382 }
4383 }
4384
4385 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4386 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4387 u32 val = tr32(0x7c00);
4388
4389 tw32(0x7c00, val | (1 << 25));
4390 }
4391
4392 /* Reprobe ASF enable state. */
4393 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4394 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4395 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4396 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4397 u32 nic_cfg;
4398
4399 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4400 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4401 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 4402 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
4403 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4404 }
4405 }
4406
4407 return 0;
4408}
4409
4410/* tp->lock is held. */
4411static void tg3_stop_fw(struct tg3 *tp)
4412{
4413 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4414 u32 val;
4415 int i;
4416
4417 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4418 val = tr32(GRC_RX_CPU_EVENT);
4419 val |= (1 << 14);
4420 tw32(GRC_RX_CPU_EVENT, val);
4421
4422 /* Wait for RX cpu to ACK the event. */
4423 for (i = 0; i < 100; i++) {
4424 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4425 break;
4426 udelay(1);
4427 }
4428 }
4429}
4430
/* tp->lock is held.
 *
 * Bring the chip down: pause any running ASF firmware, write the
 * pre-reset signature for @kind, quiesce the hardware (@silent
 * suppresses tg3_abort_hw() diagnostics) and reset the chip, then
 * write the legacy and post-reset signatures.
 *
 * Returns 0 on success or the error code from tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* The signature writes above must happen even on reset failure,
	 * so the error is only propagated here.
	 */
	return err;
}
4451
/* Link-time layout of the bundled 5701 A0 replacement firmware image
 * (tg3FwText / tg3FwRodata below).  *_ADDR values are the firmware's
 * own link addresses; *_LEN values are section sizes in bytes.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0	/* NOTE(review): historical "RELASE" typo, name kept as-is */
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
4466
/* .text section of the replacement RX CPU firmware loaded by
 * tg3_load_5701_a0_firmware_fix().  Opaque binary image (CPU
 * instruction words); do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
4560
/* .rodata section of the replacement firmware.  The words are packed
 * ASCII message strings used by the firmware (e.g. the bytes of
 * "fatal", "Err", "MainCpuB" are visible in the hex below).
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};

#if 0 /* All zeros, don't eat up space with it. */
/* .data section image: all zeros, so it is not compiled in; the loader
 * zero-fills the section instead (fw_info.data_data == NULL).
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
4575
/* On-chip scratch memory windows (0x4000 = 16KB each) into which the
 * RX and TX CPU firmware images are copied before starting the CPUs.
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
4580
4581/* tp->lock is held. */
4582static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4583{
4584 int i;
4585
4586 if (offset == TX_CPU_BASE &&
4587 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4588 BUG();
4589
4590 if (offset == RX_CPU_BASE) {
4591 for (i = 0; i < 10000; i++) {
4592 tw32(offset + CPU_STATE, 0xffffffff);
4593 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4594 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4595 break;
4596 }
4597
4598 tw32(offset + CPU_STATE, 0xffffffff);
4599 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4600 udelay(10);
4601 } else {
4602 for (i = 0; i < 10000; i++) {
4603 tw32(offset + CPU_STATE, 0xffffffff);
4604 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4605 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4606 break;
4607 }
4608 }
4609
4610 if (i >= 10000) {
4611 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4612 "and %s CPU\n",
4613 tp->dev->name,
4614 (offset == RX_CPU_BASE ? "RX" : "TX"));
4615 return -ENODEV;
4616 }
4617 return 0;
4618}
4619
/* Describes one firmware image for tg3_load_firmware_cpu().  The base
 * addresses are the firmware's link-time addresses (only their low 16
 * bits place a section inside the CPU scratch window); lengths are in
 * bytes.  A NULL data pointer makes the loader zero-fill that section.
 */
struct fw_info {
	unsigned int text_base;		/* .text link address */
	unsigned int text_len;		/* .text size in bytes */
	u32 *text_data;			/* .text words, or NULL to zero-fill */
	unsigned int rodata_base;	/* .rodata link address */
	unsigned int rodata_len;	/* .rodata size in bytes */
	u32 *rodata_data;		/* .rodata words, or NULL to zero-fill */
	unsigned int data_base;		/* .data link address */
	unsigned int data_len;		/* .data size in bytes */
	u32 *data_data;			/* .data words, or NULL to zero-fill */
};
4631
4632/* tp->lock is held. */
4633static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4634 int cpu_scratch_size, struct fw_info *info)
4635{
4636 int err, i;
1da177e4
LT
4637 void (*write_op)(struct tg3 *, u32, u32);
4638
4639 if (cpu_base == TX_CPU_BASE &&
4640 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4641 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4642 "TX cpu firmware on %s which is 5705.\n",
4643 tp->dev->name);
4644 return -EINVAL;
4645 }
4646
4647 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4648 write_op = tg3_write_mem;
4649 else
4650 write_op = tg3_write_indirect_reg32;
4651
1b628151
MC
4652 /* It is possible that bootcode is still loading at this point.
4653 * Get the nvram lock first before halting the cpu.
4654 */
4655 tg3_nvram_lock(tp);
1da177e4 4656 err = tg3_halt_cpu(tp, cpu_base);
1b628151 4657 tg3_nvram_unlock(tp);
1da177e4
LT
4658 if (err)
4659 goto out;
4660
4661 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4662 write_op(tp, cpu_scratch_base + i, 0);
4663 tw32(cpu_base + CPU_STATE, 0xffffffff);
4664 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4665 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4666 write_op(tp, (cpu_scratch_base +
4667 (info->text_base & 0xffff) +
4668 (i * sizeof(u32))),
4669 (info->text_data ?
4670 info->text_data[i] : 0));
4671 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4672 write_op(tp, (cpu_scratch_base +
4673 (info->rodata_base & 0xffff) +
4674 (i * sizeof(u32))),
4675 (info->rodata_data ?
4676 info->rodata_data[i] : 0));
4677 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4678 write_op(tp, (cpu_scratch_base +
4679 (info->data_base & 0xffff) +
4680 (i * sizeof(u32))),
4681 (info->data_data ?
4682 info->data_data[i] : 0));
4683
4684 err = 0;
4685
4686out:
1da177e4
LT
4687 return err;
4688}
4689
4690/* tp->lock is held. */
4691static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4692{
4693 struct fw_info info;
4694 int err, i;
4695
4696 info.text_base = TG3_FW_TEXT_ADDR;
4697 info.text_len = TG3_FW_TEXT_LEN;
4698 info.text_data = &tg3FwText[0];
4699 info.rodata_base = TG3_FW_RODATA_ADDR;
4700 info.rodata_len = TG3_FW_RODATA_LEN;
4701 info.rodata_data = &tg3FwRodata[0];
4702 info.data_base = TG3_FW_DATA_ADDR;
4703 info.data_len = TG3_FW_DATA_LEN;
4704 info.data_data = NULL;
4705
4706 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4707 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4708 &info);
4709 if (err)
4710 return err;
4711
4712 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4713 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4714 &info);
4715 if (err)
4716 return err;
4717
4718 /* Now startup only the RX cpu. */
4719 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4720 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4721
4722 for (i = 0; i < 5; i++) {
4723 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4724 break;
4725 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4726 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4727 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4728 udelay(1000);
4729 }
4730 if (i >= 5) {
4731 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4732 "to set RX CPU PC, is %08x should be %08x\n",
4733 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4734 TG3_FW_TEXT_ADDR);
4735 return -ENODEV;
4736 }
4737 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4738 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4739
4740 return 0;
4741}
4742
#if TG3_TSO_SUPPORT != 0

/* Link-time layout of the bundled TSO firmware image (version 1.6.0
 * per the RELEASE macros); *_ADDR values are the firmware's own link
 * addresses, *_LEN values are section sizes in bytes.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6	/* NOTE(review): historical "RELASE" typo, name kept as-is */
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
4759
4760static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4761 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4762 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4763 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4764 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4765 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4766 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4767 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4768 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4769 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4770 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4771 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4772 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4773 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4774 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4775 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4776 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4777 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4778 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4779 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4780 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4781 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4782 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4783 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4784 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4785 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4786 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4787 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4788 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4789 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4790 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4791 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4792 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4793 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4794 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4795 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4796 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4797 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4798 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4799 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4800 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4801 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4802 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4803 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4804 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4805 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4806 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4807 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4808 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4809 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4810 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4811 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4812 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4813 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4814 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4815 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4816 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4817 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4818 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4819 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4820 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4821 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4822 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4823 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4824 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4825 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4826 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4827 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4828 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4829 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4830 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4831 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4832 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4833 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4834 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4835 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4836 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4837 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4838 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4839 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4840 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4841 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4842 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4843 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4844 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4845 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4846 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4847 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4848 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4849 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4850 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4851 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4852 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4853 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4854 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4855 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4856 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4857 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4858 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4859 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4860 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4861 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4862 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4863 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4864 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4865 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4866 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4867 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4868 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4869 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4870 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4871 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4872 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4873 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4874 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4875 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4876 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4877 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4878 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4879 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4880 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4881 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4882 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4883 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4884 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4885 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4886 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4887 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4888 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4889 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4890 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4891 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4892 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4893 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4894 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4895 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4896 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4897 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4898 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4899 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4900 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4901 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4902 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4903 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4904 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4905 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4906 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4907 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4908 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4909 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4910 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4911 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4912 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4913 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4914 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4915 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4916 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4917 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4918 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4919 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4920 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4921 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4922 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4923 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4924 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4925 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4926 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4927 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4928 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4929 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4930 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4931 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4932 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4933 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4934 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4935 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4936 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4937 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4938 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4939 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4940 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4941 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4942 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4943 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4944 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4945 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4946 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4947 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4948 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4949 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4950 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4951 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4952 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4953 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4954 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4955 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4956 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4957 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4958 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4959 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4960 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4961 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4962 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4963 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4964 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4965 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4966 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4967 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4968 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4969 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4970 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4971 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4972 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4973 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4974 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4975 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4976 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4977 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4978 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4979 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4980 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4981 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4982 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4983 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4984 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4985 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4986 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4987 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4988 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4989 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4990 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4991 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4992 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4993 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4994 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4995 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4996 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4997 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4998 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4999 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5000 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5001 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5002 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5003 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5004 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5005 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5006 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5007 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5008 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5009 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5010 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5011 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5012 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5013 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5014 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5015 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5016 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5017 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5018 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5019 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5020 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5021 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5022 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5023 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5024 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5025 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5026 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5027 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5028 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5029 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5030 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5031 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5032 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5033 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5034 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5035 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5036 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5037 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5038 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5039 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5040 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5041 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5042 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5043 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5044 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5045};
5046
/* Read-only data segment of the TSO firmware image.  The words are
 * ASCII tag strings ("Main", "CpuB", "CpuA", "stkoffld", ...) used by
 * the firmware itself.  Loaded into NIC SRAM at TG3_TSO_FW_RODATA_ADDR
 * by tg3_load_tso_firmware().  Opaque firmware data — do not edit. */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5054
/* Initialized-data segment of the TSO firmware image (contains the
 * ASCII version tag "stkoffld_v1.6.0").  Loaded into NIC SRAM at
 * TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware().  Opaque firmware
 * data — do not edit. */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5060
/* 5705 needs a special version of the TSO firmware.
 *
 * Layout of the firmware image in NIC SRAM: text (code) at 0x10000,
 * followed by rodata, data, sbss and bss segments.  The *_LEN values
 * are byte lengths; tg3_load_tso_firmware() and tg3_reset_hw() use
 * them to size the download and the scratch/MBUF pool split. */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELEASE_MINOR	0x2
/* Keep the historical misspelling ("RELASE") as an alias so any
 * existing references still compile; new code should use the
 * correctly spelled name above. */
#define TG3_TSO5_FW_RELASE_MINOR	TG3_TSO5_FW_RELEASE_MINOR
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5076
/* MIPS machine code (text segment) of the 5705-specific TSO firmware.
 * Loaded into NIC SRAM at TG3_TSO5_FW_TEXT_ADDR and started on the RX
 * CPU by tg3_load_tso_firmware().  Opaque firmware data — do not edit
 * by hand. */
static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
5235
/* Read-only data segment of the 5705-specific TSO firmware (ASCII tag
 * strings "Main"/"CpuB"/"CpuA"/"stkoffld"/"fatalErr").  Loaded into
 * NIC SRAM at TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().
 * Opaque firmware data — do not edit. */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5242
/* Initialized-data segment of the 5705-specific TSO firmware (contains
 * the ASCII version tag "stkoffld_v1.2.0").  Loaded into NIC SRAM at
 * TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().  Opaque firmware
 * data — do not edit. */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5247
5248/* tp->lock is held. */
5249static int tg3_load_tso_firmware(struct tg3 *tp)
5250{
5251 struct fw_info info;
5252 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5253 int err, i;
5254
5255 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5256 return 0;
5257
5258 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5259 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5260 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5261 info.text_data = &tg3Tso5FwText[0];
5262 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5263 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5264 info.rodata_data = &tg3Tso5FwRodata[0];
5265 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5266 info.data_len = TG3_TSO5_FW_DATA_LEN;
5267 info.data_data = &tg3Tso5FwData[0];
5268 cpu_base = RX_CPU_BASE;
5269 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5270 cpu_scratch_size = (info.text_len +
5271 info.rodata_len +
5272 info.data_len +
5273 TG3_TSO5_FW_SBSS_LEN +
5274 TG3_TSO5_FW_BSS_LEN);
5275 } else {
5276 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5277 info.text_len = TG3_TSO_FW_TEXT_LEN;
5278 info.text_data = &tg3TsoFwText[0];
5279 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5280 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5281 info.rodata_data = &tg3TsoFwRodata[0];
5282 info.data_base = TG3_TSO_FW_DATA_ADDR;
5283 info.data_len = TG3_TSO_FW_DATA_LEN;
5284 info.data_data = &tg3TsoFwData[0];
5285 cpu_base = TX_CPU_BASE;
5286 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5287 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5288 }
5289
5290 err = tg3_load_firmware_cpu(tp, cpu_base,
5291 cpu_scratch_base, cpu_scratch_size,
5292 &info);
5293 if (err)
5294 return err;
5295
5296 /* Now startup the cpu. */
5297 tw32(cpu_base + CPU_STATE, 0xffffffff);
5298 tw32_f(cpu_base + CPU_PC, info.text_base);
5299
5300 for (i = 0; i < 5; i++) {
5301 if (tr32(cpu_base + CPU_PC) == info.text_base)
5302 break;
5303 tw32(cpu_base + CPU_STATE, 0xffffffff);
5304 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5305 tw32_f(cpu_base + CPU_PC, info.text_base);
5306 udelay(1000);
5307 }
5308 if (i >= 5) {
5309 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5310 "to set CPU PC, is %08x should be %08x\n",
5311 tp->dev->name, tr32(cpu_base + CPU_PC),
5312 info.text_base);
5313 return -ENODEV;
5314 }
5315 tw32(cpu_base + CPU_STATE, 0xffffffff);
5316 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5317 return 0;
5318}
5319
5320#endif /* TG3_TSO_SUPPORT != 0 */
5321
5322/* tp->lock is held. */
5323static void __tg3_set_mac_addr(struct tg3 *tp)
5324{
5325 u32 addr_high, addr_low;
5326 int i;
5327
5328 addr_high = ((tp->dev->dev_addr[0] << 8) |
5329 tp->dev->dev_addr[1]);
5330 addr_low = ((tp->dev->dev_addr[2] << 24) |
5331 (tp->dev->dev_addr[3] << 16) |
5332 (tp->dev->dev_addr[4] << 8) |
5333 (tp->dev->dev_addr[5] << 0));
5334 for (i = 0; i < 4; i++) {
5335 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5336 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5337 }
5338
5339 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5340 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5341 for (i = 0; i < 12; i++) {
5342 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5343 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5344 }
5345 }
5346
5347 addr_high = (tp->dev->dev_addr[0] +
5348 tp->dev->dev_addr[1] +
5349 tp->dev->dev_addr[2] +
5350 tp->dev->dev_addr[3] +
5351 tp->dev->dev_addr[4] +
5352 tp->dev->dev_addr[5]) &
5353 TX_BACKOFF_SEED_MASK;
5354 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5355}
5356
5357static int tg3_set_mac_addr(struct net_device *dev, void *p)
5358{
5359 struct tg3 *tp = netdev_priv(dev);
5360 struct sockaddr *addr = p;
5361
5362 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5363
f47c11ee 5364 spin_lock_bh(&tp->lock);
1da177e4 5365 __tg3_set_mac_addr(tp);
f47c11ee 5366 spin_unlock_bh(&tp->lock);
1da177e4
LT
5367
5368 return 0;
5369}
5370
5371/* tp->lock is held. */
5372static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5373 dma_addr_t mapping, u32 maxlen_flags,
5374 u32 nic_addr)
5375{
5376 tg3_write_mem(tp,
5377 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5378 ((u64) mapping >> 32));
5379 tg3_write_mem(tp,
5380 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5381 ((u64) mapping & 0xffffffff));
5382 tg3_write_mem(tp,
5383 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5384 maxlen_flags);
5385
5386 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5387 tg3_write_mem(tp,
5388 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5389 nic_addr);
5390}
5391
5392static void __tg3_set_rx_mode(struct net_device *);
d244c892 5393static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
5394{
5395 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5396 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5397 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5398 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5399 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5400 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5401 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5402 }
5403 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5404 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5405 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5406 u32 val = ec->stats_block_coalesce_usecs;
5407
5408 if (!netif_carrier_ok(tp->dev))
5409 val = 0;
5410
5411 tw32(HOSTCC_STAT_COAL_TICKS, val);
5412 }
5413}
1da177e4
LT
5414
5415/* tp->lock is held. */
5416static int tg3_reset_hw(struct tg3 *tp)
5417{
5418 u32 val, rdmac_mode;
5419 int i, err, limit;
5420
5421 tg3_disable_ints(tp);
5422
5423 tg3_stop_fw(tp);
5424
5425 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5426
5427 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 5428 tg3_abort_hw(tp, 1);
1da177e4
LT
5429 }
5430
5431 err = tg3_chip_reset(tp);
5432 if (err)
5433 return err;
5434
5435 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5436
5437 /* This works around an issue with Athlon chipsets on
5438 * B3 tigon3 silicon. This bit has no effect on any
5439 * other revision. But do not set this on PCI Express
5440 * chips.
5441 */
5442 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5443 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5444 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5445
5446 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5447 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5448 val = tr32(TG3PCI_PCISTATE);
5449 val |= PCISTATE_RETRY_SAME_DMA;
5450 tw32(TG3PCI_PCISTATE, val);
5451 }
5452
5453 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5454 /* Enable some hw fixes. */
5455 val = tr32(TG3PCI_MSI_DATA);
5456 val |= (1 << 26) | (1 << 28) | (1 << 29);
5457 tw32(TG3PCI_MSI_DATA, val);
5458 }
5459
5460 /* Descriptor ring init may make accesses to the
5461 * NIC SRAM area to setup the TX descriptors, so we
5462 * can only do this after the hardware has been
5463 * successfully reset.
5464 */
5465 tg3_init_rings(tp);
5466
5467 /* This value is determined during the probe time DMA
5468 * engine test, tg3_test_dma.
5469 */
5470 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5471
5472 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5473 GRC_MODE_4X_NIC_SEND_RINGS |
5474 GRC_MODE_NO_TX_PHDR_CSUM |
5475 GRC_MODE_NO_RX_PHDR_CSUM);
5476 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5477 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5478 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5479 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5480 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5481
5482 tw32(GRC_MODE,
5483 tp->grc_mode |
5484 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5485
5486 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5487 val = tr32(GRC_MISC_CFG);
5488 val &= ~0xff;
5489 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5490 tw32(GRC_MISC_CFG, val);
5491
5492 /* Initialize MBUF/DESC pool. */
cbf46853 5493 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
5494 /* Do nothing. */
5495 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5496 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5497 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5498 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5499 else
5500 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5501 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5502 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5503 }
5504#if TG3_TSO_SUPPORT != 0
5505 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5506 int fw_len;
5507
5508 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5509 TG3_TSO5_FW_RODATA_LEN +
5510 TG3_TSO5_FW_DATA_LEN +
5511 TG3_TSO5_FW_SBSS_LEN +
5512 TG3_TSO5_FW_BSS_LEN);
5513 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5514 tw32(BUFMGR_MB_POOL_ADDR,
5515 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5516 tw32(BUFMGR_MB_POOL_SIZE,
5517 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5518 }
5519#endif
5520
0f893dc6 5521 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
5522 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5523 tp->bufmgr_config.mbuf_read_dma_low_water);
5524 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5525 tp->bufmgr_config.mbuf_mac_rx_low_water);
5526 tw32(BUFMGR_MB_HIGH_WATER,
5527 tp->bufmgr_config.mbuf_high_water);
5528 } else {
5529 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5530 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5531 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5532 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5533 tw32(BUFMGR_MB_HIGH_WATER,
5534 tp->bufmgr_config.mbuf_high_water_jumbo);
5535 }
5536 tw32(BUFMGR_DMA_LOW_WATER,
5537 tp->bufmgr_config.dma_low_water);
5538 tw32(BUFMGR_DMA_HIGH_WATER,
5539 tp->bufmgr_config.dma_high_water);
5540
5541 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5542 for (i = 0; i < 2000; i++) {
5543 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5544 break;
5545 udelay(10);
5546 }
5547 if (i >= 2000) {
5548 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5549 tp->dev->name);
5550 return -ENODEV;
5551 }
5552
5553 /* Setup replenish threshold. */
5554 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5555
5556 /* Initialize TG3_BDINFO's at:
5557 * RCVDBDI_STD_BD: standard eth size rx ring
5558 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5559 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5560 *
5561 * like so:
5562 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5563 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5564 * ring attribute flags
5565 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5566 *
5567 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5568 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5569 *
5570 * The size of each ring is fixed in the firmware, but the location is
5571 * configurable.
5572 */
5573 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5574 ((u64) tp->rx_std_mapping >> 32));
5575 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5576 ((u64) tp->rx_std_mapping & 0xffffffff));
5577 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5578 NIC_SRAM_RX_BUFFER_DESC);
5579
5580 /* Don't even try to program the JUMBO/MINI buffer descriptor
5581 * configs on 5705.
5582 */
5583 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5584 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5585 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5586 } else {
5587 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5588 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5589
5590 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5591 BDINFO_FLAGS_DISABLED);
5592
5593 /* Setup replenish threshold. */
5594 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5595
0f893dc6 5596 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
5597 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5598 ((u64) tp->rx_jumbo_mapping >> 32));
5599 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5600 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5601 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5602 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5603 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5604 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5605 } else {
5606 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5607 BDINFO_FLAGS_DISABLED);
5608 }
5609
5610 }
5611
5612 /* There is only one send ring on 5705/5750, no need to explicitly
5613 * disable the others.
5614 */
5615 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5616 /* Clear out send RCB ring in SRAM. */
5617 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5618 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5619 BDINFO_FLAGS_DISABLED);
5620 }
5621
5622 tp->tx_prod = 0;
5623 tp->tx_cons = 0;
5624 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5625 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5626
5627 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5628 tp->tx_desc_mapping,
5629 (TG3_TX_RING_SIZE <<
5630 BDINFO_FLAGS_MAXLEN_SHIFT),
5631 NIC_SRAM_TX_BUFFER_DESC);
5632
5633 /* There is only one receive return ring on 5705/5750, no need
5634 * to explicitly disable the others.
5635 */
5636 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5637 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5638 i += TG3_BDINFO_SIZE) {
5639 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5640 BDINFO_FLAGS_DISABLED);
5641 }
5642 }
5643
5644 tp->rx_rcb_ptr = 0;
5645 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5646
5647 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5648 tp->rx_rcb_mapping,
5649 (TG3_RX_RCB_RING_SIZE(tp) <<
5650 BDINFO_FLAGS_MAXLEN_SHIFT),
5651 0);
5652
5653 tp->rx_std_ptr = tp->rx_pending;
5654 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5655 tp->rx_std_ptr);
5656
0f893dc6 5657 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
5658 tp->rx_jumbo_pending : 0;
5659 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5660 tp->rx_jumbo_ptr);
5661
5662 /* Initialize MAC address and backoff seed. */
5663 __tg3_set_mac_addr(tp);
5664
5665 /* MTU + ethernet header + FCS + optional VLAN tag */
5666 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5667
5668 /* The slot time is changed by tg3_setup_phy if we
5669 * run at gigabit with half duplex.
5670 */
5671 tw32(MAC_TX_LENGTHS,
5672 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5673 (6 << TX_LENGTHS_IPG_SHIFT) |
5674 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5675
5676 /* Receive rules. */
5677 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5678 tw32(RCVLPC_CONFIG, 0x0181);
5679
5680 /* Calculate RDMAC_MODE setting early, we need it to determine
5681 * the RCVLPC_STATE_ENABLE mask.
5682 */
5683 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5684 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5685 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5686 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5687 RDMAC_MODE_LNGREAD_ENAB);
5688 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5689 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
5690
5691 /* If statement applies to 5705 and 5750 PCI devices only */
5692 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5693 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5694 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
5695 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5696 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5697 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5698 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5699 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5700 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5701 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5702 }
5703 }
5704
85e94ced
MC
5705 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5706 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5707
1da177e4
LT
5708#if TG3_TSO_SUPPORT != 0
5709 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5710 rdmac_mode |= (1 << 27);
5711#endif
5712
5713 /* Receive/send statistics. */
5714 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5715 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5716 val = tr32(RCVLPC_STATS_ENABLE);
5717 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5718 tw32(RCVLPC_STATS_ENABLE, val);
5719 } else {
5720 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5721 }
5722 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5723 tw32(SNDDATAI_STATSENAB, 0xffffff);
5724 tw32(SNDDATAI_STATSCTRL,
5725 (SNDDATAI_SCTRL_ENABLE |
5726 SNDDATAI_SCTRL_FASTUPD));
5727
5728 /* Setup host coalescing engine. */
5729 tw32(HOSTCC_MODE, 0);
5730 for (i = 0; i < 2000; i++) {
5731 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5732 break;
5733 udelay(10);
5734 }
5735
d244c892 5736 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
5737
5738 /* set status block DMA address */
5739 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5740 ((u64) tp->status_mapping >> 32));
5741 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5742 ((u64) tp->status_mapping & 0xffffffff));
5743
5744 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5745 /* Status/statistics block address. See tg3_timer,
5746 * the tg3_periodic_fetch_stats call there, and
5747 * tg3_get_stats to see how this works for 5705/5750 chips.
5748 */
1da177e4
LT
5749 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5750 ((u64) tp->stats_mapping >> 32));
5751 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5752 ((u64) tp->stats_mapping & 0xffffffff));
5753 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5754 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5755 }
5756
5757 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5758
5759 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5760 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5761 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5762 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5763
5764 /* Clear statistics/status block in chip, and status block in ram. */
5765 for (i = NIC_SRAM_STATS_BLK;
5766 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5767 i += sizeof(u32)) {
5768 tg3_write_mem(tp, i, 0);
5769 udelay(40);
5770 }
5771 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5772
5773 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5774 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5775 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5776 udelay(40);
5777
314fba34
MC
5778 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5779 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5780 * register to preserve the GPIO settings for LOMs. The GPIOs,
5781 * whether used as inputs or outputs, are set by boot code after
5782 * reset.
5783 */
5784 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5785 u32 gpio_mask;
5786
5787 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5788 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
5789
5790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5791 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5792 GRC_LCLCTRL_GPIO_OUTPUT3;
5793
314fba34
MC
5794 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5795
5796 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
5797 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5798 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 5799 }
1da177e4
LT
5800 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5801 udelay(100);
5802
5803 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e
DM
5804 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5805 tp->last_tag = 0;
1da177e4
LT
5806
5807 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5808 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5809 udelay(40);
5810 }
5811
5812 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5813 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5814 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5815 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5816 WDMAC_MODE_LNGREAD_ENAB);
5817
85e94ced
MC
5818 /* If statement applies to 5705 and 5750 PCI devices only */
5819 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5820 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5821 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
5822 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5823 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5824 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5825 /* nothing */
5826 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5827 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5828 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5829 val |= WDMAC_MODE_RX_ACCEL;
5830 }
5831 }
5832
5833 tw32_f(WDMAC_MODE, val);
5834 udelay(40);
5835
5836 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5837 val = tr32(TG3PCI_X_CAPS);
5838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5839 val &= ~PCIX_CAPS_BURST_MASK;
5840 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5841 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5842 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5843 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5844 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5845 val |= (tp->split_mode_max_reqs <<
5846 PCIX_CAPS_SPLIT_SHIFT);
5847 }
5848 tw32(TG3PCI_X_CAPS, val);
5849 }
5850
5851 tw32_f(RDMAC_MODE, rdmac_mode);
5852 udelay(40);
5853
5854 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5855 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5856 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5857 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5858 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5859 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5860 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5861 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5862#if TG3_TSO_SUPPORT != 0
5863 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5864 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5865#endif
5866 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5867 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5868
5869 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5870 err = tg3_load_5701_a0_firmware_fix(tp);
5871 if (err)
5872 return err;
5873 }
5874
5875#if TG3_TSO_SUPPORT != 0
5876 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5877 err = tg3_load_tso_firmware(tp);
5878 if (err)
5879 return err;
5880 }
5881#endif
5882
5883 tp->tx_mode = TX_MODE_ENABLE;
5884 tw32_f(MAC_TX_MODE, tp->tx_mode);
5885 udelay(100);
5886
5887 tp->rx_mode = RX_MODE_ENABLE;
5888 tw32_f(MAC_RX_MODE, tp->rx_mode);
5889 udelay(10);
5890
5891 if (tp->link_config.phy_is_low_power) {
5892 tp->link_config.phy_is_low_power = 0;
5893 tp->link_config.speed = tp->link_config.orig_speed;
5894 tp->link_config.duplex = tp->link_config.orig_duplex;
5895 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5896 }
5897
5898 tp->mi_mode = MAC_MI_MODE_BASE;
5899 tw32_f(MAC_MI_MODE, tp->mi_mode);
5900 udelay(80);
5901
5902 tw32(MAC_LED_CTRL, tp->led_ctrl);
5903
5904 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5905 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5906 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5907 udelay(10);
5908 }
5909 tw32_f(MAC_RX_MODE, tp->rx_mode);
5910 udelay(10);
5911
5912 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5913 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5914 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5915 /* Set drive transmission level to 1.2V */
5916 /* only if the signal pre-emphasis bit is not set */
5917 val = tr32(MAC_SERDES_CFG);
5918 val &= 0xfffff000;
5919 val |= 0x880;
5920 tw32(MAC_SERDES_CFG, val);
5921 }
5922 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5923 tw32(MAC_SERDES_CFG, 0x616000);
5924 }
5925
5926 /* Prevent chip from dropping frames when flow control
5927 * is enabled.
5928 */
5929 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5930
5931 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5932 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5933 /* Use hardware link auto-negotiation */
5934 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5935 }
5936
5937 err = tg3_setup_phy(tp, 1);
5938 if (err)
5939 return err;
5940
5941 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5942 u32 tmp;
5943
5944 /* Clear CRC stats. */
5945 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5946 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5947 tg3_readphy(tp, 0x14, &tmp);
5948 }
5949 }
5950
5951 __tg3_set_rx_mode(tp->dev);
5952
5953 /* Initialize receive rules. */
5954 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5955 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5956 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5957 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5958
4cf78e4f
MC
5959 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5960 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
1da177e4
LT
5961 limit = 8;
5962 else
5963 limit = 16;
5964 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5965 limit -= 4;
5966 switch (limit) {
5967 case 16:
5968 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
5969 case 15:
5970 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
5971 case 14:
5972 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
5973 case 13:
5974 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
5975 case 12:
5976 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
5977 case 11:
5978 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
5979 case 10:
5980 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
5981 case 9:
5982 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
5983 case 8:
5984 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
5985 case 7:
5986 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
5987 case 6:
5988 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
5989 case 5:
5990 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
5991 case 4:
5992 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
5993 case 3:
5994 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
5995 case 2:
5996 case 1:
5997
5998 default:
5999 break;
6000 };
6001
6002 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6003
1da177e4
LT
6004 return 0;
6005}
6006
6007/* Called at device open time to get the chip ready for
6008 * packet processing. Invoked with tp->lock held.
6009 */
6010static int tg3_init_hw(struct tg3 *tp)
6011{
6012 int err;
6013
6014 /* Force the chip into D0. */
6015 err = tg3_set_power_state(tp, 0);
6016 if (err)
6017 goto out;
6018
6019 tg3_switch_clocks(tp);
6020
6021 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6022
6023 err = tg3_reset_hw(tp);
6024
6025out:
6026 return err;
6027}
6028
/* Accumulate a 32-bit hardware counter register (read via tr32())
 * into a 64-bit software statistic kept as a high/low pair.  If the
 * low word wraps during the addition (new low < added value), carry
 * one into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6035
/* Fold the MAC's 32-bit TX/RX statistics counters into the 64-bit
 * software copies in tp->hw_stats.  Called once per second from
 * tg3_timer() on 5705-plus chips (see the TG3_FLG2_5705_PLUS check
 * there).  Skipped entirely while the link is down, so the counters
 * are only harvested when traffic is possible.
 *
 * NOTE(review): the read order below matches the hardware register
 * layout; presumably these counters are clear-on-read, so do not
 * reorder or duplicate reads — confirm against the chip manual.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
}
6072
/* Periodic driver timer.  Re-arms itself at the bottom of every
 * invocation (unless a chip reset is scheduled).  Responsibilities:
 * working around the race-prone non-tagged IRQ status protocol,
 * once-per-second statistics fetch and link-state polling, and the
 * 120-second ASF firmware heartbeat.  Takes tp->lock for the whole
 * body.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block shows a pending update: assert the
			 * interrupt line so the ISR processes it.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise ask the coalescing engine for an
			 * immediate status block update.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped: schedule a full chip
			 * reset from process context and return without
			 * re-arming this timer (the reset task restarts it
			 * via the RESTART_TIMER flag).
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		/* 5705+ chips need their MAC stats harvested by software. */
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Detect a PHY event either via the MI interrupt
			 * bit or the link-state-changed bit, depending on
			 * configuration.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up and the state changed ... */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ... or link was down and PCS sync / signal
			 * detect now indicates a partner.
			 */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode bits before
				 * renegotiating the SERDES link.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 120 seconds. */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Post a NICDRV_ALIVE command in the firmware
			 * mailbox so the ASF firmware knows the driver is
			 * still running.
			 */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
			val = tr32(GRC_RX_CPU_EVENT);
			/* Raise the RX CPU event that delivers the mailbox
			 * to the firmware.
			 */
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

	/* Re-arm for the next tick. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6168
7938109f
MC
/* Verify that the chip can deliver an interrupt to the host.
 * Temporarily installs tg3_test_isr as the handler, forces an
 * immediate status block update through the host coalescing engine,
 * and polls the interrupt mailbox for up to ~50ms.  The regular
 * handler (MSI, tagged-status, or plain INTx flavor) is reinstalled
 * before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV if
 * the device is not running, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap in the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tg3_enable_ints(tp);

	/* Force an immediate status block update; this should raise an
	 * interrupt and make the mailbox non-zero.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll the interrupt mailbox, 5 tries x 10ms. */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	/* Reinstall the production interrupt handler for the current
	 * interrupt mode.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else {
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6222
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() with SERR reporting masked (an MSI cycle
 * may terminate with Master Abort).  On MSI failure: disables MSI,
 * reinstalls an INTx handler, and fully resets/reinitializes the chip.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures (e.g. request_irq errors) are fatal here */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Reinstall an INTx handler, tagged-status flavor when the chip
	 * supports it.
	 */
	{
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;

		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp);

	tg3_full_unlock(tp);

	/* Chip re-init failed: release the irq we just requested. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6290
1da177e4
LT
/* net_device open callback.  Allocates the DMA-consistent descriptor
 * rings, enables MSI where supported (5750+ past the AX/BX revisions,
 * and only with tagged status), registers the matching interrupt
 * handler, initializes the hardware, verifies MSI delivery via
 * tg3_test_msi(), starts the periodic timer, and wakes the TX queue.
 * Every failure path unwinds irq/MSI/ring state before returning.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* MSI granted: enable it in the chip too. */
			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	/* Register the handler that matches the interrupt mode chosen
	 * above.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else {
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;

		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}

	if (err) {
		/* request_irq failed: undo MSI and ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status lets the timer run at 1Hz instead of
		 * 10Hz (see tg3_timer's non-tagged workaround).
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat fires once every 120 seconds. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 120);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		/* Hardware init failed: release irq, MSI and rings. */
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			/* MSI test failed and the INTx fallback inside
			 * tg3_test_msi() failed as well: tear everything
			 * down.
			 */
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			return err;
		}
	}

	tg3_full_lock(tp, 0);

	/* Everything is up: start the timer and accept interrupts. */
	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
6416
6417#if 0
6418/*static*/ void tg3_dump_state(struct tg3 *tp)
6419{
6420 u32 val32, val32_2, val32_3, val32_4, val32_5;
6421 u16 val16;
6422 int i;
6423
6424 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6425 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6426 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6427 val16, val32);
6428
6429 /* MAC block */
6430 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6431 tr32(MAC_MODE), tr32(MAC_STATUS));
6432 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6433 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6434 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6435 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6436 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6437 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6438
6439 /* Send data initiator control block */
6440 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6441 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6442 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6443 tr32(SNDDATAI_STATSCTRL));
6444
6445 /* Send data completion control block */
6446 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6447
6448 /* Send BD ring selector block */
6449 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6450 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6451
6452 /* Send BD initiator control block */
6453 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6454 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6455
6456 /* Send BD completion control block */
6457 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6458
6459 /* Receive list placement control block */
6460 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6461 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6462 printk(" RCVLPC_STATSCTRL[%08x]\n",
6463 tr32(RCVLPC_STATSCTRL));
6464
6465 /* Receive data and receive BD initiator control block */
6466 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6467 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6468
6469 /* Receive data completion control block */
6470 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6471 tr32(RCVDCC_MODE));
6472
6473 /* Receive BD initiator control block */
6474 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6475 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6476
6477 /* Receive BD completion control block */
6478 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6479 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6480
6481 /* Receive list selector control block */
6482 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6483 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6484
6485 /* Mbuf cluster free block */
6486 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6487 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6488
6489 /* Host coalescing control block */
6490 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6491 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6492 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6493 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6494 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6495 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6496 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6497 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6498 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6499 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6500 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6501 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6502
6503 /* Memory arbiter control block */
6504 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6505 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6506
6507 /* Buffer manager control block */
6508 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6509 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6510 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6511 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6512 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6513 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6514 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6515 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6516
6517 /* Read DMA control block */
6518 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6519 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6520
6521 /* Write DMA control block */
6522 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6523 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6524
6525 /* DMA completion block */
6526 printk("DEBUG: DMAC_MODE[%08x]\n",
6527 tr32(DMAC_MODE));
6528
6529 /* GRC block */
6530 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6531 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6532 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6533 tr32(GRC_LOCAL_CTRL));
6534
6535 /* TG3_BDINFOs */
6536 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6537 tr32(RCVDBDI_JUMBO_BD + 0x0),
6538 tr32(RCVDBDI_JUMBO_BD + 0x4),
6539 tr32(RCVDBDI_JUMBO_BD + 0x8),
6540 tr32(RCVDBDI_JUMBO_BD + 0xc));
6541 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6542 tr32(RCVDBDI_STD_BD + 0x0),
6543 tr32(RCVDBDI_STD_BD + 0x4),
6544 tr32(RCVDBDI_STD_BD + 0x8),
6545 tr32(RCVDBDI_STD_BD + 0xc));
6546 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6547 tr32(RCVDBDI_MINI_BD + 0x0),
6548 tr32(RCVDBDI_MINI_BD + 0x4),
6549 tr32(RCVDBDI_MINI_BD + 0x8),
6550 tr32(RCVDBDI_MINI_BD + 0xc));
6551
6552 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6553 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6554 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6555 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6556 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6557 val32, val32_2, val32_3, val32_4);
6558
6559 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6560 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6561 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6562 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6563 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6564 val32, val32_2, val32_3, val32_4);
6565
6566 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6567 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6568 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6569 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6570 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6571 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6572 val32, val32_2, val32_3, val32_4, val32_5);
6573
6574 /* SW status block */
6575 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6576 tp->hw_status->status,
6577 tp->hw_status->status_tag,
6578 tp->hw_status->rx_jumbo_consumer,
6579 tp->hw_status->rx_consumer,
6580 tp->hw_status->rx_mini_consumer,
6581 tp->hw_status->idx[0].rx_producer,
6582 tp->hw_status->idx[0].tx_consumer);
6583
6584 /* SW statistics block */
6585 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6586 ((u32 *)tp->hw_stats)[0],
6587 ((u32 *)tp->hw_stats)[1],
6588 ((u32 *)tp->hw_stats)[2],
6589 ((u32 *)tp->hw_stats)[3]);
6590
6591 /* Mailboxes */
6592 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6593 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6594 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6595 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6596 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6597
6598 /* NIC side send descriptors. */
6599 for (i = 0; i < 6; i++) {
6600 unsigned long txd;
6601
6602 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6603 + (i * sizeof(struct tg3_tx_buffer_desc));
6604 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6605 i,
6606 readl(txd + 0x0), readl(txd + 0x4),
6607 readl(txd + 0x8), readl(txd + 0xc));
6608 }
6609
6610 /* NIC side RX descriptors. */
6611 for (i = 0; i < 6; i++) {
6612 unsigned long rxd;
6613
6614 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6615 + (i * sizeof(struct tg3_rx_buffer_desc));
6616 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6617 i,
6618 readl(rxd + 0x0), readl(rxd + 0x4),
6619 readl(rxd + 0x8), readl(rxd + 0xc));
6620 rxd += (4 * sizeof(u32));
6621 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6622 i,
6623 readl(rxd + 0x0), readl(rxd + 0x4),
6624 readl(rxd + 0x8), readl(rxd + 0xc));
6625 }
6626
6627 for (i = 0; i < 6; i++) {
6628 unsigned long rxd;
6629
6630 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6631 + (i * sizeof(struct tg3_rx_buffer_desc));
6632 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6633 i,
6634 readl(rxd + 0x0), readl(rxd + 0x4),
6635 readl(rxd + 0x8), readl(rxd + 0xc));
6636 rxd += (4 * sizeof(u32));
6637 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6638 i,
6639 readl(rxd + 0x0), readl(rxd + 0x4),
6640 readl(rxd + 0x8), readl(rxd + 0xc));
6641 }
6642}
6643#endif
6644
6645static struct net_device_stats *tg3_get_stats(struct net_device *);
6646static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6647
6648static int tg3_close(struct net_device *dev)
6649{
6650 struct tg3 *tp = netdev_priv(dev);
6651
6652 netif_stop_queue(dev);
6653
6654 del_timer_sync(&tp->timer);
6655
f47c11ee 6656 tg3_full_lock(tp, 1);
1da177e4
LT
6657#if 0
6658 tg3_dump_state(tp);
6659#endif
6660
6661 tg3_disable_ints(tp);
6662
944d980e 6663 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
6664 tg3_free_rings(tp);
6665 tp->tg3_flags &=
6666 ~(TG3_FLAG_INIT_COMPLETE |
6667 TG3_FLAG_GOT_SERDES_FLOWCTL);
6668 netif_carrier_off(tp->dev);
6669
f47c11ee 6670 tg3_full_unlock(tp);
1da177e4 6671
88b06bc2
MC
6672 free_irq(tp->pdev->irq, dev);
6673 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6674 pci_disable_msi(tp->pdev);
6675 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6676 }
1da177e4
LT
6677
6678 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6679 sizeof(tp->net_stats_prev));
6680 memcpy(&tp->estats_prev, tg3_get_estats(tp),
6681 sizeof(tp->estats_prev));
6682
6683 tg3_free_consistent(tp);
6684
6685 return 0;
6686}
6687
6688static inline unsigned long get_stat64(tg3_stat64_t *val)
6689{
6690 unsigned long ret;
6691
6692#if (BITS_PER_LONG == 32)
6693 ret = val->low;
6694#else
6695 ret = ((u64)val->high << 32) | ((u64)val->low);
6696#endif
6697 return ret;
6698}
6699
6700static unsigned long calc_crc_errors(struct tg3 *tp)
6701{
6702 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6703
6704 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6705 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1da177e4
LT
6707 u32 val;
6708
f47c11ee 6709 spin_lock_bh(&tp->lock);
1da177e4
LT
6710 if (!tg3_readphy(tp, 0x1e, &val)) {
6711 tg3_writephy(tp, 0x1e, val | 0x8000);
6712 tg3_readphy(tp, 0x14, &val);
6713 } else
6714 val = 0;
f47c11ee 6715 spin_unlock_bh(&tp->lock);
1da177e4
LT
6716
6717 tp->phy_crc_errors += val;
6718
6719 return tp->phy_crc_errors;
6720 }
6721
6722 return get_stat64(&hw_stats->rx_fcs_errors);
6723}
6724
6725#define ESTAT_ADD(member) \
6726 estats->member = old_estats->member + \
6727 get_stat64(&hw_stats->member)
6728
6729static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6730{
6731 struct tg3_ethtool_stats *estats = &tp->estats;
6732 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6733 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6734
6735 if (!hw_stats)
6736 return old_estats;
6737
6738 ESTAT_ADD(rx_octets);
6739 ESTAT_ADD(rx_fragments);
6740 ESTAT_ADD(rx_ucast_packets);
6741 ESTAT_ADD(rx_mcast_packets);
6742 ESTAT_ADD(rx_bcast_packets);
6743 ESTAT_ADD(rx_fcs_errors);
6744 ESTAT_ADD(rx_align_errors);
6745 ESTAT_ADD(rx_xon_pause_rcvd);
6746 ESTAT_ADD(rx_xoff_pause_rcvd);
6747 ESTAT_ADD(rx_mac_ctrl_rcvd);
6748 ESTAT_ADD(rx_xoff_entered);
6749 ESTAT_ADD(rx_frame_too_long_errors);
6750 ESTAT_ADD(rx_jabbers);
6751 ESTAT_ADD(rx_undersize_packets);
6752 ESTAT_ADD(rx_in_length_errors);
6753 ESTAT_ADD(rx_out_length_errors);
6754 ESTAT_ADD(rx_64_or_less_octet_packets);
6755 ESTAT_ADD(rx_65_to_127_octet_packets);
6756 ESTAT_ADD(rx_128_to_255_octet_packets);
6757 ESTAT_ADD(rx_256_to_511_octet_packets);
6758 ESTAT_ADD(rx_512_to_1023_octet_packets);
6759 ESTAT_ADD(rx_1024_to_1522_octet_packets);
6760 ESTAT_ADD(rx_1523_to_2047_octet_packets);
6761 ESTAT_ADD(rx_2048_to_4095_octet_packets);
6762 ESTAT_ADD(rx_4096_to_8191_octet_packets);
6763 ESTAT_ADD(rx_8192_to_9022_octet_packets);
6764
6765 ESTAT_ADD(tx_octets);
6766 ESTAT_ADD(tx_collisions);
6767 ESTAT_ADD(tx_xon_sent);
6768 ESTAT_ADD(tx_xoff_sent);
6769 ESTAT_ADD(tx_flow_control);
6770 ESTAT_ADD(tx_mac_errors);
6771 ESTAT_ADD(tx_single_collisions);
6772 ESTAT_ADD(tx_mult_collisions);
6773 ESTAT_ADD(tx_deferred);
6774 ESTAT_ADD(tx_excessive_collisions);
6775 ESTAT_ADD(tx_late_collisions);
6776 ESTAT_ADD(tx_collide_2times);
6777 ESTAT_ADD(tx_collide_3times);
6778 ESTAT_ADD(tx_collide_4times);
6779 ESTAT_ADD(tx_collide_5times);
6780 ESTAT_ADD(tx_collide_6times);
6781 ESTAT_ADD(tx_collide_7times);
6782 ESTAT_ADD(tx_collide_8times);
6783 ESTAT_ADD(tx_collide_9times);
6784 ESTAT_ADD(tx_collide_10times);
6785 ESTAT_ADD(tx_collide_11times);
6786 ESTAT_ADD(tx_collide_12times);
6787 ESTAT_ADD(tx_collide_13times);
6788 ESTAT_ADD(tx_collide_14times);
6789 ESTAT_ADD(tx_collide_15times);
6790 ESTAT_ADD(tx_ucast_packets);
6791 ESTAT_ADD(tx_mcast_packets);
6792 ESTAT_ADD(tx_bcast_packets);
6793 ESTAT_ADD(tx_carrier_sense_errors);
6794 ESTAT_ADD(tx_discards);
6795 ESTAT_ADD(tx_errors);
6796
6797 ESTAT_ADD(dma_writeq_full);
6798 ESTAT_ADD(dma_write_prioq_full);
6799 ESTAT_ADD(rxbds_empty);
6800 ESTAT_ADD(rx_discards);
6801 ESTAT_ADD(rx_errors);
6802 ESTAT_ADD(rx_threshold_hit);
6803
6804 ESTAT_ADD(dma_readq_full);
6805 ESTAT_ADD(dma_read_prioq_full);
6806 ESTAT_ADD(tx_comp_queue_full);
6807
6808 ESTAT_ADD(ring_set_send_prod_index);
6809 ESTAT_ADD(ring_status_update);
6810 ESTAT_ADD(nic_irqs);
6811 ESTAT_ADD(nic_avoided_irqs);
6812 ESTAT_ADD(nic_tx_threshold_hit);
6813
6814 return estats;
6815}
6816
6817static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6818{
6819 struct tg3 *tp = netdev_priv(dev);
6820 struct net_device_stats *stats = &tp->net_stats;
6821 struct net_device_stats *old_stats = &tp->net_stats_prev;
6822 struct tg3_hw_stats *hw_stats = tp->hw_stats;
6823
6824 if (!hw_stats)
6825 return old_stats;
6826
6827 stats->rx_packets = old_stats->rx_packets +
6828 get_stat64(&hw_stats->rx_ucast_packets) +
6829 get_stat64(&hw_stats->rx_mcast_packets) +
6830 get_stat64(&hw_stats->rx_bcast_packets);
6831
6832 stats->tx_packets = old_stats->tx_packets +
6833 get_stat64(&hw_stats->tx_ucast_packets) +
6834 get_stat64(&hw_stats->tx_mcast_packets) +
6835 get_stat64(&hw_stats->tx_bcast_packets);
6836
6837 stats->rx_bytes = old_stats->rx_bytes +
6838 get_stat64(&hw_stats->rx_octets);
6839 stats->tx_bytes = old_stats->tx_bytes +
6840 get_stat64(&hw_stats->tx_octets);
6841
6842 stats->rx_errors = old_stats->rx_errors +
6843 get_stat64(&hw_stats->rx_errors) +
6844 get_stat64(&hw_stats->rx_discards);
6845 stats->tx_errors = old_stats->tx_errors +
6846 get_stat64(&hw_stats->tx_errors) +
6847 get_stat64(&hw_stats->tx_mac_errors) +
6848 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6849 get_stat64(&hw_stats->tx_discards);
6850
6851 stats->multicast = old_stats->multicast +
6852 get_stat64(&hw_stats->rx_mcast_packets);
6853 stats->collisions = old_stats->collisions +
6854 get_stat64(&hw_stats->tx_collisions);
6855
6856 stats->rx_length_errors = old_stats->rx_length_errors +
6857 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6858 get_stat64(&hw_stats->rx_undersize_packets);
6859
6860 stats->rx_over_errors = old_stats->rx_over_errors +
6861 get_stat64(&hw_stats->rxbds_empty);
6862 stats->rx_frame_errors = old_stats->rx_frame_errors +
6863 get_stat64(&hw_stats->rx_align_errors);
6864 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6865 get_stat64(&hw_stats->tx_discards);
6866 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6867 get_stat64(&hw_stats->tx_carrier_sense_errors);
6868
6869 stats->rx_crc_errors = old_stats->rx_crc_errors +
6870 calc_crc_errors(tp);
6871
6872 return stats;
6873}
6874
/* Bitwise Ethernet CRC-32 (reflected, polynomial 0xedb88320, initial
 * value 0xffffffff, final complement) over @len bytes of @buf.  Used to
 * derive the multicast hash filter bit for each address.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		reg ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			reg = (reg >> 1) ^ ((reg & 1) ? 0xedb88320 : 0);
	}

	return ~reg;
}
6899
6900static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6901{
6902 /* accept or reject all multicast frames */
6903 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6904 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6905 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6906 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6907}
6908
6909static void __tg3_set_rx_mode(struct net_device *dev)
6910{
6911 struct tg3 *tp = netdev_priv(dev);
6912 u32 rx_mode;
6913
6914 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6915 RX_MODE_KEEP_VLAN_TAG);
6916
6917 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6918 * flag clear.
6919 */
6920#if TG3_VLAN_TAG_USED
6921 if (!tp->vlgrp &&
6922 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6923 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6924#else
6925 /* By definition, VLAN is disabled always in this
6926 * case.
6927 */
6928 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6929 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6930#endif
6931
6932 if (dev->flags & IFF_PROMISC) {
6933 /* Promiscuous mode. */
6934 rx_mode |= RX_MODE_PROMISC;
6935 } else if (dev->flags & IFF_ALLMULTI) {
6936 /* Accept all multicast. */
6937 tg3_set_multi (tp, 1);
6938 } else if (dev->mc_count < 1) {
6939 /* Reject all multicast. */
6940 tg3_set_multi (tp, 0);
6941 } else {
6942 /* Accept one or more multicast(s). */
6943 struct dev_mc_list *mclist;
6944 unsigned int i;
6945 u32 mc_filter[4] = { 0, };
6946 u32 regidx;
6947 u32 bit;
6948 u32 crc;
6949
6950 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6951 i++, mclist = mclist->next) {
6952
6953 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6954 bit = ~crc & 0x7f;
6955 regidx = (bit & 0x60) >> 5;
6956 bit &= 0x1f;
6957 mc_filter[regidx] |= (1 << bit);
6958 }
6959
6960 tw32(MAC_HASH_REG_0, mc_filter[0]);
6961 tw32(MAC_HASH_REG_1, mc_filter[1]);
6962 tw32(MAC_HASH_REG_2, mc_filter[2]);
6963 tw32(MAC_HASH_REG_3, mc_filter[3]);
6964 }
6965
6966 if (rx_mode != tp->rx_mode) {
6967 tp->rx_mode = rx_mode;
6968 tw32_f(MAC_RX_MODE, rx_mode);
6969 udelay(10);
6970 }
6971}
6972
/* net_device set_rx_mode hook: take the device lock around the real
 * filter programming in __tg3_set_rx_mode().
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
6981
/* Size of the ethtool register dump buffer. */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook: the dump size is fixed. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
6988
6989static void tg3_get_regs(struct net_device *dev,
6990 struct ethtool_regs *regs, void *_p)
6991{
6992 u32 *p = _p;
6993 struct tg3 *tp = netdev_priv(dev);
6994 u8 *orig_p = _p;
6995 int i;
6996
6997 regs->version = 0;
6998
6999 memset(p, 0, TG3_REGDUMP_LEN);
7000
f47c11ee 7001 tg3_full_lock(tp, 0);
1da177e4
LT
7002
7003#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7004#define GET_REG32_LOOP(base,len) \
7005do { p = (u32 *)(orig_p + (base)); \
7006 for (i = 0; i < len; i += 4) \
7007 __GET_REG32((base) + i); \
7008} while (0)
7009#define GET_REG32_1(reg) \
7010do { p = (u32 *)(orig_p + (reg)); \
7011 __GET_REG32((reg)); \
7012} while (0)
7013
7014 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7015 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7016 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7017 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7018 GET_REG32_1(SNDDATAC_MODE);
7019 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7020 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7021 GET_REG32_1(SNDBDC_MODE);
7022 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7023 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7024 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7025 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7026 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7027 GET_REG32_1(RCVDCC_MODE);
7028 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7029 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7030 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7031 GET_REG32_1(MBFREE_MODE);
7032 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7033 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7034 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7035 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7036 GET_REG32_LOOP(WDMAC_MODE, 0x08);
7037 GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7038 GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7039 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7040 GET_REG32_LOOP(FTQ_RESET, 0x120);
7041 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7042 GET_REG32_1(DMAC_MODE);
7043 GET_REG32_LOOP(GRC_MODE, 0x4c);
7044 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7045 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7046
7047#undef __GET_REG32
7048#undef GET_REG32_LOOP
7049#undef GET_REG32_1
7050
f47c11ee 7051 tg3_full_unlock(tp);
1da177e4
LT
7052}
7053
7054static int tg3_get_eeprom_len(struct net_device *dev)
7055{
7056 struct tg3 *tp = netdev_priv(dev);
7057
7058 return tp->nvram_size;
7059}
7060
7061static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7062
7063static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7064{
7065 struct tg3 *tp = netdev_priv(dev);
7066 int ret;
7067 u8 *pd;
7068 u32 i, offset, len, val, b_offset, b_count;
7069
7070 offset = eeprom->offset;
7071 len = eeprom->len;
7072 eeprom->len = 0;
7073
7074 eeprom->magic = TG3_EEPROM_MAGIC;
7075
7076 if (offset & 3) {
7077 /* adjustments to start on required 4 byte boundary */
7078 b_offset = offset & 3;
7079 b_count = 4 - b_offset;
7080 if (b_count > len) {
7081 /* i.e. offset=1 len=2 */
7082 b_count = len;
7083 }
7084 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7085 if (ret)
7086 return ret;
7087 val = cpu_to_le32(val);
7088 memcpy(data, ((char*)&val) + b_offset, b_count);
7089 len -= b_count;
7090 offset += b_count;
7091 eeprom->len += b_count;
7092 }
7093
7094 /* read bytes upto the last 4 byte boundary */
7095 pd = &data[eeprom->len];
7096 for (i = 0; i < (len - (len & 3)); i += 4) {
7097 ret = tg3_nvram_read(tp, offset + i, &val);
7098 if (ret) {
7099 eeprom->len += i;
7100 return ret;
7101 }
7102 val = cpu_to_le32(val);
7103 memcpy(pd + i, &val, 4);
7104 }
7105 eeprom->len += i;
7106
7107 if (len & 3) {
7108 /* read last bytes not ending on 4 byte boundary */
7109 pd = &data[eeprom->len];
7110 b_count = len & 3;
7111 b_offset = offset + len - b_count;
7112 ret = tg3_nvram_read(tp, b_offset, &val);
7113 if (ret)
7114 return ret;
7115 val = cpu_to_le32(val);
7116 memcpy(pd, ((char*)&val), b_count);
7117 eeprom->len += b_count;
7118 }
7119 return 0;
7120}
7121
7122static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7123
7124static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7125{
7126 struct tg3 *tp = netdev_priv(dev);
7127 int ret;
7128 u32 offset, len, b_offset, odd_len, start, end;
7129 u8 *buf;
7130
7131 if (eeprom->magic != TG3_EEPROM_MAGIC)
7132 return -EINVAL;
7133
7134 offset = eeprom->offset;
7135 len = eeprom->len;
7136
7137 if ((b_offset = (offset & 3))) {
7138 /* adjustments to start on required 4 byte boundary */
7139 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7140 if (ret)
7141 return ret;
7142 start = cpu_to_le32(start);
7143 len += b_offset;
7144 offset &= ~3;
1c8594b4
MC
7145 if (len < 4)
7146 len = 4;
1da177e4
LT
7147 }
7148
7149 odd_len = 0;
1c8594b4 7150 if (len & 3) {
1da177e4
LT
7151 /* adjustments to end on required 4 byte boundary */
7152 odd_len = 1;
7153 len = (len + 3) & ~3;
7154 ret = tg3_nvram_read(tp, offset+len-4, &end);
7155 if (ret)
7156 return ret;
7157 end = cpu_to_le32(end);
7158 }
7159
7160 buf = data;
7161 if (b_offset || odd_len) {
7162 buf = kmalloc(len, GFP_KERNEL);
7163 if (buf == 0)
7164 return -ENOMEM;
7165 if (b_offset)
7166 memcpy(buf, &start, 4);
7167 if (odd_len)
7168 memcpy(buf+len-4, &end, 4);
7169 memcpy(buf + b_offset, data, eeprom->len);
7170 }
7171
7172 ret = tg3_nvram_write_block(tp, offset, len, buf);
7173
7174 if (buf != data)
7175 kfree(buf);
7176
7177 return ret;
7178}
7179
7180static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7181{
7182 struct tg3 *tp = netdev_priv(dev);
7183
7184 cmd->supported = (SUPPORTED_Autoneg);
7185
7186 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7187 cmd->supported |= (SUPPORTED_1000baseT_Half |
7188 SUPPORTED_1000baseT_Full);
7189
7190 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
7191 cmd->supported |= (SUPPORTED_100baseT_Half |
7192 SUPPORTED_100baseT_Full |
7193 SUPPORTED_10baseT_Half |
7194 SUPPORTED_10baseT_Full |
7195 SUPPORTED_MII);
7196 else
7197 cmd->supported |= SUPPORTED_FIBRE;
7198
7199 cmd->advertising = tp->link_config.advertising;
7200 if (netif_running(dev)) {
7201 cmd->speed = tp->link_config.active_speed;
7202 cmd->duplex = tp->link_config.active_duplex;
7203 }
7204 cmd->port = 0;
7205 cmd->phy_address = PHY_ADDR;
7206 cmd->transceiver = 0;
7207 cmd->autoneg = tp->link_config.autoneg;
7208 cmd->maxtxpkt = 0;
7209 cmd->maxrxpkt = 0;
7210 return 0;
7211}
7212
7213static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7214{
7215 struct tg3 *tp = netdev_priv(dev);
7216
7217 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7218 /* These are the only valid advertisement bits allowed. */
7219 if (cmd->autoneg == AUTONEG_ENABLE &&
7220 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7221 ADVERTISED_1000baseT_Full |
7222 ADVERTISED_Autoneg |
7223 ADVERTISED_FIBRE)))
7224 return -EINVAL;
7225 }
7226
f47c11ee 7227 tg3_full_lock(tp, 0);
1da177e4
LT
7228
7229 tp->link_config.autoneg = cmd->autoneg;
7230 if (cmd->autoneg == AUTONEG_ENABLE) {
7231 tp->link_config.advertising = cmd->advertising;
7232 tp->link_config.speed = SPEED_INVALID;
7233 tp->link_config.duplex = DUPLEX_INVALID;
7234 } else {
7235 tp->link_config.advertising = 0;
7236 tp->link_config.speed = cmd->speed;
7237 tp->link_config.duplex = cmd->duplex;
7238 }
7239
7240 if (netif_running(dev))
7241 tg3_setup_phy(tp, 1);
7242
f47c11ee 7243 tg3_full_unlock(tp);
1da177e4
LT
7244
7245 return 0;
7246}
7247
7248static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7249{
7250 struct tg3 *tp = netdev_priv(dev);
7251
7252 strcpy(info->driver, DRV_MODULE_NAME);
7253 strcpy(info->version, DRV_MODULE_VERSION);
7254 strcpy(info->bus_info, pci_name(tp->pdev));
7255}
7256
7257static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7258{
7259 struct tg3 *tp = netdev_priv(dev);
7260
7261 wol->supported = WAKE_MAGIC;
7262 wol->wolopts = 0;
7263 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7264 wol->wolopts = WAKE_MAGIC;
7265 memset(&wol->sopass, 0, sizeof(wol->sopass));
7266}
7267
7268static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7269{
7270 struct tg3 *tp = netdev_priv(dev);
7271
7272 if (wol->wolopts & ~WAKE_MAGIC)
7273 return -EINVAL;
7274 if ((wol->wolopts & WAKE_MAGIC) &&
7275 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7276 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7277 return -EINVAL;
7278
f47c11ee 7279 spin_lock_bh(&tp->lock);
1da177e4
LT
7280 if (wol->wolopts & WAKE_MAGIC)
7281 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7282 else
7283 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 7284 spin_unlock_bh(&tp->lock);
1da177e4
LT
7285
7286 return 0;
7287}
7288
7289static u32 tg3_get_msglevel(struct net_device *dev)
7290{
7291 struct tg3 *tp = netdev_priv(dev);
7292 return tp->msg_enable;
7293}
7294
7295static void tg3_set_msglevel(struct net_device *dev, u32 value)
7296{
7297 struct tg3 *tp = netdev_priv(dev);
7298 tp->msg_enable = value;
7299}
7300
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso hook: refuse to enable TSO on chips that cannot do
 * it; otherwise defer to the generic helper.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	return ethtool_op_set_tso(dev, value);
}
#endif
7314
7315static int tg3_nway_reset(struct net_device *dev)
7316{
7317 struct tg3 *tp = netdev_priv(dev);
7318 u32 bmcr;
7319 int r;
7320
7321 if (!netif_running(dev))
7322 return -EAGAIN;
7323
f47c11ee 7324 spin_lock_bh(&tp->lock);
1da177e4
LT
7325 r = -EINVAL;
7326 tg3_readphy(tp, MII_BMCR, &bmcr);
7327 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7328 (bmcr & BMCR_ANENABLE)) {
7329 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7330 r = 0;
7331 }
f47c11ee 7332 spin_unlock_bh(&tp->lock);
1da177e4
LT
7333
7334 return r;
7335}
7336
7337static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7338{
7339 struct tg3 *tp = netdev_priv(dev);
7340
7341 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7342 ering->rx_mini_max_pending = 0;
7343 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7344
7345 ering->rx_pending = tp->rx_pending;
7346 ering->rx_mini_pending = 0;
7347 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7348 ering->tx_pending = tp->tx_pending;
7349}
7350
7351static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7352{
7353 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7354 int irq_sync = 0;
1da177e4
LT
7355
7356 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7357 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7358 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7359 return -EINVAL;
7360
bbe832c0 7361 if (netif_running(dev)) {
1da177e4 7362 tg3_netif_stop(tp);
bbe832c0
MC
7363 irq_sync = 1;
7364 }
1da177e4 7365
bbe832c0 7366 tg3_full_lock(tp, irq_sync);
1da177e4
LT
7367
7368 tp->rx_pending = ering->rx_pending;
7369
7370 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7371 tp->rx_pending > 63)
7372 tp->rx_pending = 63;
7373 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7374 tp->tx_pending = ering->tx_pending;
7375
7376 if (netif_running(dev)) {
944d980e 7377 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7378 tg3_init_hw(tp);
7379 tg3_netif_start(tp);
7380 }
7381
f47c11ee 7382 tg3_full_unlock(tp);
1da177e4
LT
7383
7384 return 0;
7385}
7386
7387static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7388{
7389 struct tg3 *tp = netdev_priv(dev);
7390
7391 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7392 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7393 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7394}
7395
7396static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7397{
7398 struct tg3 *tp = netdev_priv(dev);
bbe832c0 7399 int irq_sync = 0;
1da177e4 7400
bbe832c0 7401 if (netif_running(dev)) {
1da177e4 7402 tg3_netif_stop(tp);
bbe832c0
MC
7403 irq_sync = 1;
7404 }
1da177e4 7405
bbe832c0 7406 tg3_full_lock(tp, irq_sync);
f47c11ee 7407
1da177e4
LT
7408 if (epause->autoneg)
7409 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7410 else
7411 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7412 if (epause->rx_pause)
7413 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7414 else
7415 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7416 if (epause->tx_pause)
7417 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7418 else
7419 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7420
7421 if (netif_running(dev)) {
944d980e 7422 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
7423 tg3_init_hw(tp);
7424 tg3_netif_start(tp);
7425 }
f47c11ee
DM
7426
7427 tg3_full_unlock(tp);
1da177e4
LT
7428
7429 return 0;
7430}
7431
7432static u32 tg3_get_rx_csum(struct net_device *dev)
7433{
7434 struct tg3 *tp = netdev_priv(dev);
7435 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7436}
7437
7438static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7439{
7440 struct tg3 *tp = netdev_priv(dev);
7441
7442 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7443 if (data != 0)
7444 return -EINVAL;
7445 return 0;
7446 }
7447
f47c11ee 7448 spin_lock_bh(&tp->lock);
1da177e4
LT
7449 if (data)
7450 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7451 else
7452 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 7453 spin_unlock_bh(&tp->lock);
1da177e4
LT
7454
7455 return 0;
7456}
7457
7458static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7459{
7460 struct tg3 *tp = netdev_priv(dev);
7461
7462 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7463 if (data != 0)
7464 return -EINVAL;
7465 return 0;
7466 }
7467
7468 if (data)
7469 dev->features |= NETIF_F_IP_CSUM;
7470 else
7471 dev->features &= ~NETIF_F_IP_CSUM;
7472
7473 return 0;
7474}
7475
7476static int tg3_get_stats_count (struct net_device *dev)
7477{
7478 return TG3_NUM_STATS;
7479}
7480
4cafd3f5
MC
7481static int tg3_get_test_count (struct net_device *dev)
7482{
7483 return TG3_NUM_TEST;
7484}
7485
1da177e4
LT
7486static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7487{
7488 switch (stringset) {
7489 case ETH_SS_STATS:
7490 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7491 break;
4cafd3f5
MC
7492 case ETH_SS_TEST:
7493 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7494 break;
1da177e4
LT
7495 default:
7496 WARN_ON(1); /* we need a WARN() */
7497 break;
7498 }
7499}
7500
7501static void tg3_get_ethtool_stats (struct net_device *dev,
7502 struct ethtool_stats *estats, u64 *tmp_stats)
7503{
7504 struct tg3 *tp = netdev_priv(dev);
7505 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7506}
7507
566f86ad
MC
7508#define NVRAM_TEST_SIZE 0x100
7509
7510static int tg3_test_nvram(struct tg3 *tp)
7511{
7512 u32 *buf, csum;
7513 int i, j, err = 0;
7514
7515 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7516 if (buf == NULL)
7517 return -ENOMEM;
7518
7519 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7520 u32 val;
7521
7522 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7523 break;
7524 buf[j] = cpu_to_le32(val);
7525 }
7526 if (i < NVRAM_TEST_SIZE)
7527 goto out;
7528
7529 err = -EIO;
7530 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7531 goto out;
7532
7533 /* Bootstrap checksum at offset 0x10 */
7534 csum = calc_crc((unsigned char *) buf, 0x10);
7535 if(csum != cpu_to_le32(buf[0x10/4]))
7536 goto out;
7537
7538 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7539 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7540 if (csum != cpu_to_le32(buf[0xfc/4]))
7541 goto out;
7542
7543 err = 0;
7544
7545out:
7546 kfree(buf);
7547 return err;
7548}
7549
ca43007a
MC
7550#define TG3_SERDES_TIMEOUT_SEC 2
7551#define TG3_COPPER_TIMEOUT_SEC 6
7552
7553static int tg3_test_link(struct tg3 *tp)
7554{
7555 int i, max;
7556
7557 if (!netif_running(tp->dev))
7558 return -ENODEV;
7559
7560 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7561 max = TG3_SERDES_TIMEOUT_SEC;
7562 else
7563 max = TG3_COPPER_TIMEOUT_SEC;
7564
7565 for (i = 0; i < max; i++) {
7566 if (netif_carrier_ok(tp->dev))
7567 return 0;
7568
7569 if (msleep_interruptible(1000))
7570 break;
7571 }
7572
7573 return -EIO;
7574}
7575
a71116d1
MC
7576/* Only test the commonly used registers */
7577static int tg3_test_registers(struct tg3 *tp)
7578{
7579 int i, is_5705;
7580 u32 offset, read_mask, write_mask, val, save_val, read_val;
7581 static struct {
7582 u16 offset;
7583 u16 flags;
7584#define TG3_FL_5705 0x1
7585#define TG3_FL_NOT_5705 0x2
7586#define TG3_FL_NOT_5788 0x4
7587 u32 read_mask;
7588 u32 write_mask;
7589 } reg_tbl[] = {
7590 /* MAC Control Registers */
7591 { MAC_MODE, TG3_FL_NOT_5705,
7592 0x00000000, 0x00ef6f8c },
7593 { MAC_MODE, TG3_FL_5705,
7594 0x00000000, 0x01ef6b8c },
7595 { MAC_STATUS, TG3_FL_NOT_5705,
7596 0x03800107, 0x00000000 },
7597 { MAC_STATUS, TG3_FL_5705,
7598 0x03800100, 0x00000000 },
7599 { MAC_ADDR_0_HIGH, 0x0000,
7600 0x00000000, 0x0000ffff },
7601 { MAC_ADDR_0_LOW, 0x0000,
7602 0x00000000, 0xffffffff },
7603 { MAC_RX_MTU_SIZE, 0x0000,
7604 0x00000000, 0x0000ffff },
7605 { MAC_TX_MODE, 0x0000,
7606 0x00000000, 0x00000070 },
7607 { MAC_TX_LENGTHS, 0x0000,
7608 0x00000000, 0x00003fff },
7609 { MAC_RX_MODE, TG3_FL_NOT_5705,
7610 0x00000000, 0x000007fc },
7611 { MAC_RX_MODE, TG3_FL_5705,
7612 0x00000000, 0x000007dc },
7613 { MAC_HASH_REG_0, 0x0000,
7614 0x00000000, 0xffffffff },
7615 { MAC_HASH_REG_1, 0x0000,
7616 0x00000000, 0xffffffff },
7617 { MAC_HASH_REG_2, 0x0000,
7618 0x00000000, 0xffffffff },
7619 { MAC_HASH_REG_3, 0x0000,
7620 0x00000000, 0xffffffff },
7621
7622 /* Receive Data and Receive BD Initiator Control Registers. */
7623 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7624 0x00000000, 0xffffffff },
7625 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7626 0x00000000, 0xffffffff },
7627 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7628 0x00000000, 0x00000003 },
7629 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7630 0x00000000, 0xffffffff },
7631 { RCVDBDI_STD_BD+0, 0x0000,
7632 0x00000000, 0xffffffff },
7633 { RCVDBDI_STD_BD+4, 0x0000,
7634 0x00000000, 0xffffffff },
7635 { RCVDBDI_STD_BD+8, 0x0000,
7636 0x00000000, 0xffff0002 },
7637 { RCVDBDI_STD_BD+0xc, 0x0000,
7638 0x00000000, 0xffffffff },
7639
7640 /* Receive BD Initiator Control Registers. */
7641 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7642 0x00000000, 0xffffffff },
7643 { RCVBDI_STD_THRESH, TG3_FL_5705,
7644 0x00000000, 0x000003ff },
7645 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7646 0x00000000, 0xffffffff },
7647
7648 /* Host Coalescing Control Registers. */
7649 { HOSTCC_MODE, TG3_FL_NOT_5705,
7650 0x00000000, 0x00000004 },
7651 { HOSTCC_MODE, TG3_FL_5705,
7652 0x00000000, 0x000000f6 },
7653 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7654 0x00000000, 0xffffffff },
7655 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7656 0x00000000, 0x000003ff },
7657 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7658 0x00000000, 0xffffffff },
7659 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7660 0x00000000, 0x000003ff },
7661 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7662 0x00000000, 0xffffffff },
7663 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7664 0x00000000, 0x000000ff },
7665 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7666 0x00000000, 0xffffffff },
7667 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7668 0x00000000, 0x000000ff },
7669 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7670 0x00000000, 0xffffffff },
7671 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7672 0x00000000, 0xffffffff },
7673 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7674 0x00000000, 0xffffffff },
7675 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7676 0x00000000, 0x000000ff },
7677 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7678 0x00000000, 0xffffffff },
7679 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7680 0x00000000, 0x000000ff },
7681 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7682 0x00000000, 0xffffffff },
7683 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7684 0x00000000, 0xffffffff },
7685 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7686 0x00000000, 0xffffffff },
7687 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7688 0x00000000, 0xffffffff },
7689 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7690 0x00000000, 0xffffffff },
7691 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7692 0xffffffff, 0x00000000 },
7693 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7694 0xffffffff, 0x00000000 },
7695
7696 /* Buffer Manager Control Registers. */
7697 { BUFMGR_MB_POOL_ADDR, 0x0000,
7698 0x00000000, 0x007fff80 },
7699 { BUFMGR_MB_POOL_SIZE, 0x0000,
7700 0x00000000, 0x007fffff },
7701 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7702 0x00000000, 0x0000003f },
7703 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7704 0x00000000, 0x000001ff },
7705 { BUFMGR_MB_HIGH_WATER, 0x0000,
7706 0x00000000, 0x000001ff },
7707 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7708 0xffffffff, 0x00000000 },
7709 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7710 0xffffffff, 0x00000000 },
7711
7712 /* Mailbox Registers */
7713 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7714 0x00000000, 0x000001ff },
7715 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7716 0x00000000, 0x000001ff },
7717 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7718 0x00000000, 0x000007ff },
7719 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7720 0x00000000, 0x000001ff },
7721
7722 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7723 };
7724
7725 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7726 is_5705 = 1;
7727 else
7728 is_5705 = 0;
7729
7730 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7731 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7732 continue;
7733
7734 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7735 continue;
7736
7737 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7738 (reg_tbl[i].flags & TG3_FL_NOT_5788))
7739 continue;
7740
7741 offset = (u32) reg_tbl[i].offset;
7742 read_mask = reg_tbl[i].read_mask;
7743 write_mask = reg_tbl[i].write_mask;
7744
7745 /* Save the original register content */
7746 save_val = tr32(offset);
7747
7748 /* Determine the read-only value. */
7749 read_val = save_val & read_mask;
7750
7751 /* Write zero to the register, then make sure the read-only bits
7752 * are not changed and the read/write bits are all zeros.
7753 */
7754 tw32(offset, 0);
7755
7756 val = tr32(offset);
7757
7758 /* Test the read-only and read/write bits. */
7759 if (((val & read_mask) != read_val) || (val & write_mask))
7760 goto out;
7761
7762 /* Write ones to all the bits defined by RdMask and WrMask, then
7763 * make sure the read-only bits are not changed and the
7764 * read/write bits are all ones.
7765 */
7766 tw32(offset, read_mask | write_mask);
7767
7768 val = tr32(offset);
7769
7770 /* Test the read-only bits. */
7771 if ((val & read_mask) != read_val)
7772 goto out;
7773
7774 /* Test the read/write bits. */
7775 if ((val & write_mask) != write_mask)
7776 goto out;
7777
7778 tw32(offset, save_val);
7779 }
7780
7781 return 0;
7782
7783out:
7784 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7785 tw32(offset, save_val);
7786 return -EIO;
7787}
7788
7942e1db
MC
7789static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7790{
7791 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7792 int i;
7793 u32 j;
7794
7795 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7796 for (j = 0; j < len; j += 4) {
7797 u32 val;
7798
7799 tg3_write_mem(tp, offset + j, test_pattern[i]);
7800 tg3_read_mem(tp, offset + j, &val);
7801 if (val != test_pattern[i])
7802 return -EIO;
7803 }
7804 }
7805 return 0;
7806}
7807
7808static int tg3_test_memory(struct tg3 *tp)
7809{
7810 static struct mem_entry {
7811 u32 offset;
7812 u32 len;
7813 } mem_tbl_570x[] = {
7814 { 0x00000000, 0x01000},
7815 { 0x00002000, 0x1c000},
7816 { 0xffffffff, 0x00000}
7817 }, mem_tbl_5705[] = {
7818 { 0x00000100, 0x0000c},
7819 { 0x00000200, 0x00008},
7820 { 0x00000b50, 0x00400},
7821 { 0x00004000, 0x00800},
7822 { 0x00006000, 0x01000},
7823 { 0x00008000, 0x02000},
7824 { 0x00010000, 0x0e000},
7825 { 0xffffffff, 0x00000}
7826 };
7827 struct mem_entry *mem_tbl;
7828 int err = 0;
7829 int i;
7830
7831 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7832 mem_tbl = mem_tbl_5705;
7833 else
7834 mem_tbl = mem_tbl_570x;
7835
7836 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7837 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7838 mem_tbl[i].len)) != 0)
7839 break;
7840 }
7841
7842 return err;
7843}
7844
c76949a6
MC
7845static int tg3_test_loopback(struct tg3 *tp)
7846{
7847 u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7848 u32 desc_idx;
7849 struct sk_buff *skb, *rx_skb;
7850 u8 *tx_data;
7851 dma_addr_t map;
7852 int num_pkts, tx_len, rx_len, i, err;
7853 struct tg3_rx_buffer_desc *desc;
7854
7855 if (!netif_running(tp->dev))
7856 return -ENODEV;
7857
7858 err = -EIO;
7859
c76949a6
MC
7860 tg3_reset_hw(tp);
7861
7862 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7863 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7864 MAC_MODE_PORT_MODE_GMII;
7865 tw32(MAC_MODE, mac_mode);
7866
7867 tx_len = 1514;
7868 skb = dev_alloc_skb(tx_len);
7869 tx_data = skb_put(skb, tx_len);
7870 memcpy(tx_data, tp->dev->dev_addr, 6);
7871 memset(tx_data + 6, 0x0, 8);
7872
7873 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7874
7875 for (i = 14; i < tx_len; i++)
7876 tx_data[i] = (u8) (i & 0xff);
7877
7878 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7879
7880 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7881 HOSTCC_MODE_NOW);
7882
7883 udelay(10);
7884
7885 rx_start_idx = tp->hw_status->idx[0].rx_producer;
7886
7887 send_idx = 0;
7888 num_pkts = 0;
7889
7890 tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7891
7892 send_idx++;
7893 num_pkts++;
7894
7895 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7896 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7897
7898 udelay(10);
7899
7900 for (i = 0; i < 10; i++) {
7901 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7902 HOSTCC_MODE_NOW);
7903
7904 udelay(10);
7905
7906 tx_idx = tp->hw_status->idx[0].tx_consumer;
7907 rx_idx = tp->hw_status->idx[0].rx_producer;
7908 if ((tx_idx == send_idx) &&
7909 (rx_idx == (rx_start_idx + num_pkts)))
7910 break;
7911 }
7912
7913 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7914 dev_kfree_skb(skb);
7915
7916 if (tx_idx != send_idx)
7917 goto out;
7918
7919 if (rx_idx != rx_start_idx + num_pkts)
7920 goto out;
7921
7922 desc = &tp->rx_rcb[rx_start_idx];
7923 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7924 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7925 if (opaque_key != RXD_OPAQUE_RING_STD)
7926 goto out;
7927
7928 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7929 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7930 goto out;
7931
7932 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7933 if (rx_len != tx_len)
7934 goto out;
7935
7936 rx_skb = tp->rx_std_buffers[desc_idx].skb;
7937
7938 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7939 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7940
7941 for (i = 14; i < tx_len; i++) {
7942 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7943 goto out;
7944 }
7945 err = 0;
7946
7947 /* tg3_free_rings will unmap and free the rx_skb */
7948out:
7949 return err;
7950}
7951
/* ethtool self-test entry point (ETHTOOL_TEST).
 *
 * Fills data[0..5] with per-test results (0 = pass, 1 = fail) and sets
 * ETH_TEST_FL_FAILED in etest->flags on any failure:
 *   data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 *   data[4] loopback, data[5] interrupt.
 * The register/memory/loopback/interrupt tests are destructive and run
 * only when the caller requested an offline test; the device is halted,
 * tested, then fully re-initialized if it was running.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* Non-destructive tests first; these run even for online tests. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int irq_sync = 0;

		/* Quiesce NAPI/interrupts only if the device is up. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and both on-chip CPUs before poking at
		 * registers/memory (5705+ has no separate TX CPU).
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		tg3_nvram_unlock(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if (tg3_test_loopback(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[4] = 1;
		}

		/* The interrupt test must run without the full lock held
		 * (it needs the IRQ path to be live).
		 */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Shut down, then bring the hardware back up if it was
		 * running before the test.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp);
			tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
}
8016
1da177e4
LT
8017static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8018{
8019 struct mii_ioctl_data *data = if_mii(ifr);
8020 struct tg3 *tp = netdev_priv(dev);
8021 int err;
8022
8023 switch(cmd) {
8024 case SIOCGMIIPHY:
8025 data->phy_id = PHY_ADDR;
8026
8027 /* fallthru */
8028 case SIOCGMIIREG: {
8029 u32 mii_regval;
8030
8031 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8032 break; /* We have no PHY */
8033
f47c11ee 8034 spin_lock_bh(&tp->lock);
1da177e4 8035 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 8036 spin_unlock_bh(&tp->lock);
1da177e4
LT
8037
8038 data->val_out = mii_regval;
8039
8040 return err;
8041 }
8042
8043 case SIOCSMIIREG:
8044 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8045 break; /* We have no PHY */
8046
8047 if (!capable(CAP_NET_ADMIN))
8048 return -EPERM;
8049
f47c11ee 8050 spin_lock_bh(&tp->lock);
1da177e4 8051 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 8052 spin_unlock_bh(&tp->lock);
1da177e4
LT
8053
8054 return err;
8055
8056 default:
8057 /* do nothing */
8058 break;
8059 }
8060 return -EOPNOTSUPP;
8061}
8062
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and update the
 * RX_MODE_KEEP_VLAN_TAG bit accordingly (done inside __tg3_set_rx_mode).
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);
}

/* VLAN acceleration hook: drop a VLAN id from the group's device table. */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);
}
#endif
8088
15f9850d
DM
8089static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8090{
8091 struct tg3 *tp = netdev_priv(dev);
8092
8093 memcpy(ec, &tp->coal, sizeof(*ec));
8094 return 0;
8095}
8096
/* ethtool set_coalesce: validate and apply interrupt coalescing
 * parameters.  Only the nine fields copied below are honored; all other
 * ethtool_coalesce members are silently ignored.
 *
 * Returns 0 on success, -EINVAL if any value is out of range or if a
 * rx/tx usec+frame pair is all-zero (which would disable interrupts).
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	/* On 5705+ hardware the irq-tick and stats-tick parameters are
	 * not supported, so their allowed maximum stays 0.
	 */
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	/* Range-check every parameter we are going to apply. */
	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	/* Push the new values to hardware only if the interface is up;
	 * otherwise they take effect at the next tg3_init_hw().
	 */
	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
8150
/* ethtool operations table for tg3 devices; installed on the netdev at
 * probe time.  TSO entries are compiled in only when the kernel
 * provides NETIF_F_TSO (see TG3_TSO_SUPPORT near the top of the file).
 */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
};
8188
8189static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8190{
8191 u32 cursize, val;
8192
8193 tp->nvram_size = EEPROM_CHIP_SIZE;
8194
8195 if (tg3_nvram_read(tp, 0, &val) != 0)
8196 return;
8197
8198 if (swab32(val) != TG3_EEPROM_MAGIC)
8199 return;
8200
8201 /*
8202 * Size the chip by reading offsets at increasing powers of two.
8203 * When we encounter our validation signature, we know the addressing
8204 * has wrapped around, and thus have our chip size.
8205 */
8206 cursize = 0x800;
8207
8208 while (cursize < tp->nvram_size) {
8209 if (tg3_nvram_read(tp, cursize, &val) != 0)
8210 return;
8211
8212 if (swab32(val) == TG3_EEPROM_MAGIC)
8213 break;
8214
8215 cursize <<= 1;
8216 }
8217
8218 tp->nvram_size = cursize;
8219}
8220
8221static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8222{
8223 u32 val;
8224
8225 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8226 if (val != 0) {
8227 tp->nvram_size = (val >> 16) * 1024;
8228 return;
8229 }
8230 }
8231 tp->nvram_size = 0x20000;
8232}
8233
/* Decode NVRAM_CFG1 to determine the attached NVRAM part's vendor,
 * page size, and buffered-ness; results are recorded in tp->nvram_*
 * and tp->tg3_flags / tg3_flags2.  The per-vendor decode only applies
 * to 5750 ASICs; everything else is assumed to be buffered Atmel flash.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* Not a flash interface: force non-compat addressing. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	}
	else {
		/* Non-5750: assume buffered Atmel AT45DB0X1B flash. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
8285
/* 5752-specific variant of tg3_get_nvram_info(): decodes the 5752
 * vendor and page-size fields of NVRAM_CFG1 and records the results in
 * tp->nvram_* and the flag words.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts report their page size in CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
		case FLASH_5752PAGE_SIZE_256:
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5752PAGE_SIZE_512:
			tp->nvram_pagesize = 512;
			break;
		case FLASH_5752PAGE_SIZE_1K:
			tp->nvram_pagesize = 1024;
			break;
		case FLASH_5752PAGE_SIZE_2K:
			tp->nvram_pagesize = 2048;
			break;
		case FLASH_5752PAGE_SIZE_4K:
			tp->nvram_pagesize = 4096;
			break;
		case FLASH_5752PAGE_SIZE_264:
			tp->nvram_pagesize = 264;
			break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
8346
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe-time NVRAM setup: reset the EEPROM state machine, enable
 * seeprom access, and — on chips with a real NVRAM interface — decode
 * the part's geometry and size.  No-op on Sun 570X boards, which have
 * no accessible NVRAM.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	int j;

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
		return;

	/* Reset the EEPROM address state machine and set its clock. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* XXX schedule_timeout() ... */
	for (j = 0; j < 100; j++)
		udelay(10);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tp->tg3_flags |= TG3_FLAG_NVRAM;

		tg3_enable_nvram_access(tp);

		/* The 5752 has its own CFG1 layout; see the two decoders. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);

	} else {
		/* 5700/5701 only have a plain serial EEPROM. */
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
8390
/* Read one 32-bit word from a plain serial EEPROM via the GRC EEPROM
 * address/data registers, polling up to ~1s for completion.
 *
 * @offset: byte offset; must be dword aligned and within
 *          EEPROM_ADDR_ADDR_MASK.
 * @val:    out parameter for the word read.
 *
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits, clear addr/devid/read, then start
	 * a read at the requested address.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion: 10000 * 100us max. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
8424
8425#define NVRAM_CMD_TIMEOUT 10000
8426
8427static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8428{
8429 int i;
8430
8431 tw32(NVRAM_CMD, nvram_cmd);
8432 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8433 udelay(10);
8434 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8435 udelay(10);
8436 break;
8437 }
8438 }
8439 if (i == NVRAM_CMD_TIMEOUT) {
8440 return -EBUSY;
8441 }
8442 return 0;
8443}
8444
/* Read one 32-bit word from NVRAM at the given byte offset.
 *
 * Falls back to the direct-EEPROM path when the chip has no NVRAM
 * interface.  For buffered Atmel flash the linear offset is first
 * translated into the chip's page/byte addressing scheme.
 *
 * Returns 0 on success with *val filled in (byte-swapped from the raw
 * register value), -EINVAL on bad offset or Sun 570X, or the error
 * from the underlying command.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
		return -EINVAL;
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Buffered Atmel flash addresses by (page << POS) + byte-in-page. */
	if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL)) {

		offset = ((offset / tp->nvram_pagesize) <<
			  ATMEL_AT45DB0X1B_PAGE_POS) +
			 (offset % tp->nvram_pagesize);
	}

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	tg3_nvram_lock(tp);

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_nvram_unlock(tp);

	tg3_disable_nvram_access(tp);

	return ret;
}
8486
/* Write a buffer to a plain serial EEPROM one 32-bit word at a time via
 * the GRC EEPROM registers, polling each word for completion.
 *
 * @offset, @len: byte offset and length; caller guarantees dword
 *                alignment (see tg3_nvram_write_block).
 *
 * Returns 0 on success, -EBUSY if any word write times out.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* Data register is little-endian from the chip's view. */
		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* Writing COMPLETE back clears the previous status bit. */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion: 10000 * 100us max per word. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
8528
/* offset and length are dword aligned */
/* Write to unbuffered flash using a read-modify-write of whole pages:
 * for each affected page, read it into a scratch buffer, merge the new
 * data, then issue write-enable, page-erase, write-enable again, and
 * program the page dword by dword (FIRST on the first dword, LAST on
 * the final one).  A write-disable command is always issued at the end.
 *
 * Returns 0 on success, -ENOMEM if the scratch page cannot be
 * allocated, or the first error from a read/exec-cmd step.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;	/* NOTE(review): assumes pagesize is a power of two — verify for all parts */
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Read the whole current page into tmp. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						  (u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		/* Merge the caller's data into this page. */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, dword by dword. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
8624
/* offset and length are dword aligned */
/* Write to buffered flash or EEPROM one dword at a time.  The FIRST
 * and LAST command bits are set at page boundaries (and at the very
 * first / very last dword of the transfer); ST parts additionally need
 * a write-enable command before each FIRST dword, and EEPROMs always
 * take complete FIRST|LAST word writes.
 *
 * Returns 0 on success, or the first error from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Atmel buffered flash uses page/byte addressing. */
		if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
			(tp->nvram_jedecnum == JEDEC_ATMEL)) {

			phy_addr = ((offset / tp->nvram_pagesize) <<
				    ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
		}
		else {
			phy_addr = offset;
		}

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		else if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts need a write-enable before each page start. */
		if ((tp->nvram_jedecnum == JEDEC_ST) &&
			(nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				      NVRAM_CMD_WREN | NVRAM_CMD_GO |
				      NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
8680
/* offset and length are dword aligned */
/* Top-level NVRAM write: drop write protection (GPIO1) if present,
 * dispatch to the EEPROM / buffered / unbuffered writer as appropriate,
 * and restore write protection afterwards.
 *
 * Returns 0 on success, -EINVAL on Sun 570X, or the underlying error.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
		return -EINVAL;
	}

	/* Deassert GPIO_OUTPUT1 to disable the external write-protect. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		tg3_nvram_lock(tp);

		tg3_enable_nvram_access(tp);
		/* NOTE(review): the 0x406 write to NVRAM_WRITE1 on
		 * non-protected 5750+ parts is undocumented here —
		 * presumably a required magic enable sequence.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert the write-protect GPIO state. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
8738
/* Maps a board's PCI subsystem vendor/device ID to the PHY ID expected
 * on that board; consulted by lookup_by_subsys().  A phy_id of 0 marks
 * boards with no copper PHY (e.g. fiber/SX variants).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
8781
8782static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8783{
8784 int i;
8785
8786 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8787 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8788 tp->pdev->subsystem_vendor) &&
8789 (subsys_id_to_phy_id[i].subsys_devid ==
8790 tp->pdev->subsystem_device))
8791 return &subsys_id_to_phy_id[i];
8792 }
8793 return NULL;
8794}
8795
7d0c41ef
MC
8796/* Since this function may be called in D3-hot power state during
8797 * tg3_init_one(), only config cycles are allowed.
8798 */
8799static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 8800{
1da177e4 8801 u32 val;
7d0c41ef
MC
8802
8803 /* Make sure register accesses (indirect or otherwise)
8804 * will function correctly.
8805 */
8806 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8807 tp->misc_host_ctrl);
1da177e4
LT
8808
8809 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
8810 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8811
1da177e4
LT
8812 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8813 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8814 u32 nic_cfg, led_cfg;
7d0c41ef
MC
8815 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8816 int eeprom_phy_serdes = 0;
1da177e4
LT
8817
8818 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8819 tp->nic_sram_data_cfg = nic_cfg;
8820
8821 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8822 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8823 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8824 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8825 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8826 (ver > 0) && (ver < 0x100))
8827 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8828
1da177e4
LT
8829 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8830 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8831 eeprom_phy_serdes = 1;
8832
8833 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8834 if (nic_phy_id != 0) {
8835 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8836 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8837
8838 eeprom_phy_id = (id1 >> 16) << 10;
8839 eeprom_phy_id |= (id2 & 0xfc00) << 16;
8840 eeprom_phy_id |= (id2 & 0x03ff) << 0;
8841 } else
8842 eeprom_phy_id = 0;
8843
7d0c41ef 8844 tp->phy_id = eeprom_phy_id;
747e8f8b
MC
8845 if (eeprom_phy_serdes) {
8846 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8847 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8848 else
8849 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8850 }
7d0c41ef 8851
cbf46853 8852 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
8853 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8854 SHASTA_EXT_LED_MODE_MASK);
cbf46853 8855 else
1da177e4
LT
8856 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8857
8858 switch (led_cfg) {
8859 default:
8860 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8861 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8862 break;
8863
8864 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8865 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8866 break;
8867
8868 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8869 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
8870
8871 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8872 * read on some older 5700/5701 bootcode.
8873 */
8874 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8875 ASIC_REV_5700 ||
8876 GET_ASIC_REV(tp->pci_chip_rev_id) ==
8877 ASIC_REV_5701)
8878 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8879
1da177e4
LT
8880 break;
8881
8882 case SHASTA_EXT_LED_SHARED:
8883 tp->led_ctrl = LED_CTRL_MODE_SHARED;
8884 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8885 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8886 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8887 LED_CTRL_MODE_PHY_2);
8888 break;
8889
8890 case SHASTA_EXT_LED_MAC:
8891 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8892 break;
8893
8894 case SHASTA_EXT_LED_COMBO:
8895 tp->led_ctrl = LED_CTRL_MODE_COMBO;
8896 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8897 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8898 LED_CTRL_MODE_PHY_2);
8899 break;
8900
8901 };
8902
8903 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8905 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8906 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8907
8908 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8909 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8910 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8911 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8912
8913 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8914 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 8915 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
8916 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8917 }
8918 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8919 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8920
8921 if (cfg2 & (1 << 17))
8922 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8923
8924 /* serdes signal pre-emphasis in register 0x590 set by */
8925 /* bootcode if bit 18 is set */
8926 if (cfg2 & (1 << 18))
8927 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8928 }
7d0c41ef
MC
8929}
8930
8931static int __devinit tg3_phy_probe(struct tg3 *tp)
8932{
8933 u32 hw_phy_id_1, hw_phy_id_2;
8934 u32 hw_phy_id, hw_phy_id_masked;
8935 int err;
1da177e4
LT
8936
8937 /* Reading the PHY ID register can conflict with ASF
8938 * firwmare access to the PHY hardware.
8939 */
8940 err = 0;
8941 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8942 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8943 } else {
8944 /* Now read the physical PHY_ID from the chip and verify
8945 * that it is sane. If it doesn't look good, we fall back
8946 * to either the hard-coded table based PHY_ID and failing
8947 * that the value found in the eeprom area.
8948 */
8949 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8950 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8951
8952 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
8953 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8954 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
8955
8956 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8957 }
8958
8959 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8960 tp->phy_id = hw_phy_id;
8961 if (hw_phy_id_masked == PHY_ID_BCM8002)
8962 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
da6b2d01
MC
8963 else
8964 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
1da177e4 8965 } else {
7d0c41ef
MC
8966 if (tp->phy_id != PHY_ID_INVALID) {
8967 /* Do nothing, phy ID already set up in
8968 * tg3_get_eeprom_hw_cfg().
8969 */
1da177e4
LT
8970 } else {
8971 struct subsys_tbl_ent *p;
8972
8973 /* No eeprom signature? Try the hardcoded
8974 * subsys device table.
8975 */
8976 p = lookup_by_subsys(tp);
8977 if (!p)
8978 return -ENODEV;
8979
8980 tp->phy_id = p->phy_id;
8981 if (!tp->phy_id ||
8982 tp->phy_id == PHY_ID_BCM8002)
8983 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8984 }
8985 }
8986
747e8f8b 8987 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
1da177e4
LT
8988 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8989 u32 bmsr, adv_reg, tg3_ctrl;
8990
8991 tg3_readphy(tp, MII_BMSR, &bmsr);
8992 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8993 (bmsr & BMSR_LSTATUS))
8994 goto skip_phy_reset;
8995
8996 err = tg3_phy_reset(tp);
8997 if (err)
8998 return err;
8999
9000 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9001 ADVERTISE_100HALF | ADVERTISE_100FULL |
9002 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9003 tg3_ctrl = 0;
9004 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9005 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9006 MII_TG3_CTRL_ADV_1000_FULL);
9007 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9008 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9009 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9010 MII_TG3_CTRL_ENABLE_AS_MASTER);
9011 }
9012
9013 if (!tg3_copper_is_advertising_all(tp)) {
9014 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9015
9016 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9017 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9018
9019 tg3_writephy(tp, MII_BMCR,
9020 BMCR_ANENABLE | BMCR_ANRESTART);
9021 }
9022 tg3_phy_set_wirespeed(tp);
9023
9024 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9025 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9026 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9027 }
9028
9029skip_phy_reset:
9030 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9031 err = tg3_init_5401phy_dsp(tp);
9032 if (err)
9033 return err;
9034 }
9035
9036 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9037 err = tg3_init_5401phy_dsp(tp);
9038 }
9039
747e8f8b 9040 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1da177e4
LT
9041 tp->link_config.advertising =
9042 (ADVERTISED_1000baseT_Half |
9043 ADVERTISED_1000baseT_Full |
9044 ADVERTISED_Autoneg |
9045 ADVERTISED_FIBRE);
9046 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9047 tp->link_config.advertising &=
9048 ~(ADVERTISED_1000baseT_Half |
9049 ADVERTISED_1000baseT_Full);
9050
9051 return err;
9052}
9053
9054static void __devinit tg3_read_partno(struct tg3 *tp)
9055{
9056 unsigned char vpd_data[256];
9057 int i;
9058
9059 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9060 /* Sun decided not to put the necessary bits in the
9061 * NVRAM of their onboard tg3 parts :(
9062 */
9063 strcpy(tp->board_part_number, "Sun 570X");
9064 return;
9065 }
9066
9067 for (i = 0; i < 256; i += 4) {
9068 u32 tmp;
9069
9070 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9071 goto out_not_found;
9072
9073 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9074 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9075 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9076 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9077 }
9078
9079 /* Now parse and find the part number. */
9080 for (i = 0; i < 256; ) {
9081 unsigned char val = vpd_data[i];
9082 int block_end;
9083
9084 if (val == 0x82 || val == 0x91) {
9085 i = (i + 3 +
9086 (vpd_data[i + 1] +
9087 (vpd_data[i + 2] << 8)));
9088 continue;
9089 }
9090
9091 if (val != 0x90)
9092 goto out_not_found;
9093
9094 block_end = (i + 3 +
9095 (vpd_data[i + 1] +
9096 (vpd_data[i + 2] << 8)));
9097 i += 3;
9098 while (i < block_end) {
9099 if (vpd_data[i + 0] == 'P' &&
9100 vpd_data[i + 1] == 'N') {
9101 int partno_len = vpd_data[i + 2];
9102
9103 if (partno_len > 24)
9104 goto out_not_found;
9105
9106 memcpy(tp->board_part_number,
9107 &vpd_data[i + 3],
9108 partno_len);
9109
9110 /* Success. */
9111 return;
9112 }
9113 }
9114
9115 /* Part number not found. */
9116 goto out_not_found;
9117 }
9118
9119out_not_found:
9120 strcpy(tp->board_part_number, "none");
9121}
9122
#ifdef CONFIG_SPARC64
/* Detect a Sun onboard 570X part by reading the subsystem-vendor-id
 * property from the OpenBoot PROM node for this PCI device.
 *
 * Returns 1 for a Sun part, 0 otherwise (including PROM read failure).
 */
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
{
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;
		u32 venid;
		int err;

		err = prom_getproperty(node, "subsystem-vendor-id",
				       (char *) &venid, sizeof(venid));
		if (err == 0 || err == -1)
			return 0;
		if (venid == PCI_VENDOR_ID_SUN)
			return 1;
	}
	return 0;
}
#endif
9144
9145static int __devinit tg3_get_invariants(struct tg3 *tp)
9146{
9147 static struct pci_device_id write_reorder_chipsets[] = {
9148 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9149 PCI_DEVICE_ID_INTEL_82801AA_8) },
9150 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9151 PCI_DEVICE_ID_INTEL_82801AB_8) },
9152 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9153 PCI_DEVICE_ID_INTEL_82801BA_11) },
9154 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9155 PCI_DEVICE_ID_INTEL_82801BA_6) },
9156 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9157 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9158 { },
9159 };
9160 u32 misc_ctrl_reg;
9161 u32 cacheline_sz_reg;
9162 u32 pci_state_reg, grc_misc_cfg;
9163 u32 val;
9164 u16 pci_cmd;
9165 int err;
9166
9167#ifdef CONFIG_SPARC64
9168 if (tg3_is_sun_570X(tp))
9169 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9170#endif
9171
9172 /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
9173 * reordering to the mailbox registers done by the host
9174 * controller can cause major troubles. We read back from
9175 * every mailbox register write to force the writes to be
9176 * posted to the chip in order.
9177 */
9178 if (pci_dev_present(write_reorder_chipsets))
9179 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9180
9181 /* Force memory write invalidate off. If we leave it on,
9182 * then on 5700_BX chips we have to enable a workaround.
9183 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9184 * to match the cacheline size. The Broadcom driver have this
9185 * workaround but turns MWI off all the times so never uses
9186 * it. This seems to suggest that the workaround is insufficient.
9187 */
9188 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9189 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9190 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9191
9192 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9193 * has the register indirect write enable bit set before
9194 * we try to access any of the MMIO registers. It is also
9195 * critical that the PCI-X hw workaround situation is decided
9196 * before that as well.
9197 */
9198 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9199 &misc_ctrl_reg);
9200
9201 tp->pci_chip_rev_id = (misc_ctrl_reg >>
9202 MISC_HOST_CTRL_CHIPREV_SHIFT);
9203
ff645bec
MC
9204 /* Wrong chip ID in 5752 A0. This code can be removed later
9205 * as A0 is not in production.
9206 */
9207 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9208 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9209
4cf78e4f
MC
9210 /* Find msi capability. */
9211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9212 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9213
1da177e4
LT
9214 /* Initialize misc host control in PCI block. */
9215 tp->misc_host_ctrl |= (misc_ctrl_reg &
9216 MISC_HOST_CTRL_CHIPREV);
9217 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9218 tp->misc_host_ctrl);
9219
9220 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9221 &cacheline_sz_reg);
9222
9223 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
9224 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
9225 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
9226 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
9227
6708e5cc 9228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
4cf78e4f
MC
9229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9230 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
6708e5cc
JL
9231 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9232
1b440c56
JL
9233 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9234 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9235 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9236
bb7064dc 9237 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
9238 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9239
0f893dc6
MC
9240 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9241 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9242 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9243 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9244
1da177e4
LT
9245 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9246 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9247
9248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9249 tp->pci_lat_timer < 64) {
9250 tp->pci_lat_timer = 64;
9251
9252 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
9253 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
9254 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
9255 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
9256
9257 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9258 cacheline_sz_reg);
9259 }
9260
9261 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9262 &pci_state_reg);
9263
9264 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9265 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9266
9267 /* If this is a 5700 BX chipset, and we are in PCI-X
9268 * mode, enable register write workaround.
9269 *
9270 * The workaround is to use indirect register accesses
9271 * for all chip writes not to mailbox registers.
9272 */
9273 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9274 u32 pm_reg;
9275 u16 pci_cmd;
9276
9277 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9278
9279 /* The chip can have it's power management PCI config
9280 * space registers clobbered due to this bug.
9281 * So explicitly force the chip into D0 here.
9282 */
9283 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9284 &pm_reg);
9285 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9286 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9287 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9288 pm_reg);
9289
9290 /* Also, force SERR#/PERR# in PCI command. */
9291 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9292 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9293 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9294 }
9295 }
9296
9297 /* Back to back register writes can cause problems on this chip,
9298 * the workaround is to read back all reg writes except those to
9299 * mailbox regs. See tg3_write_indirect_reg32().
9300 *
9301 * PCI Express 5750_A0 rev chips need this workaround too.
9302 */
9303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9304 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9305 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9306 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9307
9308 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9309 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9310 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9311 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9312
9313 /* Chip-specific fixup from Broadcom driver */
9314 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9315 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9316 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9317 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9318 }
9319
1ee582d8 9320 /* Default fast path register access methods */
20094930 9321 tp->read32 = tg3_read32;
1ee582d8 9322 tp->write32 = tg3_write32;
20094930 9323 tp->write32_mbox = tg3_write32;
1ee582d8
MC
9324 tp->write32_tx_mbox = tg3_write32;
9325 tp->write32_rx_mbox = tg3_write32;
9326
9327 /* Various workaround register access methods */
9328 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9329 tp->write32 = tg3_write_indirect_reg32;
9330 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9331 tp->write32 = tg3_write_flush_reg32;
9332
9333 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9334 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9335 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9336 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9337 tp->write32_rx_mbox = tg3_write_flush_reg32;
9338 }
20094930 9339
7d0c41ef
MC
9340 /* Get eeprom hw config before calling tg3_set_power_state().
9341 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9342 * determined before calling tg3_set_power_state() so that
9343 * we know whether or not to switch out of Vaux power.
9344 * When the flag is set, it means that GPIO1 is used for eeprom
9345 * write protect and also implies that it is a LOM where GPIOs
9346 * are not used to switch power.
9347 */
9348 tg3_get_eeprom_hw_cfg(tp);
9349
314fba34
MC
9350 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9351 * GPIO1 driven high will bring 5700's external PHY out of reset.
9352 * It is also used as eeprom write protect on LOMs.
9353 */
9354 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9355 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9356 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9357 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9358 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
9359 /* Unused GPIO3 must be driven as output on 5752 because there
9360 * are no pull-up resistors on unused GPIO pins.
9361 */
9362 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9363 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 9364
1da177e4
LT
9365 /* Force the chip into D0. */
9366 err = tg3_set_power_state(tp, 0);
9367 if (err) {
9368 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9369 pci_name(tp->pdev));
9370 return err;
9371 }
9372
9373 /* 5700 B0 chips do not support checksumming correctly due
9374 * to hardware bugs.
9375 */
9376 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9377 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9378
9379 /* Pseudo-header checksum is done by hardware logic and not
9380 * the offload processers, so make the chip do the pseudo-
9381 * header checksums on receive. For transmit it is more
9382 * convenient to do the pseudo-header checksum in software
9383 * as Linux does that on transmit for us in all cases.
9384 */
9385 tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9386 tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9387
9388 /* Derive initial jumbo mode from MTU assigned in
9389 * ether_setup() via the alloc_etherdev() call
9390 */
0f893dc6
MC
9391 if (tp->dev->mtu > ETH_DATA_LEN &&
9392 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9393 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
1da177e4
LT
9394
9395 /* Determine WakeOnLan speed to use. */
9396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9397 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9398 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9399 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9400 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9401 } else {
9402 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9403 }
9404
9405 /* A few boards don't want Ethernet@WireSpeed phy feature */
9406 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9407 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9408 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b
MC
9409 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9410 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1da177e4
LT
9411 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9412
9413 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9414 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9415 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9416 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9417 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9418
bb7064dc 9419 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
1da177e4
LT
9420 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9421
1da177e4 9422 tp->coalesce_mode = 0;
1da177e4
LT
9423 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9424 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9425 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9426
9427 /* Initialize MAC MI mode, polling disabled. */
9428 tw32_f(MAC_MI_MODE, tp->mi_mode);
9429 udelay(80);
9430
9431 /* Initialize data/descriptor byte/word swapping. */
9432 val = tr32(GRC_MODE);
9433 val &= GRC_MODE_HOST_STACKUP;
9434 tw32(GRC_MODE, val | tp->grc_mode);
9435
9436 tg3_switch_clocks(tp);
9437
9438 /* Clear this out for sanity. */
9439 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9440
9441 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9442 &pci_state_reg);
9443 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9444 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9445 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9446
9447 if (chiprevid == CHIPREV_ID_5701_A0 ||
9448 chiprevid == CHIPREV_ID_5701_B0 ||
9449 chiprevid == CHIPREV_ID_5701_B2 ||
9450 chiprevid == CHIPREV_ID_5701_B5) {
9451 void __iomem *sram_base;
9452
9453 /* Write some dummy words into the SRAM status block
9454 * area, see if it reads back correctly. If the return
9455 * value is bad, force enable the PCIX workaround.
9456 */
9457 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9458
9459 writel(0x00000000, sram_base);
9460 writel(0x00000000, sram_base + 4);
9461 writel(0xffffffff, sram_base + 4);
9462 if (readl(sram_base) != 0x00000000)
9463 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9464 }
9465 }
9466
9467 udelay(50);
9468 tg3_nvram_init(tp);
9469
9470 grc_misc_cfg = tr32(GRC_MISC_CFG);
9471 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9472
9473 /* Broadcom's driver says that CIOBE multisplit has a bug */
9474#if 0
9475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9476 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9477 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9478 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9479 }
9480#endif
9481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9482 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9483 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9484 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9485
fac9b83e
DM
9486 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9487 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9488 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9489 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9490 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9491 HOSTCC_MODE_CLRTICK_TXBD);
9492
9493 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9494 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9495 tp->misc_host_ctrl);
9496 }
9497
1da177e4
LT
9498 /* these are limited to 10/100 only */
9499 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9500 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9501 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9502 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9503 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9504 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9505 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9506 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9507 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9508 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9509 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9510
9511 err = tg3_phy_probe(tp);
9512 if (err) {
9513 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9514 pci_name(tp->pdev), err);
9515 /* ... but do not return immediately ... */
9516 }
9517
9518 tg3_read_partno(tp);
9519
9520 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9521 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9522 } else {
9523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9524 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9525 else
9526 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9527 }
9528
9529 /* 5700 {AX,BX} chips have a broken status block link
9530 * change bit implementation, so we must use the
9531 * status register in those cases.
9532 */
9533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9534 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9535 else
9536 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9537
9538 /* The led_ctrl is set during tg3_phy_probe, here we might
9539 * have to force the link status polling mechanism based
9540 * upon subsystem IDs.
9541 */
9542 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9543 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9544 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9545 TG3_FLAG_USE_LINKCHG_REG);
9546 }
9547
9548 /* For all SERDES we poll the MAC status register. */
9549 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9550 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9551 else
9552 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9553
9554 /* 5700 BX chips need to have their TX producer index mailboxes
9555 * written twice to workaround a bug.
9556 */
9557 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9558 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9559 else
9560 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9561
9562 /* It seems all chips can get confused if TX buffers
9563 * straddle the 4GB address boundary in some cases.
9564 */
9565 tp->dev->hard_start_xmit = tg3_start_xmit;
9566
9567 tp->rx_offset = 2;
9568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9569 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9570 tp->rx_offset = 0;
9571
9572 /* By default, disable wake-on-lan. User can change this
9573 * using ETHTOOL_SWOL.
9574 */
9575 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9576
9577 return err;
9578}
9579
#ifdef CONFIG_SPARC64
/* Read the MAC address from the OpenBoot PROM "local-mac-address"
 * property for this device.  Returns 0 on success, -ENODEV otherwise.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;

		if (prom_getproplen(node, "local-mac-address") == 6) {
			prom_getproperty(node, "local-mac-address",
					 dev->dev_addr, 6);
			return 0;
		}
	}
	return -ENODEV;
}

/* Last-resort fallback: use the system-wide IDPROM ethernet address. */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
9607
9608static int __devinit tg3_get_device_address(struct tg3 *tp)
9609{
9610 struct net_device *dev = tp->dev;
9611 u32 hi, lo, mac_offset;
9612
9613#ifdef CONFIG_SPARC64
9614 if (!tg3_get_macaddr_sparc(tp))
9615 return 0;
9616#endif
9617
9618 mac_offset = 0x7c;
4cf78e4f
MC
9619 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9620 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
9621 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1da177e4
LT
9622 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9623 mac_offset = 0xcc;
9624 if (tg3_nvram_lock(tp))
9625 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9626 else
9627 tg3_nvram_unlock(tp);
9628 }
9629
9630 /* First try to get it from MAC address mailbox. */
9631 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9632 if ((hi >> 16) == 0x484b) {
9633 dev->dev_addr[0] = (hi >> 8) & 0xff;
9634 dev->dev_addr[1] = (hi >> 0) & 0xff;
9635
9636 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9637 dev->dev_addr[2] = (lo >> 24) & 0xff;
9638 dev->dev_addr[3] = (lo >> 16) & 0xff;
9639 dev->dev_addr[4] = (lo >> 8) & 0xff;
9640 dev->dev_addr[5] = (lo >> 0) & 0xff;
9641 }
9642 /* Next, try NVRAM. */
9643 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
9644 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9645 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9646 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9647 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9648 dev->dev_addr[2] = ((lo >> 0) & 0xff);
9649 dev->dev_addr[3] = ((lo >> 8) & 0xff);
9650 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9651 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9652 }
9653 /* Finally just fetch it out of the MAC control regs. */
9654 else {
9655 hi = tr32(MAC_ADDR_0_HIGH);
9656 lo = tr32(MAC_ADDR_0_LOW);
9657
9658 dev->dev_addr[5] = lo & 0xff;
9659 dev->dev_addr[4] = (lo >> 8) & 0xff;
9660 dev->dev_addr[3] = (lo >> 16) & 0xff;
9661 dev->dev_addr[2] = (lo >> 24) & 0xff;
9662 dev->dev_addr[1] = hi & 0xff;
9663 dev->dev_addr[0] = (hi >> 8) & 0xff;
9664 }
9665
9666 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9667#ifdef CONFIG_SPARC64
9668 if (!tg3_get_default_macaddr_sparc(tp))
9669 return 0;
9670#endif
9671 return -EINVAL;
9672 }
9673 return 0;
9674}
9675
/* DMA boundary goals used by tg3_calc_dma_bndry(): burst up to one
 * cacheline, or allow bursts across multiple cachelines.
 */
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2
9678
9679static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9680{
9681 int cacheline_size;
9682 u8 byte;
9683 int goal;
9684
9685 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9686 if (byte == 0)
9687 cacheline_size = 1024;
9688 else
9689 cacheline_size = (int) byte * 4;
9690
9691 /* On 5703 and later chips, the boundary bits have no
9692 * effect.
9693 */
9694 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9695 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9696 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9697 goto out;
9698
9699#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9700 goal = BOUNDARY_MULTI_CACHELINE;
9701#else
9702#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9703 goal = BOUNDARY_SINGLE_CACHELINE;
9704#else
9705 goal = 0;
9706#endif
9707#endif
9708
9709 if (!goal)
9710 goto out;
9711
9712 /* PCI controllers on most RISC systems tend to disconnect
9713 * when a device tries to burst across a cache-line boundary.
9714 * Therefore, letting tg3 do so just wastes PCI bandwidth.
9715 *
9716 * Unfortunately, for PCI-E there are only limited
9717 * write-side controls for this, and thus for reads
9718 * we will still get the disconnects. We'll also waste
9719 * these PCI cycles for both read and write for chips
9720 * other than 5700 and 5701 which do not implement the
9721 * boundary bits.
9722 */
9723 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9724 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9725 switch (cacheline_size) {
9726 case 16:
9727 case 32:
9728 case 64:
9729 case 128:
9730 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9731 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9732 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9733 } else {
9734 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9735 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9736 }
9737 break;
9738
9739 case 256:
9740 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9741 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9742 break;
9743
9744 default:
9745 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9746 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9747 break;
9748 };
9749 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9750 switch (cacheline_size) {
9751 case 16:
9752 case 32:
9753 case 64:
9754 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9755 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9756 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9757 break;
9758 }
9759 /* fallthrough */
9760 case 128:
9761 default:
9762 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9763 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9764 break;
9765 };
9766 } else {
9767 switch (cacheline_size) {
9768 case 16:
9769 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9770 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9771 DMA_RWCTRL_WRITE_BNDRY_16);
9772 break;
9773 }
9774 /* fallthrough */
9775 case 32:
9776 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9777 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9778 DMA_RWCTRL_WRITE_BNDRY_32);
9779 break;
9780 }
9781 /* fallthrough */
9782 case 64:
9783 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9784 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9785 DMA_RWCTRL_WRITE_BNDRY_64);
9786 break;
9787 }
9788 /* fallthrough */
9789 case 128:
9790 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9791 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9792 DMA_RWCTRL_WRITE_BNDRY_128);
9793 break;
9794 }
9795 /* fallthrough */
9796 case 256:
9797 val |= (DMA_RWCTRL_READ_BNDRY_256 |
9798 DMA_RWCTRL_WRITE_BNDRY_256);
9799 break;
9800 case 512:
9801 val |= (DMA_RWCTRL_READ_BNDRY_512 |
9802 DMA_RWCTRL_WRITE_BNDRY_512);
9803 break;
9804 case 1024:
9805 default:
9806 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9807 DMA_RWCTRL_WRITE_BNDRY_1024);
9808 break;
9809 };
9810 }
9811
9812out:
9813 return val;
9814}
9815
1da177e4
LT
/* Run one host<->NIC DMA transfer of @size bytes through the chip's
 * internal descriptor mechanism.  @buf/@buf_dma is the host-side test
 * buffer; @to_device non-zero selects a read-DMA (host to NIC), zero
 * selects a write-DMA (NIC to host).  Returns 0 if the completion
 * appears within the 40 * 100us polling window, else -ENODEV.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA engines and completion FIFOs first. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build a descriptor pointing at the host test buffer.
	 * 0x2100 is the NIC-internal mbuf address used by the test
	 * (see the disabled read-back check in tg3_test_dma()).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM a word at a time via the
	 * indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor to kick off the transfer. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO for our descriptor address. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
9896
#define TEST_BUFFER_SIZE	0x2000

/* Select DMA read/write control register settings for this chip and
 * bus, then (on 5700/5701 only) exercise the DMA engine against a
 * host buffer to detect the write-DMA corruption bug, tightening the
 * write boundary to 16 bytes if corruption is seen.  Leaves the final
 * value in tp->dma_rwctrl and in the TG3PCI_DMA_RW_CTRL register.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus-specific watermark settings (magic values per Broadcom
	 * guidance for each bus/chip combination).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700 and 5701 have the write-DMA bug worth testing for;
	 * other chips keep the calculated settings as-is.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it.  On the first corruption, tighten the write
		 * boundary to 16 bytes and retry the whole test; if it
		 * corrupts even at 16 bytes, give up.
		 */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
10069
10070static void __devinit tg3_init_link_config(struct tg3 *tp)
10071{
10072 tp->link_config.advertising =
10073 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10074 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10075 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10076 ADVERTISED_Autoneg | ADVERTISED_MII);
10077 tp->link_config.speed = SPEED_INVALID;
10078 tp->link_config.duplex = DUPLEX_INVALID;
10079 tp->link_config.autoneg = AUTONEG_ENABLE;
10080 netif_carrier_off(tp->dev);
10081 tp->link_config.active_speed = SPEED_INVALID;
10082 tp->link_config.active_duplex = DUPLEX_INVALID;
10083 tp->link_config.phy_is_low_power = 0;
10084 tp->link_config.orig_speed = SPEED_INVALID;
10085 tp->link_config.orig_duplex = DUPLEX_INVALID;
10086 tp->link_config.orig_autoneg = AUTONEG_INVALID;
10087}
10088
10089static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10090{
fdfec172
MC
10091 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10092 tp->bufmgr_config.mbuf_read_dma_low_water =
10093 DEFAULT_MB_RDMA_LOW_WATER_5705;
10094 tp->bufmgr_config.mbuf_mac_rx_low_water =
10095 DEFAULT_MB_MACRX_LOW_WATER_5705;
10096 tp->bufmgr_config.mbuf_high_water =
10097 DEFAULT_MB_HIGH_WATER_5705;
10098
10099 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10100 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10101 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10102 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10103 tp->bufmgr_config.mbuf_high_water_jumbo =
10104 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10105 } else {
10106 tp->bufmgr_config.mbuf_read_dma_low_water =
10107 DEFAULT_MB_RDMA_LOW_WATER;
10108 tp->bufmgr_config.mbuf_mac_rx_low_water =
10109 DEFAULT_MB_MACRX_LOW_WATER;
10110 tp->bufmgr_config.mbuf_high_water =
10111 DEFAULT_MB_HIGH_WATER;
10112
10113 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10114 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10115 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10116 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10117 tp->bufmgr_config.mbuf_high_water_jumbo =
10118 DEFAULT_MB_HIGH_WATER_JUMBO;
10119 }
1da177e4
LT
10120
10121 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10122 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10123}
10124
10125static char * __devinit tg3_phy_string(struct tg3 *tp)
10126{
10127 switch (tp->phy_id & PHY_ID_MASK) {
10128 case PHY_ID_BCM5400: return "5400";
10129 case PHY_ID_BCM5401: return "5401";
10130 case PHY_ID_BCM5411: return "5411";
10131 case PHY_ID_BCM5701: return "5701";
10132 case PHY_ID_BCM5703: return "5703";
10133 case PHY_ID_BCM5704: return "5704";
10134 case PHY_ID_BCM5705: return "5705";
10135 case PHY_ID_BCM5750: return "5750";
85e94ced 10136 case PHY_ID_BCM5752: return "5752";
4cf78e4f 10137 case PHY_ID_BCM5780: return "5780";
1da177e4
LT
10138 case PHY_ID_BCM8002: return "8002/serdes";
10139 case 0: return "serdes";
10140 default: return "unknown";
10141 };
10142}
10143
10144static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10145{
10146 struct pci_dev *peer;
10147 unsigned int func, devnr = tp->pdev->devfn & ~7;
10148
10149 for (func = 0; func < 8; func++) {
10150 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10151 if (peer && peer != tp->pdev)
10152 break;
10153 pci_dev_put(peer);
10154 }
10155 if (!peer || peer == tp->pdev)
10156 BUG();
10157
10158 /*
10159 * We don't need to keep the refcount elevated; there's no way
10160 * to remove one half of this device without removing the other
10161 */
10162 pci_dev_put(peer);
10163
10164 return peer;
10165}
10166
15f9850d
DM
10167static void __devinit tg3_init_coal(struct tg3 *tp)
10168{
10169 struct ethtool_coalesce *ec = &tp->coal;
10170
10171 memset(ec, 0, sizeof(*ec));
10172 ec->cmd = ETHTOOL_GCOALESCE;
10173 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10174 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10175 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10176 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10177 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10178 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10179 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10180 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10181 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10182
10183 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10184 HOSTCC_MODE_CLRTICK_TXBD)) {
10185 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10186 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10187 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10188 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10189 }
d244c892
MC
10190
10191 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10192 ec->rx_coalesce_usecs_irq = 0;
10193 ec->tx_coalesce_usecs_irq = 0;
10194 ec->stats_block_coalesce_usecs = 0;
10195 }
15f9850d
DM
10196}
10197
1da177e4
LT
/* PCI probe entry point: bring up one Tigon3 device.  Enables the
 * PCI device, maps its registers, reads chip invariants, runs the
 * DMA engine self-test, and registers the net_device.  Returns 0 on
 * success or a negative errno; all partially-acquired resources are
 * released on the goto-chained error paths.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pci_using_dac, pm_cap;

	/* Print the driver banner only on the first probed device. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* Configure DMA attributes: try 64-bit addressing first and
	 * fall back to 32-bit if the platform rejects it.
	 */
	err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
	if (!err) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
		if (err < 0) {
			printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
			       "for consistent allocations\n");
			goto err_out_free_res;
		}
	} else {
		err = pci_set_dma_mask(pdev, 0xffffffffULL);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Fill in the private state with defaults. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide TSO capability: hardware TSO chips always have it;
	 * 5700/5701, 5705 A0, and ASF-enabled boards cannot do
	 * firmware TSO; everything else can.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is off by default, user can enable using ethtool.  */
#if 0
	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
		dev->features |= NETIF_F_TSO;
#endif

#endif

	/* 5705 A1 on a slow bus without TSO gets a smaller RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port 5704: remember the sibling PCI function. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->pdev_peer = tg3_find_5704_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* 5788 cannot do 64-bit DMA for packet data. */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		dev->features &= ~NETIF_F_HIGHDMA;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Announce the device: chip, PHY, bus type/speed, link modes. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
	       ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
		((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
		((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
	       ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
	       dev->name, tp->dma_rwctrl);

	return 0;

err_out_iounmap:
	iounmap(tp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
10497
10498static void __devexit tg3_remove_one(struct pci_dev *pdev)
10499{
10500 struct net_device *dev = pci_get_drvdata(pdev);
10501
10502 if (dev) {
10503 struct tg3 *tp = netdev_priv(dev);
10504
10505 unregister_netdev(dev);
10506 iounmap(tp->regs);
10507 free_netdev(dev);
10508 pci_release_regions(pdev);
10509 pci_disable_device(pdev);
10510 pci_set_drvdata(pdev, NULL);
10511 }
10512}
10513
/* PCI suspend handler: quiesce the interface, halt the chip, and
 * drop it into the requested power state.  If the power transition
 * fails, the hardware is re-initialized and the interface restarted
 * so the device remains usable.  Returns 0 or a negative errno.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Second argument appears to request irq synchronization on
	 * the first lock acquisition — NOTE(review): confirm against
	 * tg3_full_lock()'s definition elsewhere in this file.
	 */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the device back up. */
		tg3_full_lock(tp, 0);

		tg3_init_hw(tp);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}
10554
/* PCI resume handler: restore config space, return the chip to full
 * power, re-initialize the hardware, and restart the interface and
 * its timer.  Returns 0 or a negative errno from the power-state
 * transition.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down at suspend time; nothing to restore. */
	if (!netif_running(dev))
		return 0;

	/* Config space was snapshotted in tg3_init_one()/tg3_suspend(). */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, 0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_init_hw(tp);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
10585
/* PCI driver glue binding the probe/remove and power-management
 * callbacks to the device ID table.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
10594
/* Module entry point: register the PCI driver. */
static int __init tg3_init(void)
{
	return pci_module_init(&tg3_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);