]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tg3.c
[TG3]: Fix missing memory barriers and SD_STATUS_UPDATED bit clearing.
[net-next-2.6.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
18#include <linux/config.h>
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
39
40#include <net/checksum.h>
41
42#include <asm/system.h>
43#include <asm/io.h>
44#include <asm/byteorder.h>
45#include <asm/uaccess.h>
46
47#ifdef CONFIG_SPARC64
48#include <asm/idprom.h>
49#include <asm/oplib.h>
50#include <asm/pbm.h>
51#endif
52
53#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54#define TG3_VLAN_TAG_USED 1
55#else
56#define TG3_VLAN_TAG_USED 0
57#endif
58
59#ifdef NETIF_F_TSO
60#define TG3_TSO_SUPPORT 1
61#else
62#define TG3_TSO_SUPPORT 0
63#endif
64
65#include "tg3.h"
66
67#define DRV_MODULE_NAME "tg3"
68#define PFX DRV_MODULE_NAME ": "
ed7fce6c
DM
69#define DRV_MODULE_VERSION "3.31"
70#define DRV_MODULE_RELDATE "June 8, 2005"
1da177e4
LT
71
72#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0
74#define TG3_DEF_TX_MODE 0
75#define TG3_DEF_MSG_ENABLE \
76 (NETIF_MSG_DRV | \
77 NETIF_MSG_PROBE | \
78 NETIF_MSG_LINK | \
79 NETIF_MSG_TIMER | \
80 NETIF_MSG_IFDOWN | \
81 NETIF_MSG_IFUP | \
82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR)
84
85/* length of time before we decide the hardware is borked,
86 * and dev->tx_timeout() should be called to fix the problem
87 */
88#define TG3_TX_TIMEOUT (5 * HZ)
89
90/* hardware minimum and maximum for a single frame's data payload */
91#define TG3_MIN_MTU 60
92#define TG3_MAX_MTU(tp) \
fcf02693 93 (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
1da177e4
LT
94
95/* These numbers seem to be hard coded in the NIC firmware somehow.
96 * You can't change the ring sizes, but you can change where you place
97 * them in the NIC onboard memory.
98 */
99#define TG3_RX_RING_SIZE 512
100#define TG3_DEF_RX_RING_PENDING 200
101#define TG3_RX_JUMBO_RING_SIZE 256
102#define TG3_DEF_RX_JUMBO_RING_PENDING 100
103
104/* Do not place this n-ring entries value into the tp struct itself,
105 * we really want to expose these constants to GCC so that modulo et
106 * al. operations are done with shifts and masks instead of with
107 * hw multiply/modulo instructions. Another solution would be to
108 * replace things like '% foo' with '& (foo - 1)'.
109 */
110#define TG3_RX_RCB_RING_SIZE(tp) \
111 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
112
113#define TG3_TX_RING_SIZE 512
114#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
115
116#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_RING_SIZE)
118#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_JUMBO_RING_SIZE)
120#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_RCB_RING_SIZE(tp))
122#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
123 TG3_TX_RING_SIZE)
124#define TX_RING_GAP(TP) \
125 (TG3_TX_RING_SIZE - (TP)->tx_pending)
126#define TX_BUFFS_AVAIL(TP) \
127 (((TP)->tx_cons <= (TP)->tx_prod) ? \
128 (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
129 (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134
135/* minimum number of free TX descriptors required to wake up TX process */
136#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137
138/* number of ETHTOOL_GSTATS u64's */
139#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
4cafd3f5
MC
141#define TG3_NUM_TEST 6
142
1da177e4
LT
143static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148MODULE_LICENSE("GPL");
149MODULE_VERSION(DRV_MODULE_VERSION);
150
151static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152module_param(tg3_debug, int, 0);
153MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155static struct pci_device_id tg3_pci_tbl[] = {
156 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
6e9017a7 214 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
af2bcd97 215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
d8659255
XVP
216 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
1da177e4
LT
218 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
233 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
235 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236 { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
237 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
239 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 { 0, }
241};
242
243MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
244
245static struct {
246 const char string[ETH_GSTRING_LEN];
247} ethtool_stats_keys[TG3_NUM_STATS] = {
248 { "rx_octets" },
249 { "rx_fragments" },
250 { "rx_ucast_packets" },
251 { "rx_mcast_packets" },
252 { "rx_bcast_packets" },
253 { "rx_fcs_errors" },
254 { "rx_align_errors" },
255 { "rx_xon_pause_rcvd" },
256 { "rx_xoff_pause_rcvd" },
257 { "rx_mac_ctrl_rcvd" },
258 { "rx_xoff_entered" },
259 { "rx_frame_too_long_errors" },
260 { "rx_jabbers" },
261 { "rx_undersize_packets" },
262 { "rx_in_length_errors" },
263 { "rx_out_length_errors" },
264 { "rx_64_or_less_octet_packets" },
265 { "rx_65_to_127_octet_packets" },
266 { "rx_128_to_255_octet_packets" },
267 { "rx_256_to_511_octet_packets" },
268 { "rx_512_to_1023_octet_packets" },
269 { "rx_1024_to_1522_octet_packets" },
270 { "rx_1523_to_2047_octet_packets" },
271 { "rx_2048_to_4095_octet_packets" },
272 { "rx_4096_to_8191_octet_packets" },
273 { "rx_8192_to_9022_octet_packets" },
274
275 { "tx_octets" },
276 { "tx_collisions" },
277
278 { "tx_xon_sent" },
279 { "tx_xoff_sent" },
280 { "tx_flow_control" },
281 { "tx_mac_errors" },
282 { "tx_single_collisions" },
283 { "tx_mult_collisions" },
284 { "tx_deferred" },
285 { "tx_excessive_collisions" },
286 { "tx_late_collisions" },
287 { "tx_collide_2times" },
288 { "tx_collide_3times" },
289 { "tx_collide_4times" },
290 { "tx_collide_5times" },
291 { "tx_collide_6times" },
292 { "tx_collide_7times" },
293 { "tx_collide_8times" },
294 { "tx_collide_9times" },
295 { "tx_collide_10times" },
296 { "tx_collide_11times" },
297 { "tx_collide_12times" },
298 { "tx_collide_13times" },
299 { "tx_collide_14times" },
300 { "tx_collide_15times" },
301 { "tx_ucast_packets" },
302 { "tx_mcast_packets" },
303 { "tx_bcast_packets" },
304 { "tx_carrier_sense_errors" },
305 { "tx_discards" },
306 { "tx_errors" },
307
308 { "dma_writeq_full" },
309 { "dma_write_prioq_full" },
310 { "rxbds_empty" },
311 { "rx_discards" },
312 { "rx_errors" },
313 { "rx_threshold_hit" },
314
315 { "dma_readq_full" },
316 { "dma_read_prioq_full" },
317 { "tx_comp_queue_full" },
318
319 { "ring_set_send_prod_index" },
320 { "ring_status_update" },
321 { "nic_irqs" },
322 { "nic_avoided_irqs" },
323 { "nic_tx_threshold_hit" }
324};
325
4cafd3f5
MC
326static struct {
327 const char string[ETH_GSTRING_LEN];
328} ethtool_test_keys[TG3_NUM_TEST] = {
329 { "nvram test (online) " },
330 { "link test (online) " },
331 { "register test (offline)" },
332 { "memory test (offline)" },
333 { "loopback test (offline)" },
334 { "interrupt test (offline)" },
335};
336
1da177e4
LT
337static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
338{
339 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
340 unsigned long flags;
341
342 spin_lock_irqsave(&tp->indirect_lock, flags);
343 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
344 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
345 spin_unlock_irqrestore(&tp->indirect_lock, flags);
346 } else {
347 writel(val, tp->regs + off);
348 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
349 readl(tp->regs + off);
350 }
351}
352
353static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
354{
355 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
356 unsigned long flags;
357
358 spin_lock_irqsave(&tp->indirect_lock, flags);
359 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361 spin_unlock_irqrestore(&tp->indirect_lock, flags);
362 } else {
363 void __iomem *dest = tp->regs + off;
364 writel(val, dest);
365 readl(dest); /* always flush PCI write */
366 }
367}
368
369static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
370{
371 void __iomem *mbox = tp->regs + off;
372 writel(val, mbox);
373 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
374 readl(mbox);
375}
376
377static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
378{
379 void __iomem *mbox = tp->regs + off;
380 writel(val, mbox);
381 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
382 writel(val, mbox);
383 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
384 readl(mbox);
385}
386
387#define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
388#define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val)
389#define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val)
390
391#define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
392#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
393#define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
394#define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
395#define tr32(reg) readl(tp->regs + (reg))
396#define tr16(reg) readw(tp->regs + (reg))
397#define tr8(reg) readb(tp->regs + (reg))
398
399static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
400{
401 unsigned long flags;
402
403 spin_lock_irqsave(&tp->indirect_lock, flags);
404 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
405 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
406
407 /* Always leave this as zero. */
408 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
409 spin_unlock_irqrestore(&tp->indirect_lock, flags);
410}
411
412static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
413{
414 unsigned long flags;
415
416 spin_lock_irqsave(&tp->indirect_lock, flags);
417 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
418 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
419
420 /* Always leave this as zero. */
421 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
422 spin_unlock_irqrestore(&tp->indirect_lock, flags);
423}
424
425static void tg3_disable_ints(struct tg3 *tp)
426{
427 tw32(TG3PCI_MISC_HOST_CTRL,
428 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
429 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
430 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
431}
432
433static inline void tg3_cond_int(struct tg3 *tp)
434{
435 if (tp->hw_status->status & SD_STATUS_UPDATED)
436 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
437}
438
439static void tg3_enable_ints(struct tg3 *tp)
440{
441 tw32(TG3PCI_MISC_HOST_CTRL,
442 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
fac9b83e
DM
443 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
444 (tp->last_tag << 24));
1da177e4
LT
445 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
446
447 tg3_cond_int(tp);
448}
449
04237ddd
MC
450static inline unsigned int tg3_has_work(struct tg3 *tp)
451{
452 struct tg3_hw_status *sblk = tp->hw_status;
453 unsigned int work_exists = 0;
454
455 /* check for phy events */
456 if (!(tp->tg3_flags &
457 (TG3_FLAG_USE_LINKCHG_REG |
458 TG3_FLAG_POLL_SERDES))) {
459 if (sblk->status & SD_STATUS_LINK_CHG)
460 work_exists = 1;
461 }
462 /* check for RX/TX work to do */
463 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
464 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
465 work_exists = 1;
466
467 return work_exists;
468}
469
1da177e4 470/* tg3_restart_ints
04237ddd
MC
471 * similar to tg3_enable_ints, but it accurately determines whether there
472 * is new work pending and can return without flushing the PIO write
473 * which reenables interrupts
1da177e4
LT
474 */
475static void tg3_restart_ints(struct tg3 *tp)
476{
477 tw32(TG3PCI_MISC_HOST_CTRL,
478 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
fac9b83e
DM
479 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
480 tp->last_tag << 24);
1da177e4
LT
481 mmiowb();
482
fac9b83e
DM
483 /* When doing tagged status, this work check is unnecessary.
484 * The last_tag we write above tells the chip which piece of
485 * work we've completed.
486 */
487 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
488 tg3_has_work(tp))
04237ddd
MC
489 tw32(HOSTCC_MODE, tp->coalesce_mode |
490 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
1da177e4
LT
491}
492
493static inline void tg3_netif_stop(struct tg3 *tp)
494{
495 netif_poll_disable(tp->dev);
496 netif_tx_disable(tp->dev);
497}
498
499static inline void tg3_netif_start(struct tg3 *tp)
500{
501 netif_wake_queue(tp->dev);
502 /* NOTE: unconditional netif_wake_queue is only appropriate
503 * so long as all callers are assured to have free tx slots
504 * (such as after tg3_init_hw)
505 */
506 netif_poll_enable(tp->dev);
507 tg3_cond_int(tp);
508}
509
510static void tg3_switch_clocks(struct tg3 *tp)
511{
512 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
513 u32 orig_clock_ctrl;
514
515 orig_clock_ctrl = clock_ctrl;
516 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
517 CLOCK_CTRL_CLKRUN_OENABLE |
518 0x1f);
519 tp->pci_clock_ctrl = clock_ctrl;
520
521 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
522 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
523 tw32_f(TG3PCI_CLOCK_CTRL,
524 clock_ctrl | CLOCK_CTRL_625_CORE);
525 udelay(40);
526 }
527 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
528 tw32_f(TG3PCI_CLOCK_CTRL,
529 clock_ctrl |
530 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
531 udelay(40);
532 tw32_f(TG3PCI_CLOCK_CTRL,
533 clock_ctrl | (CLOCK_CTRL_ALTCLK));
534 udelay(40);
535 }
536 tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
537 udelay(40);
538}
539
540#define PHY_BUSY_LOOPS 5000
541
542static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
543{
544 u32 frame_val;
545 unsigned int loops;
546 int ret;
547
548 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
549 tw32_f(MAC_MI_MODE,
550 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
551 udelay(80);
552 }
553
554 *val = 0x0;
555
556 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
557 MI_COM_PHY_ADDR_MASK);
558 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
559 MI_COM_REG_ADDR_MASK);
560 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
561
562 tw32_f(MAC_MI_COM, frame_val);
563
564 loops = PHY_BUSY_LOOPS;
565 while (loops != 0) {
566 udelay(10);
567 frame_val = tr32(MAC_MI_COM);
568
569 if ((frame_val & MI_COM_BUSY) == 0) {
570 udelay(5);
571 frame_val = tr32(MAC_MI_COM);
572 break;
573 }
574 loops -= 1;
575 }
576
577 ret = -EBUSY;
578 if (loops != 0) {
579 *val = frame_val & MI_COM_DATA_MASK;
580 ret = 0;
581 }
582
583 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
584 tw32_f(MAC_MI_MODE, tp->mi_mode);
585 udelay(80);
586 }
587
588 return ret;
589}
590
591static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
592{
593 u32 frame_val;
594 unsigned int loops;
595 int ret;
596
597 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
598 tw32_f(MAC_MI_MODE,
599 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
600 udelay(80);
601 }
602
603 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
604 MI_COM_PHY_ADDR_MASK);
605 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
606 MI_COM_REG_ADDR_MASK);
607 frame_val |= (val & MI_COM_DATA_MASK);
608 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
609
610 tw32_f(MAC_MI_COM, frame_val);
611
612 loops = PHY_BUSY_LOOPS;
613 while (loops != 0) {
614 udelay(10);
615 frame_val = tr32(MAC_MI_COM);
616 if ((frame_val & MI_COM_BUSY) == 0) {
617 udelay(5);
618 frame_val = tr32(MAC_MI_COM);
619 break;
620 }
621 loops -= 1;
622 }
623
624 ret = -EBUSY;
625 if (loops != 0)
626 ret = 0;
627
628 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
629 tw32_f(MAC_MI_MODE, tp->mi_mode);
630 udelay(80);
631 }
632
633 return ret;
634}
635
636static void tg3_phy_set_wirespeed(struct tg3 *tp)
637{
638 u32 val;
639
640 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
641 return;
642
643 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
644 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
645 tg3_writephy(tp, MII_TG3_AUX_CTRL,
646 (val | (1 << 15) | (1 << 4)));
647}
648
649static int tg3_bmcr_reset(struct tg3 *tp)
650{
651 u32 phy_control;
652 int limit, err;
653
654 /* OK, reset it, and poll the BMCR_RESET bit until it
655 * clears or we time out.
656 */
657 phy_control = BMCR_RESET;
658 err = tg3_writephy(tp, MII_BMCR, phy_control);
659 if (err != 0)
660 return -EBUSY;
661
662 limit = 5000;
663 while (limit--) {
664 err = tg3_readphy(tp, MII_BMCR, &phy_control);
665 if (err != 0)
666 return -EBUSY;
667
668 if ((phy_control & BMCR_RESET) == 0) {
669 udelay(40);
670 break;
671 }
672 udelay(10);
673 }
674 if (limit <= 0)
675 return -EBUSY;
676
677 return 0;
678}
679
680static int tg3_wait_macro_done(struct tg3 *tp)
681{
682 int limit = 100;
683
684 while (limit--) {
685 u32 tmp32;
686
687 if (!tg3_readphy(tp, 0x16, &tmp32)) {
688 if ((tmp32 & 0x1000) == 0)
689 break;
690 }
691 }
692 if (limit <= 0)
693 return -EBUSY;
694
695 return 0;
696}
697
698static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
699{
700 static const u32 test_pat[4][6] = {
701 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
702 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
703 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
704 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
705 };
706 int chan;
707
708 for (chan = 0; chan < 4; chan++) {
709 int i;
710
711 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
712 (chan * 0x2000) | 0x0200);
713 tg3_writephy(tp, 0x16, 0x0002);
714
715 for (i = 0; i < 6; i++)
716 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
717 test_pat[chan][i]);
718
719 tg3_writephy(tp, 0x16, 0x0202);
720 if (tg3_wait_macro_done(tp)) {
721 *resetp = 1;
722 return -EBUSY;
723 }
724
725 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
726 (chan * 0x2000) | 0x0200);
727 tg3_writephy(tp, 0x16, 0x0082);
728 if (tg3_wait_macro_done(tp)) {
729 *resetp = 1;
730 return -EBUSY;
731 }
732
733 tg3_writephy(tp, 0x16, 0x0802);
734 if (tg3_wait_macro_done(tp)) {
735 *resetp = 1;
736 return -EBUSY;
737 }
738
739 for (i = 0; i < 6; i += 2) {
740 u32 low, high;
741
742 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
743 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
744 tg3_wait_macro_done(tp)) {
745 *resetp = 1;
746 return -EBUSY;
747 }
748 low &= 0x7fff;
749 high &= 0x000f;
750 if (low != test_pat[chan][i] ||
751 high != test_pat[chan][i+1]) {
752 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
753 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
754 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
755
756 return -EBUSY;
757 }
758 }
759 }
760
761 return 0;
762}
763
764static int tg3_phy_reset_chanpat(struct tg3 *tp)
765{
766 int chan;
767
768 for (chan = 0; chan < 4; chan++) {
769 int i;
770
771 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
772 (chan * 0x2000) | 0x0200);
773 tg3_writephy(tp, 0x16, 0x0002);
774 for (i = 0; i < 6; i++)
775 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
776 tg3_writephy(tp, 0x16, 0x0202);
777 if (tg3_wait_macro_done(tp))
778 return -EBUSY;
779 }
780
781 return 0;
782}
783
784static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
785{
786 u32 reg32, phy9_orig;
787 int retries, do_phy_reset, err;
788
789 retries = 10;
790 do_phy_reset = 1;
791 do {
792 if (do_phy_reset) {
793 err = tg3_bmcr_reset(tp);
794 if (err)
795 return err;
796 do_phy_reset = 0;
797 }
798
799 /* Disable transmitter and interrupt. */
800 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
801 continue;
802
803 reg32 |= 0x3000;
804 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
805
806 /* Set full-duplex, 1000 mbps. */
807 tg3_writephy(tp, MII_BMCR,
808 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
809
810 /* Set to master mode. */
811 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
812 continue;
813
814 tg3_writephy(tp, MII_TG3_CTRL,
815 (MII_TG3_CTRL_AS_MASTER |
816 MII_TG3_CTRL_ENABLE_AS_MASTER));
817
818 /* Enable SM_DSP_CLOCK and 6dB. */
819 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
820
821 /* Block the PHY control access. */
822 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
823 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
824
825 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
826 if (!err)
827 break;
828 } while (--retries);
829
830 err = tg3_phy_reset_chanpat(tp);
831 if (err)
832 return err;
833
834 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
835 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
836
837 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
838 tg3_writephy(tp, 0x16, 0x0000);
839
840 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
841 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
842 /* Set Extended packet length bit for jumbo frames */
843 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
844 }
845 else {
846 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
847 }
848
849 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
850
851 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
852 reg32 &= ~0x3000;
853 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
854 } else if (!err)
855 err = -EBUSY;
856
857 return err;
858}
859
860/* This will reset the tigon3 PHY if there is no valid
861 * link unless the FORCE argument is non-zero.
862 */
863static int tg3_phy_reset(struct tg3 *tp)
864{
865 u32 phy_status;
866 int err;
867
868 err = tg3_readphy(tp, MII_BMSR, &phy_status);
869 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
870 if (err != 0)
871 return -EBUSY;
872
873 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
874 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
875 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
876 err = tg3_phy_reset_5703_4_5(tp);
877 if (err)
878 return err;
879 goto out;
880 }
881
882 err = tg3_bmcr_reset(tp);
883 if (err)
884 return err;
885
886out:
887 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
888 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
889 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
890 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
891 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
892 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
893 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
894 }
895 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
896 tg3_writephy(tp, 0x1c, 0x8d68);
897 tg3_writephy(tp, 0x1c, 0x8d68);
898 }
899 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
900 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
901 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
902 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
903 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
904 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
905 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
906 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
907 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
908 }
909 /* Set Extended packet length bit (bit 14) on all chips that */
910 /* support jumbo frames */
911 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
912 /* Cannot do read-modify-write on 5401 */
913 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
fcf02693 914 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1da177e4
LT
915 u32 phy_reg;
916
917 /* Set bit 14 with read-modify-write to preserve other bits */
918 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
919 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
920 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
921 }
922
923 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
924 * jumbo frames transmission.
925 */
fcf02693 926 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1da177e4
LT
927 u32 phy_reg;
928
929 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
930 tg3_writephy(tp, MII_TG3_EXT_CTRL,
931 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
932 }
933
934 tg3_phy_set_wirespeed(tp);
935 return 0;
936}
937
938static void tg3_frob_aux_power(struct tg3 *tp)
939{
940 struct tg3 *tp_peer = tp;
941
942 if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
943 return;
944
945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
946 tp_peer = pci_get_drvdata(tp->pdev_peer);
947 if (!tp_peer)
948 BUG();
949 }
950
951
952 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
953 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
956 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
957 (GRC_LCLCTRL_GPIO_OE0 |
958 GRC_LCLCTRL_GPIO_OE1 |
959 GRC_LCLCTRL_GPIO_OE2 |
960 GRC_LCLCTRL_GPIO_OUTPUT0 |
961 GRC_LCLCTRL_GPIO_OUTPUT1));
962 udelay(100);
963 } else {
964 u32 no_gpio2;
965 u32 grc_local_ctrl;
966
967 if (tp_peer != tp &&
968 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
969 return;
970
971 /* On 5753 and variants, GPIO2 cannot be used. */
972 no_gpio2 = tp->nic_sram_data_cfg &
973 NIC_SRAM_DATA_CFG_NO_GPIO2;
974
975 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
976 GRC_LCLCTRL_GPIO_OE1 |
977 GRC_LCLCTRL_GPIO_OE2 |
978 GRC_LCLCTRL_GPIO_OUTPUT1 |
979 GRC_LCLCTRL_GPIO_OUTPUT2;
980 if (no_gpio2) {
981 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
982 GRC_LCLCTRL_GPIO_OUTPUT2);
983 }
984 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
985 grc_local_ctrl);
986 udelay(100);
987
988 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
989
990 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
991 grc_local_ctrl);
992 udelay(100);
993
994 if (!no_gpio2) {
995 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
996 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
997 grc_local_ctrl);
998 udelay(100);
999 }
1000 }
1001 } else {
1002 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1003 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1004 if (tp_peer != tp &&
1005 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1006 return;
1007
1008 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1009 (GRC_LCLCTRL_GPIO_OE1 |
1010 GRC_LCLCTRL_GPIO_OUTPUT1));
1011 udelay(100);
1012
1013 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1014 (GRC_LCLCTRL_GPIO_OE1));
1015 udelay(100);
1016
1017 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1018 (GRC_LCLCTRL_GPIO_OE1 |
1019 GRC_LCLCTRL_GPIO_OUTPUT1));
1020 udelay(100);
1021 }
1022 }
1023}
1024
1025static int tg3_setup_phy(struct tg3 *, int);
1026
1027#define RESET_KIND_SHUTDOWN 0
1028#define RESET_KIND_INIT 1
1029#define RESET_KIND_SUSPEND 2
1030
1031static void tg3_write_sig_post_reset(struct tg3 *, int);
1032static int tg3_halt_cpu(struct tg3 *, u32);
1033
1034static int tg3_set_power_state(struct tg3 *tp, int state)
1035{
1036 u32 misc_host_ctrl;
1037 u16 power_control, power_caps;
1038 int pm = tp->pm_cap;
1039
1040 /* Make sure register accesses (indirect or otherwise)
1041 * will function correctly.
1042 */
1043 pci_write_config_dword(tp->pdev,
1044 TG3PCI_MISC_HOST_CTRL,
1045 tp->misc_host_ctrl);
1046
1047 pci_read_config_word(tp->pdev,
1048 pm + PCI_PM_CTRL,
1049 &power_control);
1050 power_control |= PCI_PM_CTRL_PME_STATUS;
1051 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1052 switch (state) {
1053 case 0:
1054 power_control |= 0;
1055 pci_write_config_word(tp->pdev,
1056 pm + PCI_PM_CTRL,
1057 power_control);
8c6bda1a
MC
1058 udelay(100); /* Delay after power state change */
1059
1060 /* Switch out of Vaux if it is not a LOM */
1061 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1062 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1063 udelay(100);
1064 }
1da177e4
LT
1065
1066 return 0;
1067
1068 case 1:
1069 power_control |= 1;
1070 break;
1071
1072 case 2:
1073 power_control |= 2;
1074 break;
1075
1076 case 3:
1077 power_control |= 3;
1078 break;
1079
1080 default:
1081 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1082 "requested.\n",
1083 tp->dev->name, state);
1084 return -EINVAL;
1085 };
1086
1087 power_control |= PCI_PM_CTRL_PME_ENABLE;
1088
1089 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1090 tw32(TG3PCI_MISC_HOST_CTRL,
1091 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1092
1093 if (tp->link_config.phy_is_low_power == 0) {
1094 tp->link_config.phy_is_low_power = 1;
1095 tp->link_config.orig_speed = tp->link_config.speed;
1096 tp->link_config.orig_duplex = tp->link_config.duplex;
1097 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1098 }
1099
1100 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1101 tp->link_config.speed = SPEED_10;
1102 tp->link_config.duplex = DUPLEX_HALF;
1103 tp->link_config.autoneg = AUTONEG_ENABLE;
1104 tg3_setup_phy(tp, 0);
1105 }
1106
1107 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1108
1109 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1110 u32 mac_mode;
1111
1112 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1113 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1114 udelay(40);
1115
1116 mac_mode = MAC_MODE_PORT_MODE_MII;
1117
1118 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1119 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1120 mac_mode |= MAC_MODE_LINK_POLARITY;
1121 } else {
1122 mac_mode = MAC_MODE_PORT_MODE_TBI;
1123 }
1124
cbf46853 1125 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1da177e4
LT
1126 tw32(MAC_LED_CTRL, tp->led_ctrl);
1127
1128 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1129 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1130 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1131
1132 tw32_f(MAC_MODE, mac_mode);
1133 udelay(100);
1134
1135 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1136 udelay(10);
1137 }
1138
1139 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1140 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1141 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1142 u32 base_val;
1143
1144 base_val = tp->pci_clock_ctrl;
1145 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1146 CLOCK_CTRL_TXCLK_DISABLE);
1147
1148 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1149 CLOCK_CTRL_ALTCLK |
1150 CLOCK_CTRL_PWRDOWN_PLL133);
1151 udelay(40);
85e94ced 1152 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1da177e4
LT
1153 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1154 u32 newbits1, newbits2;
1155
1156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1158 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1159 CLOCK_CTRL_TXCLK_DISABLE |
1160 CLOCK_CTRL_ALTCLK);
1161 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1162 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1163 newbits1 = CLOCK_CTRL_625_CORE;
1164 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1165 } else {
1166 newbits1 = CLOCK_CTRL_ALTCLK;
1167 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1168 }
1169
1170 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1171 udelay(40);
1172
1173 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1174 udelay(40);
1175
1176 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1177 u32 newbits3;
1178
1179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1181 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1182 CLOCK_CTRL_TXCLK_DISABLE |
1183 CLOCK_CTRL_44MHZ_CORE);
1184 } else {
1185 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1186 }
1187
1188 tw32_f(TG3PCI_CLOCK_CTRL,
1189 tp->pci_clock_ctrl | newbits3);
1190 udelay(40);
1191 }
1192 }
1193
1194 tg3_frob_aux_power(tp);
1195
1196 /* Workaround for unstable PLL clock */
1197 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1198 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1199 u32 val = tr32(0x7d00);
1200
1201 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1202 tw32(0x7d00, val);
1203 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1204 tg3_halt_cpu(tp, RX_CPU_BASE);
1205 }
1206
1207 /* Finally, set the new power state. */
1208 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
8c6bda1a 1209 udelay(100); /* Delay after power state change */
1da177e4
LT
1210
1211 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1212
1213 return 0;
1214}
1215
1216static void tg3_link_report(struct tg3 *tp)
1217{
1218 if (!netif_carrier_ok(tp->dev)) {
1219 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1220 } else {
1221 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1222 tp->dev->name,
1223 (tp->link_config.active_speed == SPEED_1000 ?
1224 1000 :
1225 (tp->link_config.active_speed == SPEED_100 ?
1226 100 : 10)),
1227 (tp->link_config.active_duplex == DUPLEX_FULL ?
1228 "full" : "half"));
1229
1230 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1231 "%s for RX.\n",
1232 tp->dev->name,
1233 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1234 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1235 }
1236}
1237
1238static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1239{
1240 u32 new_tg3_flags = 0;
1241 u32 old_rx_mode = tp->rx_mode;
1242 u32 old_tx_mode = tp->tx_mode;
1243
1244 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1245 if (local_adv & ADVERTISE_PAUSE_CAP) {
1246 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1247 if (remote_adv & LPA_PAUSE_CAP)
1248 new_tg3_flags |=
1249 (TG3_FLAG_RX_PAUSE |
1250 TG3_FLAG_TX_PAUSE);
1251 else if (remote_adv & LPA_PAUSE_ASYM)
1252 new_tg3_flags |=
1253 (TG3_FLAG_RX_PAUSE);
1254 } else {
1255 if (remote_adv & LPA_PAUSE_CAP)
1256 new_tg3_flags |=
1257 (TG3_FLAG_RX_PAUSE |
1258 TG3_FLAG_TX_PAUSE);
1259 }
1260 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1261 if ((remote_adv & LPA_PAUSE_CAP) &&
1262 (remote_adv & LPA_PAUSE_ASYM))
1263 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1264 }
1265
1266 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1267 tp->tg3_flags |= new_tg3_flags;
1268 } else {
1269 new_tg3_flags = tp->tg3_flags;
1270 }
1271
1272 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1273 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1274 else
1275 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1276
1277 if (old_rx_mode != tp->rx_mode) {
1278 tw32_f(MAC_RX_MODE, tp->rx_mode);
1279 }
1280
1281 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1282 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1283 else
1284 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1285
1286 if (old_tx_mode != tp->tx_mode) {
1287 tw32_f(MAC_TX_MODE, tp->tx_mode);
1288 }
1289}
1290
1291static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1292{
1293 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1294 case MII_TG3_AUX_STAT_10HALF:
1295 *speed = SPEED_10;
1296 *duplex = DUPLEX_HALF;
1297 break;
1298
1299 case MII_TG3_AUX_STAT_10FULL:
1300 *speed = SPEED_10;
1301 *duplex = DUPLEX_FULL;
1302 break;
1303
1304 case MII_TG3_AUX_STAT_100HALF:
1305 *speed = SPEED_100;
1306 *duplex = DUPLEX_HALF;
1307 break;
1308
1309 case MII_TG3_AUX_STAT_100FULL:
1310 *speed = SPEED_100;
1311 *duplex = DUPLEX_FULL;
1312 break;
1313
1314 case MII_TG3_AUX_STAT_1000HALF:
1315 *speed = SPEED_1000;
1316 *duplex = DUPLEX_HALF;
1317 break;
1318
1319 case MII_TG3_AUX_STAT_1000FULL:
1320 *speed = SPEED_1000;
1321 *duplex = DUPLEX_FULL;
1322 break;
1323
1324 default:
1325 *speed = SPEED_INVALID;
1326 *duplex = DUPLEX_INVALID;
1327 break;
1328 };
1329}
1330
1331static void tg3_phy_copper_begin(struct tg3 *tp)
1332{
1333 u32 new_adv;
1334 int i;
1335
1336 if (tp->link_config.phy_is_low_power) {
1337 /* Entering low power mode. Disable gigabit and
1338 * 100baseT advertisements.
1339 */
1340 tg3_writephy(tp, MII_TG3_CTRL, 0);
1341
1342 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1343 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1344 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1345 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1346
1347 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1348 } else if (tp->link_config.speed == SPEED_INVALID) {
1349 tp->link_config.advertising =
1350 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1351 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1352 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1353 ADVERTISED_Autoneg | ADVERTISED_MII);
1354
1355 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1356 tp->link_config.advertising &=
1357 ~(ADVERTISED_1000baseT_Half |
1358 ADVERTISED_1000baseT_Full);
1359
1360 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1361 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1362 new_adv |= ADVERTISE_10HALF;
1363 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1364 new_adv |= ADVERTISE_10FULL;
1365 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1366 new_adv |= ADVERTISE_100HALF;
1367 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1368 new_adv |= ADVERTISE_100FULL;
1369 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1370
1371 if (tp->link_config.advertising &
1372 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1373 new_adv = 0;
1374 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1375 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1376 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1377 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1378 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1379 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1380 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1381 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1382 MII_TG3_CTRL_ENABLE_AS_MASTER);
1383 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1384 } else {
1385 tg3_writephy(tp, MII_TG3_CTRL, 0);
1386 }
1387 } else {
1388 /* Asking for a specific link mode. */
1389 if (tp->link_config.speed == SPEED_1000) {
1390 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1391 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1392
1393 if (tp->link_config.duplex == DUPLEX_FULL)
1394 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1395 else
1396 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1397 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1398 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1399 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1400 MII_TG3_CTRL_ENABLE_AS_MASTER);
1401 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1402 } else {
1403 tg3_writephy(tp, MII_TG3_CTRL, 0);
1404
1405 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1406 if (tp->link_config.speed == SPEED_100) {
1407 if (tp->link_config.duplex == DUPLEX_FULL)
1408 new_adv |= ADVERTISE_100FULL;
1409 else
1410 new_adv |= ADVERTISE_100HALF;
1411 } else {
1412 if (tp->link_config.duplex == DUPLEX_FULL)
1413 new_adv |= ADVERTISE_10FULL;
1414 else
1415 new_adv |= ADVERTISE_10HALF;
1416 }
1417 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1418 }
1419 }
1420
1421 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1422 tp->link_config.speed != SPEED_INVALID) {
1423 u32 bmcr, orig_bmcr;
1424
1425 tp->link_config.active_speed = tp->link_config.speed;
1426 tp->link_config.active_duplex = tp->link_config.duplex;
1427
1428 bmcr = 0;
1429 switch (tp->link_config.speed) {
1430 default:
1431 case SPEED_10:
1432 break;
1433
1434 case SPEED_100:
1435 bmcr |= BMCR_SPEED100;
1436 break;
1437
1438 case SPEED_1000:
1439 bmcr |= TG3_BMCR_SPEED1000;
1440 break;
1441 };
1442
1443 if (tp->link_config.duplex == DUPLEX_FULL)
1444 bmcr |= BMCR_FULLDPLX;
1445
1446 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1447 (bmcr != orig_bmcr)) {
1448 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1449 for (i = 0; i < 1500; i++) {
1450 u32 tmp;
1451
1452 udelay(10);
1453 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1454 tg3_readphy(tp, MII_BMSR, &tmp))
1455 continue;
1456 if (!(tmp & BMSR_LSTATUS)) {
1457 udelay(40);
1458 break;
1459 }
1460 }
1461 tg3_writephy(tp, MII_BMCR, bmcr);
1462 udelay(40);
1463 }
1464 } else {
1465 tg3_writephy(tp, MII_BMCR,
1466 BMCR_ANENABLE | BMCR_ANRESTART);
1467 }
1468}
1469
1470static int tg3_init_5401phy_dsp(struct tg3 *tp)
1471{
1472 int err;
1473
1474 /* Turn off tap power management. */
1475 /* Set Extended packet length bit */
1476 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1477
1478 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1479 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1480
1481 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1482 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1483
1484 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1485 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1486
1487 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1488 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1489
1490 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1491 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1492
1493 udelay(40);
1494
1495 return err;
1496}
1497
1498static int tg3_copper_is_advertising_all(struct tg3 *tp)
1499{
1500 u32 adv_reg, all_mask;
1501
1502 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1503 return 0;
1504
1505 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1506 ADVERTISE_100HALF | ADVERTISE_100FULL);
1507 if ((adv_reg & all_mask) != all_mask)
1508 return 0;
1509 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1510 u32 tg3_ctrl;
1511
1512 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1513 return 0;
1514
1515 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1516 MII_TG3_CTRL_ADV_1000_FULL);
1517 if ((tg3_ctrl & all_mask) != all_mask)
1518 return 0;
1519 }
1520 return 1;
1521}
1522
1523static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1524{
1525 int current_link_up;
1526 u32 bmsr, dummy;
1527 u16 current_speed;
1528 u8 current_duplex;
1529 int i, err;
1530
1531 tw32(MAC_EVENT, 0);
1532
1533 tw32_f(MAC_STATUS,
1534 (MAC_STATUS_SYNC_CHANGED |
1535 MAC_STATUS_CFG_CHANGED |
1536 MAC_STATUS_MI_COMPLETION |
1537 MAC_STATUS_LNKSTATE_CHANGED));
1538 udelay(40);
1539
1540 tp->mi_mode = MAC_MI_MODE_BASE;
1541 tw32_f(MAC_MI_MODE, tp->mi_mode);
1542 udelay(80);
1543
1544 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1545
1546 /* Some third-party PHYs need to be reset on link going
1547 * down.
1548 */
1549 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1552 netif_carrier_ok(tp->dev)) {
1553 tg3_readphy(tp, MII_BMSR, &bmsr);
1554 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1555 !(bmsr & BMSR_LSTATUS))
1556 force_reset = 1;
1557 }
1558 if (force_reset)
1559 tg3_phy_reset(tp);
1560
1561 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1562 tg3_readphy(tp, MII_BMSR, &bmsr);
1563 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1564 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1565 bmsr = 0;
1566
1567 if (!(bmsr & BMSR_LSTATUS)) {
1568 err = tg3_init_5401phy_dsp(tp);
1569 if (err)
1570 return err;
1571
1572 tg3_readphy(tp, MII_BMSR, &bmsr);
1573 for (i = 0; i < 1000; i++) {
1574 udelay(10);
1575 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1576 (bmsr & BMSR_LSTATUS)) {
1577 udelay(40);
1578 break;
1579 }
1580 }
1581
1582 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1583 !(bmsr & BMSR_LSTATUS) &&
1584 tp->link_config.active_speed == SPEED_1000) {
1585 err = tg3_phy_reset(tp);
1586 if (!err)
1587 err = tg3_init_5401phy_dsp(tp);
1588 if (err)
1589 return err;
1590 }
1591 }
1592 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1593 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1594 /* 5701 {A0,B0} CRC bug workaround */
1595 tg3_writephy(tp, 0x15, 0x0a75);
1596 tg3_writephy(tp, 0x1c, 0x8c68);
1597 tg3_writephy(tp, 0x1c, 0x8d68);
1598 tg3_writephy(tp, 0x1c, 0x8c68);
1599 }
1600
1601 /* Clear pending interrupts... */
1602 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1603 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1604
1605 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1606 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1607 else
1608 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1609
1610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1612 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1613 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1614 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1615 else
1616 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1617 }
1618
1619 current_link_up = 0;
1620 current_speed = SPEED_INVALID;
1621 current_duplex = DUPLEX_INVALID;
1622
1623 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1624 u32 val;
1625
1626 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1627 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1628 if (!(val & (1 << 10))) {
1629 val |= (1 << 10);
1630 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1631 goto relink;
1632 }
1633 }
1634
1635 bmsr = 0;
1636 for (i = 0; i < 100; i++) {
1637 tg3_readphy(tp, MII_BMSR, &bmsr);
1638 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1639 (bmsr & BMSR_LSTATUS))
1640 break;
1641 udelay(40);
1642 }
1643
1644 if (bmsr & BMSR_LSTATUS) {
1645 u32 aux_stat, bmcr;
1646
1647 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1648 for (i = 0; i < 2000; i++) {
1649 udelay(10);
1650 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1651 aux_stat)
1652 break;
1653 }
1654
1655 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1656 &current_speed,
1657 &current_duplex);
1658
1659 bmcr = 0;
1660 for (i = 0; i < 200; i++) {
1661 tg3_readphy(tp, MII_BMCR, &bmcr);
1662 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1663 continue;
1664 if (bmcr && bmcr != 0x7fff)
1665 break;
1666 udelay(10);
1667 }
1668
1669 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1670 if (bmcr & BMCR_ANENABLE) {
1671 current_link_up = 1;
1672
1673 /* Force autoneg restart if we are exiting
1674 * low power mode.
1675 */
1676 if (!tg3_copper_is_advertising_all(tp))
1677 current_link_up = 0;
1678 } else {
1679 current_link_up = 0;
1680 }
1681 } else {
1682 if (!(bmcr & BMCR_ANENABLE) &&
1683 tp->link_config.speed == current_speed &&
1684 tp->link_config.duplex == current_duplex) {
1685 current_link_up = 1;
1686 } else {
1687 current_link_up = 0;
1688 }
1689 }
1690
1691 tp->link_config.active_speed = current_speed;
1692 tp->link_config.active_duplex = current_duplex;
1693 }
1694
1695 if (current_link_up == 1 &&
1696 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1697 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1698 u32 local_adv, remote_adv;
1699
1700 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1701 local_adv = 0;
1702 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1703
1704 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1705 remote_adv = 0;
1706
1707 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1708
1709 /* If we are not advertising full pause capability,
1710 * something is wrong. Bring the link down and reconfigure.
1711 */
1712 if (local_adv != ADVERTISE_PAUSE_CAP) {
1713 current_link_up = 0;
1714 } else {
1715 tg3_setup_flow_control(tp, local_adv, remote_adv);
1716 }
1717 }
1718relink:
1719 if (current_link_up == 0) {
1720 u32 tmp;
1721
1722 tg3_phy_copper_begin(tp);
1723
1724 tg3_readphy(tp, MII_BMSR, &tmp);
1725 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1726 (tmp & BMSR_LSTATUS))
1727 current_link_up = 1;
1728 }
1729
1730 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1731 if (current_link_up == 1) {
1732 if (tp->link_config.active_speed == SPEED_100 ||
1733 tp->link_config.active_speed == SPEED_10)
1734 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1735 else
1736 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1737 } else
1738 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1739
1740 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1741 if (tp->link_config.active_duplex == DUPLEX_HALF)
1742 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1743
1744 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1746 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1747 (current_link_up == 1 &&
1748 tp->link_config.active_speed == SPEED_10))
1749 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1750 } else {
1751 if (current_link_up == 1)
1752 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1753 }
1754
1755 /* ??? Without this setting Netgear GA302T PHY does not
1756 * ??? send/receive packets...
1757 */
1758 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1759 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1760 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1761 tw32_f(MAC_MI_MODE, tp->mi_mode);
1762 udelay(80);
1763 }
1764
1765 tw32_f(MAC_MODE, tp->mac_mode);
1766 udelay(40);
1767
1768 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1769 /* Polled via timer. */
1770 tw32_f(MAC_EVENT, 0);
1771 } else {
1772 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1773 }
1774 udelay(40);
1775
1776 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1777 current_link_up == 1 &&
1778 tp->link_config.active_speed == SPEED_1000 &&
1779 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1780 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1781 udelay(120);
1782 tw32_f(MAC_STATUS,
1783 (MAC_STATUS_SYNC_CHANGED |
1784 MAC_STATUS_CFG_CHANGED));
1785 udelay(40);
1786 tg3_write_mem(tp,
1787 NIC_SRAM_FIRMWARE_MBOX,
1788 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1789 }
1790
1791 if (current_link_up != netif_carrier_ok(tp->dev)) {
1792 if (current_link_up)
1793 netif_carrier_on(tp->dev);
1794 else
1795 netif_carrier_off(tp->dev);
1796 tg3_link_report(tp);
1797 }
1798
1799 return 0;
1800}
1801
1802struct tg3_fiber_aneginfo {
1803 int state;
1804#define ANEG_STATE_UNKNOWN 0
1805#define ANEG_STATE_AN_ENABLE 1
1806#define ANEG_STATE_RESTART_INIT 2
1807#define ANEG_STATE_RESTART 3
1808#define ANEG_STATE_DISABLE_LINK_OK 4
1809#define ANEG_STATE_ABILITY_DETECT_INIT 5
1810#define ANEG_STATE_ABILITY_DETECT 6
1811#define ANEG_STATE_ACK_DETECT_INIT 7
1812#define ANEG_STATE_ACK_DETECT 8
1813#define ANEG_STATE_COMPLETE_ACK_INIT 9
1814#define ANEG_STATE_COMPLETE_ACK 10
1815#define ANEG_STATE_IDLE_DETECT_INIT 11
1816#define ANEG_STATE_IDLE_DETECT 12
1817#define ANEG_STATE_LINK_OK 13
1818#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1819#define ANEG_STATE_NEXT_PAGE_WAIT 15
1820
1821 u32 flags;
1822#define MR_AN_ENABLE 0x00000001
1823#define MR_RESTART_AN 0x00000002
1824#define MR_AN_COMPLETE 0x00000004
1825#define MR_PAGE_RX 0x00000008
1826#define MR_NP_LOADED 0x00000010
1827#define MR_TOGGLE_TX 0x00000020
1828#define MR_LP_ADV_FULL_DUPLEX 0x00000040
1829#define MR_LP_ADV_HALF_DUPLEX 0x00000080
1830#define MR_LP_ADV_SYM_PAUSE 0x00000100
1831#define MR_LP_ADV_ASYM_PAUSE 0x00000200
1832#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1833#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1834#define MR_LP_ADV_NEXT_PAGE 0x00001000
1835#define MR_TOGGLE_RX 0x00002000
1836#define MR_NP_RX 0x00004000
1837
1838#define MR_LINK_OK 0x80000000
1839
1840 unsigned long link_time, cur_time;
1841
1842 u32 ability_match_cfg;
1843 int ability_match_count;
1844
1845 char ability_match, idle_match, ack_match;
1846
1847 u32 txconfig, rxconfig;
1848#define ANEG_CFG_NP 0x00000080
1849#define ANEG_CFG_ACK 0x00000040
1850#define ANEG_CFG_RF2 0x00000020
1851#define ANEG_CFG_RF1 0x00000010
1852#define ANEG_CFG_PS2 0x00000001
1853#define ANEG_CFG_PS1 0x00008000
1854#define ANEG_CFG_HD 0x00004000
1855#define ANEG_CFG_FD 0x00002000
1856#define ANEG_CFG_INVAL 0x00001f06
1857
1858};
1859#define ANEG_OK 0
1860#define ANEG_DONE 1
1861#define ANEG_TIMER_ENAB 2
1862#define ANEG_FAILED -1
1863
1864#define ANEG_STATE_SETTLE_TIME 10000
1865
1866static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1867 struct tg3_fiber_aneginfo *ap)
1868{
1869 unsigned long delta;
1870 u32 rx_cfg_reg;
1871 int ret;
1872
1873 if (ap->state == ANEG_STATE_UNKNOWN) {
1874 ap->rxconfig = 0;
1875 ap->link_time = 0;
1876 ap->cur_time = 0;
1877 ap->ability_match_cfg = 0;
1878 ap->ability_match_count = 0;
1879 ap->ability_match = 0;
1880 ap->idle_match = 0;
1881 ap->ack_match = 0;
1882 }
1883 ap->cur_time++;
1884
1885 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1886 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1887
1888 if (rx_cfg_reg != ap->ability_match_cfg) {
1889 ap->ability_match_cfg = rx_cfg_reg;
1890 ap->ability_match = 0;
1891 ap->ability_match_count = 0;
1892 } else {
1893 if (++ap->ability_match_count > 1) {
1894 ap->ability_match = 1;
1895 ap->ability_match_cfg = rx_cfg_reg;
1896 }
1897 }
1898 if (rx_cfg_reg & ANEG_CFG_ACK)
1899 ap->ack_match = 1;
1900 else
1901 ap->ack_match = 0;
1902
1903 ap->idle_match = 0;
1904 } else {
1905 ap->idle_match = 1;
1906 ap->ability_match_cfg = 0;
1907 ap->ability_match_count = 0;
1908 ap->ability_match = 0;
1909 ap->ack_match = 0;
1910
1911 rx_cfg_reg = 0;
1912 }
1913
1914 ap->rxconfig = rx_cfg_reg;
1915 ret = ANEG_OK;
1916
1917 switch(ap->state) {
1918 case ANEG_STATE_UNKNOWN:
1919 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1920 ap->state = ANEG_STATE_AN_ENABLE;
1921
1922 /* fallthru */
1923 case ANEG_STATE_AN_ENABLE:
1924 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1925 if (ap->flags & MR_AN_ENABLE) {
1926 ap->link_time = 0;
1927 ap->cur_time = 0;
1928 ap->ability_match_cfg = 0;
1929 ap->ability_match_count = 0;
1930 ap->ability_match = 0;
1931 ap->idle_match = 0;
1932 ap->ack_match = 0;
1933
1934 ap->state = ANEG_STATE_RESTART_INIT;
1935 } else {
1936 ap->state = ANEG_STATE_DISABLE_LINK_OK;
1937 }
1938 break;
1939
1940 case ANEG_STATE_RESTART_INIT:
1941 ap->link_time = ap->cur_time;
1942 ap->flags &= ~(MR_NP_LOADED);
1943 ap->txconfig = 0;
1944 tw32(MAC_TX_AUTO_NEG, 0);
1945 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1946 tw32_f(MAC_MODE, tp->mac_mode);
1947 udelay(40);
1948
1949 ret = ANEG_TIMER_ENAB;
1950 ap->state = ANEG_STATE_RESTART;
1951
1952 /* fallthru */
1953 case ANEG_STATE_RESTART:
1954 delta = ap->cur_time - ap->link_time;
1955 if (delta > ANEG_STATE_SETTLE_TIME) {
1956 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1957 } else {
1958 ret = ANEG_TIMER_ENAB;
1959 }
1960 break;
1961
1962 case ANEG_STATE_DISABLE_LINK_OK:
1963 ret = ANEG_DONE;
1964 break;
1965
1966 case ANEG_STATE_ABILITY_DETECT_INIT:
1967 ap->flags &= ~(MR_TOGGLE_TX);
1968 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1969 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1970 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1971 tw32_f(MAC_MODE, tp->mac_mode);
1972 udelay(40);
1973
1974 ap->state = ANEG_STATE_ABILITY_DETECT;
1975 break;
1976
1977 case ANEG_STATE_ABILITY_DETECT:
1978 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1979 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1980 }
1981 break;
1982
1983 case ANEG_STATE_ACK_DETECT_INIT:
1984 ap->txconfig |= ANEG_CFG_ACK;
1985 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1986 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1987 tw32_f(MAC_MODE, tp->mac_mode);
1988 udelay(40);
1989
1990 ap->state = ANEG_STATE_ACK_DETECT;
1991
1992 /* fallthru */
1993 case ANEG_STATE_ACK_DETECT:
1994 if (ap->ack_match != 0) {
1995 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1996 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1997 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1998 } else {
1999 ap->state = ANEG_STATE_AN_ENABLE;
2000 }
2001 } else if (ap->ability_match != 0 &&
2002 ap->rxconfig == 0) {
2003 ap->state = ANEG_STATE_AN_ENABLE;
2004 }
2005 break;
2006
2007 case ANEG_STATE_COMPLETE_ACK_INIT:
2008 if (ap->rxconfig & ANEG_CFG_INVAL) {
2009 ret = ANEG_FAILED;
2010 break;
2011 }
2012 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2013 MR_LP_ADV_HALF_DUPLEX |
2014 MR_LP_ADV_SYM_PAUSE |
2015 MR_LP_ADV_ASYM_PAUSE |
2016 MR_LP_ADV_REMOTE_FAULT1 |
2017 MR_LP_ADV_REMOTE_FAULT2 |
2018 MR_LP_ADV_NEXT_PAGE |
2019 MR_TOGGLE_RX |
2020 MR_NP_RX);
2021 if (ap->rxconfig & ANEG_CFG_FD)
2022 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2023 if (ap->rxconfig & ANEG_CFG_HD)
2024 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2025 if (ap->rxconfig & ANEG_CFG_PS1)
2026 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2027 if (ap->rxconfig & ANEG_CFG_PS2)
2028 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2029 if (ap->rxconfig & ANEG_CFG_RF1)
2030 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2031 if (ap->rxconfig & ANEG_CFG_RF2)
2032 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2033 if (ap->rxconfig & ANEG_CFG_NP)
2034 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2035
2036 ap->link_time = ap->cur_time;
2037
2038 ap->flags ^= (MR_TOGGLE_TX);
2039 if (ap->rxconfig & 0x0008)
2040 ap->flags |= MR_TOGGLE_RX;
2041 if (ap->rxconfig & ANEG_CFG_NP)
2042 ap->flags |= MR_NP_RX;
2043 ap->flags |= MR_PAGE_RX;
2044
2045 ap->state = ANEG_STATE_COMPLETE_ACK;
2046 ret = ANEG_TIMER_ENAB;
2047 break;
2048
2049 case ANEG_STATE_COMPLETE_ACK:
2050 if (ap->ability_match != 0 &&
2051 ap->rxconfig == 0) {
2052 ap->state = ANEG_STATE_AN_ENABLE;
2053 break;
2054 }
2055 delta = ap->cur_time - ap->link_time;
2056 if (delta > ANEG_STATE_SETTLE_TIME) {
2057 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2058 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2059 } else {
2060 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2061 !(ap->flags & MR_NP_RX)) {
2062 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2063 } else {
2064 ret = ANEG_FAILED;
2065 }
2066 }
2067 }
2068 break;
2069
2070 case ANEG_STATE_IDLE_DETECT_INIT:
2071 ap->link_time = ap->cur_time;
2072 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2073 tw32_f(MAC_MODE, tp->mac_mode);
2074 udelay(40);
2075
2076 ap->state = ANEG_STATE_IDLE_DETECT;
2077 ret = ANEG_TIMER_ENAB;
2078 break;
2079
2080 case ANEG_STATE_IDLE_DETECT:
2081 if (ap->ability_match != 0 &&
2082 ap->rxconfig == 0) {
2083 ap->state = ANEG_STATE_AN_ENABLE;
2084 break;
2085 }
2086 delta = ap->cur_time - ap->link_time;
2087 if (delta > ANEG_STATE_SETTLE_TIME) {
2088 /* XXX another gem from the Broadcom driver :( */
2089 ap->state = ANEG_STATE_LINK_OK;
2090 }
2091 break;
2092
2093 case ANEG_STATE_LINK_OK:
2094 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2095 ret = ANEG_DONE;
2096 break;
2097
2098 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2099 /* ??? unimplemented */
2100 break;
2101
2102 case ANEG_STATE_NEXT_PAGE_WAIT:
2103 /* ??? unimplemented */
2104 break;
2105
2106 default:
2107 ret = ANEG_FAILED;
2108 break;
2109 };
2110
2111 return ret;
2112}
2113
2114static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2115{
2116 int res = 0;
2117 struct tg3_fiber_aneginfo aninfo;
2118 int status = ANEG_FAILED;
2119 unsigned int tick;
2120 u32 tmp;
2121
2122 tw32_f(MAC_TX_AUTO_NEG, 0);
2123
2124 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2125 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2126 udelay(40);
2127
2128 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2129 udelay(40);
2130
2131 memset(&aninfo, 0, sizeof(aninfo));
2132 aninfo.flags |= MR_AN_ENABLE;
2133 aninfo.state = ANEG_STATE_UNKNOWN;
2134 aninfo.cur_time = 0;
2135 tick = 0;
2136 while (++tick < 195000) {
2137 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2138 if (status == ANEG_DONE || status == ANEG_FAILED)
2139 break;
2140
2141 udelay(1);
2142 }
2143
2144 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2145 tw32_f(MAC_MODE, tp->mac_mode);
2146 udelay(40);
2147
2148 *flags = aninfo.flags;
2149
2150 if (status == ANEG_DONE &&
2151 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2152 MR_LP_ADV_FULL_DUPLEX)))
2153 res = 1;
2154
2155 return res;
2156}
2157
/* Initialization sequence for the BCM8002 SerDes PHY.  The register
 * numbers and values below are raw vendor init magic; the inline
 * comments are from the original author and are preserved verbatim.
 * Busy-waits (~5ms and ~150ms) because this can run in atomic context.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2207
/* Bring up a fiber link using the chip's hardware SerDes autoneg
 * engine (SG_DIG block).  Called from tg3_setup_fiber_phy() when
 * TG3_FLG2_HW_AUTONEG is set.  Returns 1 if the link came up,
 * 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* SERDES_CFG workaround for everything except 5704 A0/A1.
	 * DUAL_MAC_CTRL_ID distinguishes port A from port B.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if the autoneg-enable bit (bit 31) is
		 * still set from before, disable the SG_DIG engine.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg: pulse the soft-reset bit
		 * (bit 30) while writing the desired control value.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Giver time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		/* NOTE(review): SG_DIG_STATUS bit 1 appears to mean
		 * "autoneg complete" and bits 19/20 the partner's
		 * pause bits -- inferred from usage; verify against the
		 * chip programmer's guide.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  Right after an init
			 * just clear the flag and retry next time;
			 * otherwise fall back to parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2332
/* Bring up a fiber link with the autoneg protocol driven in software
 * (fiber_autoneg()), or force 1000FD when autoneg is disabled.
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no link; any previously negotiated flow
	 * control result is stale.
	 */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the MR_* result bits into MII-style
			 * pause advertisements for flow control setup.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack SYNC/CFG changed events until they stop
		 * re-asserting (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection: synced but receiving no config
		 * words means the partner is not autonegotiating --
		 * treat the link as up.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2390
/* (Re)configure a fiber (TBI) link: program the MAC for TBI mode,
 * init the BCM8002 PHY if present, run hardware or software autoneg,
 * then update carrier state, LEDs and link_config, reporting changes
 * via tg3_link_report().  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so only real changes get reported
	 * at the end.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up and the MAC
	 * shows a clean synced link -- just ack the change bits.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the pending link-change bit in the status block while
	 * keeping SD_STATUS_UPDATED set.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack SYNC/CFG changed events until they stop re-asserting
	 * (bounded at 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Pulse SEND_CONFIGS to restart negotiation. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only ever run 1000 Mb/s full duplex. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged; still report if the pause, speed
		 * or duplex settings moved.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2504
/* Top-level link setup: dispatch to the fiber or copper handler, then
 * program MAC_TX_LENGTHS for the resulting speed/duplex, and (on
 * non-5705-class chips) gate the statistics coalescing clock on
 * carrier state.  Returns the handler's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000 Mb/s half duplex needs the extended slot time (0xff);
	 * everything else uses the standard value (32).
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Stop statistics DMA while there is no link. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	return err;
}
2538
2539/* Tigon3 never reports partial packet sends. So we do not
2540 * need special logic to handle SKBs that have not had all
2541 * of their frags sent yet, like SunGEM does.
2542 */
2543static void tg3_tx(struct tg3 *tp)
2544{
2545 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2546 u32 sw_idx = tp->tx_cons;
2547
2548 while (sw_idx != hw_idx) {
2549 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2550 struct sk_buff *skb = ri->skb;
2551 int i;
2552
2553 if (unlikely(skb == NULL))
2554 BUG();
2555
2556 pci_unmap_single(tp->pdev,
2557 pci_unmap_addr(ri, mapping),
2558 skb_headlen(skb),
2559 PCI_DMA_TODEVICE);
2560
2561 ri->skb = NULL;
2562
2563 sw_idx = NEXT_TX(sw_idx);
2564
2565 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2566 if (unlikely(sw_idx == hw_idx))
2567 BUG();
2568
2569 ri = &tp->tx_buffers[sw_idx];
2570 if (unlikely(ri->skb != NULL))
2571 BUG();
2572
2573 pci_unmap_page(tp->pdev,
2574 pci_unmap_addr(ri, mapping),
2575 skb_shinfo(skb)->frags[i].size,
2576 PCI_DMA_TODEVICE);
2577
2578 sw_idx = NEXT_TX(sw_idx);
2579 }
2580
2581 dev_kfree_skb_irq(skb);
2582 }
2583
2584 tp->tx_cons = sw_idx;
2585
2586 if (netif_queue_stopped(tp->dev) &&
2587 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2588 netif_wake_queue(tp->dev);
2589}
2590
2591/* Returns size of skb allocated or < 0 on error.
2592 *
2593 * We only need to fill in the address because the other members
2594 * of the RX descriptor are invariant, see tg3_init_rings.
2595 *
2596 * Note the purposeful assymetry of cpu vs. chip accesses. For
2597 * posting buffers we only dirty the first cache line of the RX
2598 * descriptor (containing the address). Whereas for the RX status
2599 * buffers the cpu only reads the last cacheline of the RX descriptor
2600 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2601 */
2602static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2603 int src_idx, u32 dest_idx_unmasked)
2604{
2605 struct tg3_rx_buffer_desc *desc;
2606 struct ring_info *map, *src_map;
2607 struct sk_buff *skb;
2608 dma_addr_t mapping;
2609 int skb_size, dest_idx;
2610
2611 src_map = NULL;
2612 switch (opaque_key) {
2613 case RXD_OPAQUE_RING_STD:
2614 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2615 desc = &tp->rx_std[dest_idx];
2616 map = &tp->rx_std_buffers[dest_idx];
2617 if (src_idx >= 0)
2618 src_map = &tp->rx_std_buffers[src_idx];
2619 skb_size = RX_PKT_BUF_SZ;
2620 break;
2621
2622 case RXD_OPAQUE_RING_JUMBO:
2623 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2624 desc = &tp->rx_jumbo[dest_idx];
2625 map = &tp->rx_jumbo_buffers[dest_idx];
2626 if (src_idx >= 0)
2627 src_map = &tp->rx_jumbo_buffers[src_idx];
2628 skb_size = RX_JUMBO_PKT_BUF_SZ;
2629 break;
2630
2631 default:
2632 return -EINVAL;
2633 };
2634
2635 /* Do not overwrite any of the map or rp information
2636 * until we are sure we can commit to a new buffer.
2637 *
2638 * Callers depend upon this behavior and assume that
2639 * we leave everything unchanged if we fail.
2640 */
2641 skb = dev_alloc_skb(skb_size);
2642 if (skb == NULL)
2643 return -ENOMEM;
2644
2645 skb->dev = tp->dev;
2646 skb_reserve(skb, tp->rx_offset);
2647
2648 mapping = pci_map_single(tp->pdev, skb->data,
2649 skb_size - tp->rx_offset,
2650 PCI_DMA_FROMDEVICE);
2651
2652 map->skb = skb;
2653 pci_unmap_addr_set(map, mapping, mapping);
2654
2655 if (src_map != NULL)
2656 src_map->skb = NULL;
2657
2658 desc->addr_hi = ((u64)mapping >> 32);
2659 desc->addr_lo = ((u64)mapping & 0xffffffff);
2660
2661 return skb_size;
2662}
2663
2664/* We only need to move over in the address because the other
2665 * members of the RX descriptor are invariant. See notes above
2666 * tg3_alloc_rx_skb for full details.
2667 */
2668static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2669 int src_idx, u32 dest_idx_unmasked)
2670{
2671 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2672 struct ring_info *src_map, *dest_map;
2673 int dest_idx;
2674
2675 switch (opaque_key) {
2676 case RXD_OPAQUE_RING_STD:
2677 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2678 dest_desc = &tp->rx_std[dest_idx];
2679 dest_map = &tp->rx_std_buffers[dest_idx];
2680 src_desc = &tp->rx_std[src_idx];
2681 src_map = &tp->rx_std_buffers[src_idx];
2682 break;
2683
2684 case RXD_OPAQUE_RING_JUMBO:
2685 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2686 dest_desc = &tp->rx_jumbo[dest_idx];
2687 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2688 src_desc = &tp->rx_jumbo[src_idx];
2689 src_map = &tp->rx_jumbo_buffers[src_idx];
2690 break;
2691
2692 default:
2693 return;
2694 };
2695
2696 dest_map->skb = src_map->skb;
2697 pci_unmap_addr_set(dest_map, mapping,
2698 pci_unmap_addr(src_map, mapping));
2699 dest_desc->addr_hi = src_desc->addr_hi;
2700 dest_desc->addr_lo = src_desc->addr_lo;
2701
2702 src_map->skb = NULL;
2703}
2704
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the VLAN acceleration
 * path using tp->vlgrp.  NOTE(review): the only caller (tg3_rx)
 * invokes this after checking tp->vlgrp != NULL.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
2711
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 *
 * tg3_rx() processes up to 'budget' packets from the status ring and
 * returns the number actually received.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which posting ring
		 * (std or jumbo) the buffer came from, and its index.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
			) {
			int skb_size;

			/* Large packet: post a replacement buffer and
			 * pass the original skb up the stack.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and
			 * recycle the original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = tp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Hardware checksum offload: a full ones-complement
		 * result of 0xffff means the TCP/UDP checksum verified.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the posted mailbox writes before returning. */
	mmiowb();

	return received;
}
2882
/* NAPI poll callback.  Handles link-change events, runs TX reclaim,
 * then RX within the NAPI budget.  When no work remains it completes
 * NAPI and re-enables chip interrupts via tg3_restart_ints().
 * Returns 0 when done, 1 to be polled again.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	int done;

	spin_lock_irqsave(&tp->lock, flags);

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit but keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			tg3_setup_phy(tp, 0);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		spin_lock(&tp->tx_lock);
		tg3_tx(tp);
		spin_unlock(&tp->tx_lock);
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	/* Latch the status tag (tagged-status mode), with an rmb()
	 * ordering that read before SD_STATUS_UPDATED is cleared and
	 * before tg3_has_work() re-examines the status block.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
		tp->last_tag = sblk->status_tag;
	rmb();
	sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);

	if (done) {
		spin_lock_irqsave(&tp->lock, flags);
		__netif_rx_complete(netdev);
		tg3_restart_ints(tp);
		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return (done ? 0 : 1);
}
2945
88b06bc2
MC
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Latch the status tag; the rmb() orders that read before the
	 * UPDATED bit is cleared.
	 */
	tp->last_tag = sblk->status_tag;
	rmb();
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */
	else {
		/* No work, re-enable interrupts.  Writing the tag back
		 * (<< 24) tells the chip which status block revision
		 * we have seen.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     tp->last_tag << 24);
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	return IRQ_RETVAL(1);
}
2982
1da177e4
LT
/* INTx ISR for chips not using tagged status.  Confirms the interrupt
 * is ours, masks further chip irqs via the interrupt mailbox, and
 * schedules NAPI.  Returns 0 ("not handled") when the line appears to
 * belong to another device sharing the interrupt.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	unsigned int handled = 1;

	spin_lock_irqsave(&tp->lock, flags);

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				     0x00000000);
			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	return IRQ_RETVAL(handled);
}
3028
/* INTx ISR for chips using tagged status blocks.  Same flow as
 * tg3_interrupt(), but additionally latches the status tag that is
 * written back (<< 24) to the mailbox when re-enabling interrupts.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	unsigned int handled = 1;

	spin_lock_irqsave(&tp->lock, flags);

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Latch the tag; the rmb() orders that read before the
		 * UPDATED bit is cleared.
		 */
		tp->last_tag = sblk->status_tag;
		rmb();
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		else {
			/* no work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				     tp->last_tag << 24);
			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	return IRQ_RETVAL(handled);
}
3076
7938109f
MC
3077/* ISR for interrupt test */
3078static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3079 struct pt_regs *regs)
3080{
3081 struct net_device *dev = dev_id;
3082 struct tg3 *tp = netdev_priv(dev);
3083 struct tg3_hw_status *sblk = tp->hw_status;
3084
3085 if (sblk->status & SD_STATUS_UPDATED) {
3086 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3087 0x00000001);
3088 return IRQ_RETVAL(1);
3089 }
3090 return IRQ_RETVAL(0);
3091}
3092
1da177e4 3093static int tg3_init_hw(struct tg3 *);
944d980e 3094static int tg3_halt(struct tg3 *, int, int);
1da177e4
LT
3095
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service the device with interrupts disabled by
 * invoking the INTx handler directly.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3104
/* Workqueue handler: fully halt and reinitialize the chip (used e.g.
 * after a TX timeout), then restart the interface.  Re-arms tp->timer
 * if a restart was requested via TG3_FLG2_RESTART_TIMER.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_netif_stop(tp);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	/* Consume the restart-timer request flag under both locks. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_init_hw(tp);

	tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);
}
3129
3130static void tg3_tx_timeout(struct net_device *dev)
3131{
3132 struct tg3 *tp = netdev_priv(dev);
3133
3134 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3135 dev->name);
3136
3137 schedule_work(&tp->reset_task);
3138}
3139
3140static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3141
/* Work around the 4GB-boundary DMA hardware bug: linearize the skb
 * into a freshly allocated copy (which lands at a new DMA address),
 * rewrite descriptor *start to point at it, and unmap/clear the sw
 * ring slots previously claimed by the original skb.
 *
 * Consumes 'skb' on both success and failure.  Returns 0 on success,
 * -1 if the copy allocation failed.  NOTE(review): guilty_entry and
 * guilty_len are currently unused.
 */
static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 guilty_entry, int guilty_len,
				       u32 last_plus_one, u32 *start, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr;
	u32 entry = *start;
	int i;

	if (!new_skb) {
		dev_kfree_skb(skb);
		return -1;
	}

	/* New SKB is guaranteed to be linear. */
	entry = *start;
	new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
				  PCI_DMA_TODEVICE);
	tg3_set_txd(tp, entry, new_addr, new_skb->len,
		    (skb->ip_summed == CHECKSUM_HW) ?
		    TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
	*start = NEXT_TX(entry);

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Slot 0 held the linear head, later slots the frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* Head slot takes ownership of the new skb. */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return 0;
}
3191
3192static void tg3_set_txd(struct tg3 *tp, int entry,
3193 dma_addr_t mapping, int len, u32 flags,
3194 u32 mss_and_is_end)
3195{
3196 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3197 int is_end = (mss_and_is_end & 0x1);
3198 u32 mss = (mss_and_is_end >> 1);
3199 u32 vlan_tag = 0;
3200
3201 if (is_end)
3202 flags |= TXD_FLAG_END;
3203 if (flags & TXD_FLAG_VLAN) {
3204 vlan_tag = flags >> 16;
3205 flags &= 0xffff;
3206 }
3207 vlan_tag |= (mss << TXD_MSS_SHIFT);
3208
3209 txd->addr_hi = ((u64) mapping >> 32);
3210 txd->addr_lo = ((u64) mapping & 0xffffffff);
3211 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3212 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3213}
3214
3215static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3216{
3217 u32 base = (u32) mapping & 0xffffffff;
3218
3219 return ((base > 0xffffdcc0) &&
3220 (base + len + 8 < base));
3221}
3222
/* hard_start_xmit entry point: map the skb (head + page fragments) for
 * DMA, build one hardware TX descriptor per piece, then kick the TX
 * producer mailbox.  Returns NETDEV_TX_OK, NETDEV_TX_LOCKED if the
 * tx_lock could not be taken, or NETDEV_TX_BUSY if the ring is
 * unexpectedly full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	unsigned int i;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
	unsigned long flags;

	len = skb_headlen(skb);

	/* No BH disabling for tx_lock here.  We are running in BH disabled
	 * context and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Rejoice!
	 *
	 * Actually, things are not so simple.  If we are to take a hw
	 * IRQ here, we can deadlock, consider:
	 *
	 *       CPU1		CPU2
	 *   tg3_start_xmit
	 *   take tp->tx_lock
	 *			tg3_timer
	 *			take tp->lock
	 *   tg3_interrupt
	 *   spin on tp->lock
	 *			spin on tp->tx_lock
	 *
	 * So we really do need to disable interrupts when taking
	 * tx_lock here.
	 */
	local_irq_save(flags);
	if (!spin_trylock(&tp->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	/* This is a hard error, log it.  The queue should have been
	 * stopped before the ring got this full (see the check at the
	 * bottom of this function).
	 */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&tp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->tso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* We modify the IP/TCP headers below; a cloned header
		 * must be made private first.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Pre-cook the headers the way the TSO engine expects:
		 * zeroed IP checksum and per-segment tot_len.
		 */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* Hardware computes the TCP checksum itself. */
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			/* Firmware TSO: seed the TCP pseudo-header checksum. */
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* Encode IP/TCP option lengths; field position differs
		 * between HW-TSO/5705 chips and the older firmware path.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* would_hit_hwbug stores (offending entry + 1) so that slot 0
	 * is distinguishable from "no hit".
	 */
	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = entry + 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the first slot holds the skb pointer;
			 * reclaim uses it to find all fragments.
			 */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len)) {
				/* Only one should match. */
				if (would_hit_hwbug)
					BUG();
				would_hit_hwbug = entry + 1;
			}

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;
		unsigned int len = 0;	/* shadows outer len on purpose */

		would_hit_hwbug -= 1;	/* back to the real slot index */
		/* Rewind to the first descriptor of this frame, then walk
		 * forward to find the offending slot and its length.
		 */
		entry = entry - 1 - skb_shinfo(skb)->nr_frags;
		entry &= (TG3_TX_RING_SIZE - 1);
		start = entry;
		i = 0;
		while (entry != last_plus_one) {
			if (i == 0)
				len = skb_headlen(skb);
			else
				len = skb_shinfo(skb)->frags[i-1].size;

			if (entry == would_hit_hwbug)
				break;

			i++;
			entry = NEXT_TX(entry);

		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_4gb_hwbug_workaround(tp, skb,
						entry, len,
						last_plus_one,
						&start, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	/* Stop early if the next frame might not fit. */
	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

out_unlock:
	mmiowb();
	spin_unlock_irqrestore(&tp->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3433
3434static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3435 int new_mtu)
3436{
3437 dev->mtu = new_mtu;
3438
3439 if (new_mtu > ETH_DATA_LEN)
3440 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3441 else
3442 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3443}
3444
/* ndo change_mtu handler.  If the interface is down, only the soft
 * state is updated; otherwise the chip is halted, reprogrammed with
 * the new MTU, and restarted.  Returns 0 or -EINVAL for an
 * out-of-range MTU.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Lock order: tp->lock before tp->tx_lock, matching the rest
	 * of the driver.
	 */
	tg3_netif_stop(tp);
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	/* Silent halt: a stuck block during shutdown is not worth a
	 * log message here.
	 */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_init_hw(tp);

	tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
3477
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: unmap and free any posted buffers. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same treatment with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: only the first slot of a frame holds the skb; the
	 * following nr_frags slots hold page mappings for the same skb
	 * (see tg3_start_xmit), so consume them together.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
3549
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static void tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* Usable length: buffer minus alignment offset and the
		 * 64-byte guard the chip requires.
		 */
		rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  A partial fill
	 * (allocation failure) is tolerated; the ring simply runs with
	 * fewer posted buffers.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
				     -1, i) < 0)
			break;
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0)
				break;
		}
	}
}
3614
3615/*
3616 * Must not be invoked with interrupt sources disabled and
3617 * the hardware shutdown down.
3618 */
3619static void tg3_free_consistent(struct tg3 *tp)
3620{
3621 if (tp->rx_std_buffers) {
3622 kfree(tp->rx_std_buffers);
3623 tp->rx_std_buffers = NULL;
3624 }
3625 if (tp->rx_std) {
3626 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3627 tp->rx_std, tp->rx_std_mapping);
3628 tp->rx_std = NULL;
3629 }
3630 if (tp->rx_jumbo) {
3631 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3632 tp->rx_jumbo, tp->rx_jumbo_mapping);
3633 tp->rx_jumbo = NULL;
3634 }
3635 if (tp->rx_rcb) {
3636 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3637 tp->rx_rcb, tp->rx_rcb_mapping);
3638 tp->rx_rcb = NULL;
3639 }
3640 if (tp->tx_ring) {
3641 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3642 tp->tx_ring, tp->tx_desc_mapping);
3643 tp->tx_ring = NULL;
3644 }
3645 if (tp->hw_status) {
3646 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3647 tp->hw_status, tp->status_mapping);
3648 tp->hw_status = NULL;
3649 }
3650 if (tp->hw_stats) {
3651 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3652 tp->hw_stats, tp->stats_mapping);
3653 tp->hw_stats = NULL;
3654 }
3655}
3656
/* Allocate every host-memory resource the chip needs: one kmalloc'd
 * bookkeeping array (shared by the std/jumbo RX rings and the TX ring)
 * plus DMA-consistent memory for all descriptor rings, the status
 * block, and the statistics block.  On any failure everything already
 * allocated is released via tg3_free_consistent() and -ENOMEM is
 * returned; returns 0 on success.
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One allocation backs all three bookkeeping arrays; only
	 * rx_std_buffers owns the memory (see tg3_free_consistent).
	 */
	tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	memset(tp->rx_std_buffers, 0,
	       (sizeof(struct ring_info) *
		(TG3_RX_RING_SIZE +
		 TG3_RX_JUMBO_RING_SIZE)) +
	       (sizeof(struct tx_ring_info) *
		TG3_TX_RING_SIZE));

	/* Carve the jumbo and TX arrays out of the same block. */
	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* The chip reads these; start from a clean slate. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
3725
3726#define MAX_WAIT_CNT 1000
3727
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs: register offset of the block's mode register.
 * @enable_bit: the enable bit within that register.
 * @silent: suppress the timeout error message when nonzero.
 * Returns 0 on success (or for blocks that cannot be disabled on
 * 5705/5750), -ENODEV if the bit fails to clear within MAX_WAIT_CNT
 * polls of 100us each.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		};
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		printk(KERN_ERR PFX "tg3_stop_block timed out, "
		       "ofs=%lx enable_bit=%x\n",
		       ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
3773
/* Bring all chip DMA/transmit/receive blocks to an orderly stop in
 * dependency order (receive path, send path, MAC, then host coherency
 * and buffer-manager blocks), and clear the status and statistics
 * blocks.  @silent suppresses per-block timeout messages.  Returns 0
 * or an accumulated negative error if any block failed to stop.
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Errors are OR-ed together so every block is attempted even
	 * after one fails.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
3836
3837/* tp->lock is held. */
3838static int tg3_nvram_lock(struct tg3 *tp)
3839{
3840 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3841 int i;
3842
3843 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3844 for (i = 0; i < 8000; i++) {
3845 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3846 break;
3847 udelay(20);
3848 }
3849 if (i == 8000)
3850 return -ENODEV;
3851 }
3852 return 0;
3853}
3854
3855/* tp->lock is held. */
3856static void tg3_nvram_unlock(struct tg3 *tp)
3857{
3858 if (tp->tg3_flags & TG3_FLAG_NVRAM)
3859 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3860}
3861
e6af301b
MC
3862/* tp->lock is held. */
3863static void tg3_enable_nvram_access(struct tg3 *tp)
3864{
3865 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3866 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3867 u32 nvaccess = tr32(NVRAM_ACCESS);
3868
3869 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3870 }
3871}
3872
3873/* tp->lock is held. */
3874static void tg3_disable_nvram_access(struct tg3 *tp)
3875{
3876 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3877 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3878 u32 nvaccess = tr32(NVRAM_ACCESS);
3879
3880 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3881 }
3882}
3883
1da177e4
LT
3884/* tp->lock is held. */
3885static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3886{
3887 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3888 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3889 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3890
3891 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3892 switch (kind) {
3893 case RESET_KIND_INIT:
3894 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3895 DRV_STATE_START);
3896 break;
3897
3898 case RESET_KIND_SHUTDOWN:
3899 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3900 DRV_STATE_UNLOAD);
3901 break;
3902
3903 case RESET_KIND_SUSPEND:
3904 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3905 DRV_STATE_SUSPEND);
3906 break;
3907
3908 default:
3909 break;
3910 };
3911 }
3912}
3913
3914/* tp->lock is held. */
3915static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3916{
3917 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3918 switch (kind) {
3919 case RESET_KIND_INIT:
3920 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3921 DRV_STATE_START_DONE);
3922 break;
3923
3924 case RESET_KIND_SHUTDOWN:
3925 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3926 DRV_STATE_UNLOAD_DONE);
3927 break;
3928
3929 default:
3930 break;
3931 };
3932 }
3933}
3934
3935/* tp->lock is held. */
3936static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3937{
3938 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3939 switch (kind) {
3940 case RESET_KIND_INIT:
3941 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3942 DRV_STATE_START);
3943 break;
3944
3945 case RESET_KIND_SHUTDOWN:
3946 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3947 DRV_STATE_UNLOAD);
3948 break;
3949
3950 case RESET_KIND_SUSPEND:
3951 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3952 DRV_STATE_SUSPEND);
3953 break;
3954
3955 default:
3956 break;
3957 };
3958 }
3959}
3960
3961static void tg3_stop_fw(struct tg3 *);
3962
/* Issue a core-clock reset to the chip and bring basic register access
 * back up: restore PCI config state, re-enable indirect accesses, wait
 * for on-chip firmware to finish booting, and re-probe ASF state.
 * Returns 0 or -ENODEV if firmware never signals completion.
 * tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	u32 flags_save;
	int i;

	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
		tg3_nvram_lock(tp);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	flags_save = tp->tg3_flags;
	tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround flag */
	tp->tg3_flags = flags_save;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
		/* Wait for firmware initialization to complete. */
		for (i = 0; i < 100000; i++) {
			tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			udelay(10);
		}
		if (i >= 100000) {
			printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
			       "firmware will not restart magic=%08x\n",
			       tp->dev->name, val);
			return -ENODEV;
		}
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state. */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4132
4133/* tp->lock is held. */
4134static void tg3_stop_fw(struct tg3 *tp)
4135{
4136 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4137 u32 val;
4138 int i;
4139
4140 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4141 val = tr32(GRC_RX_CPU_EVENT);
4142 val |= (1 << 14);
4143 tw32(GRC_RX_CPU_EVENT, val);
4144
4145 /* Wait for RX cpu to ACK the event. */
4146 for (i = 0; i < 100; i++) {
4147 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4148 break;
4149 udelay(1);
4150 }
4151 }
4152}
4153
/* Fully halt the chip: pause ASF firmware, announce the @kind
 * (RESET_KIND_*) to firmware, stop every hardware block, reset the
 * core, then complete both handshake variants.  @silent suppresses
 * per-block shutdown warnings.  Returns the tg3_chip_reset() result.
 * tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	/* Abort errors are ignored: the chip reset below recovers a
	 * block that refused to stop.
	 */
	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
4174
4175#define TG3_FW_RELEASE_MAJOR 0x0
4176#define TG3_FW_RELASE_MINOR 0x0
4177#define TG3_FW_RELEASE_FIX 0x0
4178#define TG3_FW_START_ADDR 0x08000000
4179#define TG3_FW_TEXT_ADDR 0x08000000
4180#define TG3_FW_TEXT_LEN 0x9c0
4181#define TG3_FW_RODATA_ADDR 0x080009c0
4182#define TG3_FW_RODATA_LEN 0x60
4183#define TG3_FW_DATA_ADDR 0x08000a40
4184#define TG3_FW_DATA_LEN 0x20
4185#define TG3_FW_SBSS_ADDR 0x08000a60
4186#define TG3_FW_SBSS_LEN 0xc
4187#define TG3_FW_BSS_ADDR 0x08000a70
4188#define TG3_FW_BSS_LEN 0x10
4189
/* MIPS instruction words of the on-chip RX CPU firmware's .text
 * section (see the TG3_FW_* layout macros above).  Opaque binary data
 * loaded into chip scratch memory at boot — do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
4283
/* Read-only data section of the 5701_A0 RX CPU firmware patch, loaded at
 * TG3_FW_RODATA_ADDR by tg3_load_5701_a0_firmware_fix().  The words appear
 * to be packed ASCII string constants used by the firmware (e.g. "5701",
 * "SwEvent0", "fatalErr") — opaque to the driver, copied verbatim.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
4291
#if 0 /* All zeros, don't eat up space with it. */
/* Initialized-data section of the 5701_A0 firmware.  It is entirely zero,
 * so tg3_load_firmware_cpu() is instead handed a NULL data pointer
 * (info.data_data) and writes zeros itself — see
 * tg3_load_5701_a0_firmware_fix().
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif

/* On-chip scratch memory windows into which firmware images are copied
 * before the corresponding CPU is released from halt.
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
4303
4304/* tp->lock is held. */
4305static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4306{
4307 int i;
4308
4309 if (offset == TX_CPU_BASE &&
4310 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4311 BUG();
4312
4313 if (offset == RX_CPU_BASE) {
4314 for (i = 0; i < 10000; i++) {
4315 tw32(offset + CPU_STATE, 0xffffffff);
4316 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4317 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4318 break;
4319 }
4320
4321 tw32(offset + CPU_STATE, 0xffffffff);
4322 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4323 udelay(10);
4324 } else {
4325 for (i = 0; i < 10000; i++) {
4326 tw32(offset + CPU_STATE, 0xffffffff);
4327 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4328 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4329 break;
4330 }
4331 }
4332
4333 if (i >= 10000) {
4334 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4335 "and %s CPU\n",
4336 tp->dev->name,
4337 (offset == RX_CPU_BASE ? "RX" : "TX"));
4338 return -ENODEV;
4339 }
4340 return 0;
4341}
4342
/* Describes one firmware image to be loaded into CPU scratch memory by
 * tg3_load_firmware_cpu().  Each section has a link-time address, a byte
 * length, and a pointer to its words; a NULL pointer means the section is
 * all zeros.
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* .text length in bytes */
	u32 *text_data;			/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* .data length in bytes */
	u32 *data_data;			/* .data words, or NULL for zeros */
};
4354
4355/* tp->lock is held. */
4356static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4357 int cpu_scratch_size, struct fw_info *info)
4358{
4359 int err, i;
4360 u32 orig_tg3_flags = tp->tg3_flags;
4361 void (*write_op)(struct tg3 *, u32, u32);
4362
4363 if (cpu_base == TX_CPU_BASE &&
4364 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4365 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4366 "TX cpu firmware on %s which is 5705.\n",
4367 tp->dev->name);
4368 return -EINVAL;
4369 }
4370
4371 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4372 write_op = tg3_write_mem;
4373 else
4374 write_op = tg3_write_indirect_reg32;
4375
4376 /* Force use of PCI config space for indirect register
4377 * write calls.
4378 */
4379 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4380
1b628151
MC
4381 /* It is possible that bootcode is still loading at this point.
4382 * Get the nvram lock first before halting the cpu.
4383 */
4384 tg3_nvram_lock(tp);
1da177e4 4385 err = tg3_halt_cpu(tp, cpu_base);
1b628151 4386 tg3_nvram_unlock(tp);
1da177e4
LT
4387 if (err)
4388 goto out;
4389
4390 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4391 write_op(tp, cpu_scratch_base + i, 0);
4392 tw32(cpu_base + CPU_STATE, 0xffffffff);
4393 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4394 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4395 write_op(tp, (cpu_scratch_base +
4396 (info->text_base & 0xffff) +
4397 (i * sizeof(u32))),
4398 (info->text_data ?
4399 info->text_data[i] : 0));
4400 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4401 write_op(tp, (cpu_scratch_base +
4402 (info->rodata_base & 0xffff) +
4403 (i * sizeof(u32))),
4404 (info->rodata_data ?
4405 info->rodata_data[i] : 0));
4406 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4407 write_op(tp, (cpu_scratch_base +
4408 (info->data_base & 0xffff) +
4409 (i * sizeof(u32))),
4410 (info->data_data ?
4411 info->data_data[i] : 0));
4412
4413 err = 0;
4414
4415out:
4416 tp->tg3_flags = orig_tg3_flags;
4417 return err;
4418}
4419
4420/* tp->lock is held. */
4421static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4422{
4423 struct fw_info info;
4424 int err, i;
4425
4426 info.text_base = TG3_FW_TEXT_ADDR;
4427 info.text_len = TG3_FW_TEXT_LEN;
4428 info.text_data = &tg3FwText[0];
4429 info.rodata_base = TG3_FW_RODATA_ADDR;
4430 info.rodata_len = TG3_FW_RODATA_LEN;
4431 info.rodata_data = &tg3FwRodata[0];
4432 info.data_base = TG3_FW_DATA_ADDR;
4433 info.data_len = TG3_FW_DATA_LEN;
4434 info.data_data = NULL;
4435
4436 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4437 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4438 &info);
4439 if (err)
4440 return err;
4441
4442 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4443 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4444 &info);
4445 if (err)
4446 return err;
4447
4448 /* Now startup only the RX cpu. */
4449 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4450 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4451
4452 for (i = 0; i < 5; i++) {
4453 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4454 break;
4455 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4456 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
4457 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
4458 udelay(1000);
4459 }
4460 if (i >= 5) {
4461 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4462 "to set RX CPU PC, is %08x should be %08x\n",
4463 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4464 TG3_FW_TEXT_ADDR);
4465 return -ENODEV;
4466 }
4467 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4468 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
4469
4470 return 0;
4471}
4472
#if TG3_TSO_SUPPORT != 0

/* Layout of the TSO firmware image (version 1.6.0) for chips that run
 * TSO on an on-chip CPU: link addresses and byte lengths of each section.
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a historical typo;
 * it is left as-is because the macro name may be referenced elsewhere.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
4489
4490static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4491 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4492 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4493 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4494 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4495 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4496 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4497 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4498 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4499 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4500 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4501 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4502 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4503 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4504 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4505 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4506 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4507 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4508 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4509 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4510 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4511 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4512 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4513 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4514 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4515 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4516 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4517 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4518 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4519 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4520 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4521 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4522 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4523 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4524 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4525 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4526 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4527 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4528 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4529 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4530 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4531 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4532 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4533 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4534 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4535 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4536 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4537 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4538 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4539 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4540 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4541 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4542 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4543 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4544 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4545 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4546 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4547 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4548 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4549 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4550 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4551 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4552 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4553 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4554 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4555 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4556 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4557 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4558 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4559 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4560 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4561 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4562 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4563 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4564 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4565 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4566 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4567 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4568 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4569 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4570 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4571 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4572 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4573 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4574 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4575 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4576 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4577 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4578 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4579 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4580 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4581 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4582 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4583 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4584 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4585 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4586 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4587 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4588 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4589 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4590 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4591 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4592 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4593 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4594 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4595 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4596 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4597 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4598 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4599 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4600 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4601 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4602 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4603 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4604 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4605 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4606 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4607 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4608 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4609 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4610 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4611 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4612 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4613 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4614 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4615 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4616 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4617 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4618 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4619 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4620 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4621 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4622 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4623 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4624 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4625 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4626 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4627 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4628 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4629 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4630 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4631 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4632 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4633 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4634 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4635 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4636 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4637 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4638 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4639 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4640 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4641 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4642 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4643 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4644 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4645 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4646 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4647 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4648 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4649 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4650 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4651 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4652 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4653 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4654 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4655 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4656 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4657 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4658 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4659 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4660 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4661 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4662 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4663 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4664 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4665 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4666 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4667 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4668 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4669 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4670 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4671 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4672 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4673 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4674 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4675 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4676 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4677 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4678 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4679 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4680 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4681 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4682 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4683 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4684 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4685 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4686 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4687 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4688 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4689 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4690 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4691 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4692 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4693 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4694 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4695 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4696 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4697 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4698 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4699 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4700 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4701 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4702 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4703 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4704 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4705 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4706 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4707 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4708 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4709 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4710 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4711 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4712 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4713 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4714 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4715 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4716 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4717 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4718 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4719 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4720 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4721 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4722 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4723 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4724 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4725 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4726 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4727 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4728 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4729 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4730 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4731 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4732 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4733 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4734 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4735 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4736 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4737 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4738 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4739 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4740 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4741 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4742 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4743 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4744 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4745 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4746 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4747 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4748 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4749 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4750 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4751 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4752 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4753 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4754 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4755 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4756 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4757 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4758 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4759 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4760 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4761 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4762 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4763 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4764 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4765 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4766 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4767 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4768 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4769 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4770 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4771 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4772 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4773 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4774 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4775};
4776
/* Read-only data section of the TSO firmware, loaded at
 * TG3_TSO_FW_RODATA_ADDR.  The words appear to be packed ASCII string
 * constants (e.g. "MainCpuB", "SwEvent0") — opaque to the driver.
 */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
4784
/* Initialized-data section of the TSO firmware, loaded at
 * TG3_TSO_FW_DATA_ADDR.  Contains what appears to be an ASCII version
 * string ("stkoffld_v1.6.0") followed by zeros.
 */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
4790
/* 5705 needs a special version of the TSO firmware. */
/* Layout of the 5705-specific TSO firmware image (version 1.2.0):
 * link addresses and byte lengths of each section.
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR is a historical
 * typo, kept because the macro name may be referenced elsewhere.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
4806
/* .text section of the TSO offload firmware for 5705-class chips:
 * opaque instruction words for the NIC's on-chip CPU (appears to be a
 * MIPS-style ISA -- TODO confirm), loaded into the RX CPU by
 * tg3_load_tso_firmware() via tg3_load_firmware_cpu().  Generated
 * blob -- do not edit by hand.
 */
static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
4965
/* .rodata section of the 5705 TSO firmware: NUL-padded ASCII name
 * strings ("Main CpuB", "Main CpuA", "stkoffld" x2, "fatalErr")
 * referenced by the firmware image above.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
4972
/* .data section of the 5705 TSO firmware; contains the ASCII version
 * tag "stkoffld_v1.2.0" followed by zero padding.
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
4977
/* tp->lock is held.
 *
 * Download the TSO firmware image into the appropriate on-chip CPU and
 * start that CPU executing it.  Chips with TSO in hardware
 * (TG3_FLG2_HW_TSO) need no firmware, so this is a no-op for them.
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: run the TSO5 image on the RX CPU, using the start
		 * of the MBUF pool SRAM as scratch space.  The scratch size
		 * must cover every section of the image (text/rodata/data/
		 * sbss/bss).
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* All other TSO-capable chips: run the full TSO image on
		 * the TX CPU with its dedicated scratch area.
		 */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.text_base);

	/* Verify the CPU actually latched the entry PC; retry a few times
	 * with an explicit halt in between before giving up.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear all state bits and release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}
5049
5050#endif /* TG3_TSO_SUPPORT != 0 */
5051
5052/* tp->lock is held. */
5053static void __tg3_set_mac_addr(struct tg3 *tp)
5054{
5055 u32 addr_high, addr_low;
5056 int i;
5057
5058 addr_high = ((tp->dev->dev_addr[0] << 8) |
5059 tp->dev->dev_addr[1]);
5060 addr_low = ((tp->dev->dev_addr[2] << 24) |
5061 (tp->dev->dev_addr[3] << 16) |
5062 (tp->dev->dev_addr[4] << 8) |
5063 (tp->dev->dev_addr[5] << 0));
5064 for (i = 0; i < 4; i++) {
5065 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5066 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5067 }
5068
5069 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5070 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5071 for (i = 0; i < 12; i++) {
5072 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5073 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5074 }
5075 }
5076
5077 addr_high = (tp->dev->dev_addr[0] +
5078 tp->dev->dev_addr[1] +
5079 tp->dev->dev_addr[2] +
5080 tp->dev->dev_addr[3] +
5081 tp->dev->dev_addr[4] +
5082 tp->dev->dev_addr[5]) &
5083 TX_BACKOFF_SEED_MASK;
5084 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5085}
5086
5087static int tg3_set_mac_addr(struct net_device *dev, void *p)
5088{
5089 struct tg3 *tp = netdev_priv(dev);
5090 struct sockaddr *addr = p;
5091
5092 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5093
5094 spin_lock_irq(&tp->lock);
5095 __tg3_set_mac_addr(tp);
5096 spin_unlock_irq(&tp->lock);
5097
5098 return 0;
5099}
5100
5101/* tp->lock is held. */
5102static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5103 dma_addr_t mapping, u32 maxlen_flags,
5104 u32 nic_addr)
5105{
5106 tg3_write_mem(tp,
5107 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5108 ((u64) mapping >> 32));
5109 tg3_write_mem(tp,
5110 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5111 ((u64) mapping & 0xffffffff));
5112 tg3_write_mem(tp,
5113 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5114 maxlen_flags);
5115
5116 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5117 tg3_write_mem(tp,
5118 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5119 nic_addr);
5120}
5121
5122static void __tg3_set_rx_mode(struct net_device *);
15f9850d
DM
5123static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5124{
5125 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5126 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5127 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5128 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5129 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5130 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5131 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5132 }
5133 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5134 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5135 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5136 u32 val = ec->stats_block_coalesce_usecs;
5137
5138 if (!netif_carrier_ok(tp->dev))
5139 val = 0;
5140
5141 tw32(HOSTCC_STAT_COAL_TICKS, val);
5142 }
5143}
1da177e4
LT
5144
5145/* tp->lock is held. */
5146static int tg3_reset_hw(struct tg3 *tp)
5147{
5148 u32 val, rdmac_mode;
5149 int i, err, limit;
5150
5151 tg3_disable_ints(tp);
5152
5153 tg3_stop_fw(tp);
5154
5155 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5156
5157 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 5158 tg3_abort_hw(tp, 1);
1da177e4
LT
5159 }
5160
5161 err = tg3_chip_reset(tp);
5162 if (err)
5163 return err;
5164
5165 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5166
5167 /* This works around an issue with Athlon chipsets on
5168 * B3 tigon3 silicon. This bit has no effect on any
5169 * other revision. But do not set this on PCI Express
5170 * chips.
5171 */
5172 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5173 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5174 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5175
5176 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5177 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5178 val = tr32(TG3PCI_PCISTATE);
5179 val |= PCISTATE_RETRY_SAME_DMA;
5180 tw32(TG3PCI_PCISTATE, val);
5181 }
5182
5183 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5184 /* Enable some hw fixes. */
5185 val = tr32(TG3PCI_MSI_DATA);
5186 val |= (1 << 26) | (1 << 28) | (1 << 29);
5187 tw32(TG3PCI_MSI_DATA, val);
5188 }
5189
5190 /* Descriptor ring init may make accesses to the
5191 * NIC SRAM area to setup the TX descriptors, so we
5192 * can only do this after the hardware has been
5193 * successfully reset.
5194 */
5195 tg3_init_rings(tp);
5196
5197 /* This value is determined during the probe time DMA
5198 * engine test, tg3_test_dma.
5199 */
5200 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5201
5202 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5203 GRC_MODE_4X_NIC_SEND_RINGS |
5204 GRC_MODE_NO_TX_PHDR_CSUM |
5205 GRC_MODE_NO_RX_PHDR_CSUM);
5206 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5207 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5208 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5209 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5210 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5211
5212 tw32(GRC_MODE,
5213 tp->grc_mode |
5214 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5215
5216 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5217 val = tr32(GRC_MISC_CFG);
5218 val &= ~0xff;
5219 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5220 tw32(GRC_MISC_CFG, val);
5221
5222 /* Initialize MBUF/DESC pool. */
cbf46853 5223 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
5224 /* Do nothing. */
5225 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5226 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5227 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5228 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5229 else
5230 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5231 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5232 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5233 }
5234#if TG3_TSO_SUPPORT != 0
5235 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5236 int fw_len;
5237
5238 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5239 TG3_TSO5_FW_RODATA_LEN +
5240 TG3_TSO5_FW_DATA_LEN +
5241 TG3_TSO5_FW_SBSS_LEN +
5242 TG3_TSO5_FW_BSS_LEN);
5243 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5244 tw32(BUFMGR_MB_POOL_ADDR,
5245 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5246 tw32(BUFMGR_MB_POOL_SIZE,
5247 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5248 }
5249#endif
5250
5251 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5252 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5253 tp->bufmgr_config.mbuf_read_dma_low_water);
5254 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5255 tp->bufmgr_config.mbuf_mac_rx_low_water);
5256 tw32(BUFMGR_MB_HIGH_WATER,
5257 tp->bufmgr_config.mbuf_high_water);
5258 } else {
5259 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5260 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5261 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5262 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5263 tw32(BUFMGR_MB_HIGH_WATER,
5264 tp->bufmgr_config.mbuf_high_water_jumbo);
5265 }
5266 tw32(BUFMGR_DMA_LOW_WATER,
5267 tp->bufmgr_config.dma_low_water);
5268 tw32(BUFMGR_DMA_HIGH_WATER,
5269 tp->bufmgr_config.dma_high_water);
5270
5271 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5272 for (i = 0; i < 2000; i++) {
5273 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5274 break;
5275 udelay(10);
5276 }
5277 if (i >= 2000) {
5278 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5279 tp->dev->name);
5280 return -ENODEV;
5281 }
5282
5283 /* Setup replenish threshold. */
5284 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5285
5286 /* Initialize TG3_BDINFO's at:
5287 * RCVDBDI_STD_BD: standard eth size rx ring
5288 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5289 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5290 *
5291 * like so:
5292 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5293 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5294 * ring attribute flags
5295 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5296 *
5297 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5298 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5299 *
5300 * The size of each ring is fixed in the firmware, but the location is
5301 * configurable.
5302 */
5303 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5304 ((u64) tp->rx_std_mapping >> 32));
5305 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5306 ((u64) tp->rx_std_mapping & 0xffffffff));
5307 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5308 NIC_SRAM_RX_BUFFER_DESC);
5309
5310 /* Don't even try to program the JUMBO/MINI buffer descriptor
5311 * configs on 5705.
5312 */
5313 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5314 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5315 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5316 } else {
5317 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5318 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5319
5320 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5321 BDINFO_FLAGS_DISABLED);
5322
5323 /* Setup replenish threshold. */
5324 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5325
5326 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5327 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5328 ((u64) tp->rx_jumbo_mapping >> 32));
5329 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5330 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5331 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5332 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5333 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5334 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5335 } else {
5336 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5337 BDINFO_FLAGS_DISABLED);
5338 }
5339
5340 }
5341
5342 /* There is only one send ring on 5705/5750, no need to explicitly
5343 * disable the others.
5344 */
5345 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5346 /* Clear out send RCB ring in SRAM. */
5347 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5348 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5349 BDINFO_FLAGS_DISABLED);
5350 }
5351
5352 tp->tx_prod = 0;
5353 tp->tx_cons = 0;
5354 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5355 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5356
5357 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5358 tp->tx_desc_mapping,
5359 (TG3_TX_RING_SIZE <<
5360 BDINFO_FLAGS_MAXLEN_SHIFT),
5361 NIC_SRAM_TX_BUFFER_DESC);
5362
5363 /* There is only one receive return ring on 5705/5750, no need
5364 * to explicitly disable the others.
5365 */
5366 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5367 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5368 i += TG3_BDINFO_SIZE) {
5369 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5370 BDINFO_FLAGS_DISABLED);
5371 }
5372 }
5373
5374 tp->rx_rcb_ptr = 0;
5375 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5376
5377 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5378 tp->rx_rcb_mapping,
5379 (TG3_RX_RCB_RING_SIZE(tp) <<
5380 BDINFO_FLAGS_MAXLEN_SHIFT),
5381 0);
5382
5383 tp->rx_std_ptr = tp->rx_pending;
5384 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5385 tp->rx_std_ptr);
5386
5387 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5388 tp->rx_jumbo_pending : 0;
5389 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5390 tp->rx_jumbo_ptr);
5391
5392 /* Initialize MAC address and backoff seed. */
5393 __tg3_set_mac_addr(tp);
5394
5395 /* MTU + ethernet header + FCS + optional VLAN tag */
5396 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5397
5398 /* The slot time is changed by tg3_setup_phy if we
5399 * run at gigabit with half duplex.
5400 */
5401 tw32(MAC_TX_LENGTHS,
5402 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5403 (6 << TX_LENGTHS_IPG_SHIFT) |
5404 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5405
5406 /* Receive rules. */
5407 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5408 tw32(RCVLPC_CONFIG, 0x0181);
5409
5410 /* Calculate RDMAC_MODE setting early, we need it to determine
5411 * the RCVLPC_STATE_ENABLE mask.
5412 */
5413 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5414 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5415 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5416 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5417 RDMAC_MODE_LNGREAD_ENAB);
5418 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5419 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
85e94ced
MC
5420
5421 /* If statement applies to 5705 and 5750 PCI devices only */
5422 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5423 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5424 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4
LT
5425 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5426 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5427 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5428 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5429 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5430 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5431 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5432 }
5433 }
5434
85e94ced
MC
5435 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5436 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5437
1da177e4
LT
5438#if TG3_TSO_SUPPORT != 0
5439 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5440 rdmac_mode |= (1 << 27);
5441#endif
5442
5443 /* Receive/send statistics. */
5444 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5445 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5446 val = tr32(RCVLPC_STATS_ENABLE);
5447 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5448 tw32(RCVLPC_STATS_ENABLE, val);
5449 } else {
5450 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5451 }
5452 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5453 tw32(SNDDATAI_STATSENAB, 0xffffff);
5454 tw32(SNDDATAI_STATSCTRL,
5455 (SNDDATAI_SCTRL_ENABLE |
5456 SNDDATAI_SCTRL_FASTUPD));
5457
5458 /* Setup host coalescing engine. */
5459 tw32(HOSTCC_MODE, 0);
5460 for (i = 0; i < 2000; i++) {
5461 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5462 break;
5463 udelay(10);
5464 }
5465
15f9850d 5466 tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
5467
5468 /* set status block DMA address */
5469 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5470 ((u64) tp->status_mapping >> 32));
5471 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5472 ((u64) tp->status_mapping & 0xffffffff));
5473
5474 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5475 /* Status/statistics block address. See tg3_timer,
5476 * the tg3_periodic_fetch_stats call there, and
5477 * tg3_get_stats to see how this works for 5705/5750 chips.
5478 */
1da177e4
LT
5479 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5480 ((u64) tp->stats_mapping >> 32));
5481 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5482 ((u64) tp->stats_mapping & 0xffffffff));
5483 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5484 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5485 }
5486
5487 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5488
5489 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5490 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5491 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5492 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5493
5494 /* Clear statistics/status block in chip, and status block in ram. */
5495 for (i = NIC_SRAM_STATS_BLK;
5496 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5497 i += sizeof(u32)) {
5498 tg3_write_mem(tp, i, 0);
5499 udelay(40);
5500 }
5501 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5502
5503 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5504 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5505 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5506 udelay(40);
5507
314fba34
MC
5508 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5509 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5510 * register to preserve the GPIO settings for LOMs. The GPIOs,
5511 * whether used as inputs or outputs, are set by boot code after
5512 * reset.
5513 */
5514 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5515 u32 gpio_mask;
5516
5517 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5518 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
5519
5520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5521 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5522 GRC_LCLCTRL_GPIO_OUTPUT3;
5523
314fba34
MC
5524 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5525
5526 /* GPIO1 must be driven high for eeprom write protect */
1da177e4
LT
5527 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5528 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 5529 }
1da177e4
LT
5530 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5531 udelay(100);
5532
5533 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e
DM
5534 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5535 tp->last_tag = 0;
1da177e4
LT
5536
5537 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5538 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5539 udelay(40);
5540 }
5541
5542 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5543 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5544 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5545 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5546 WDMAC_MODE_LNGREAD_ENAB);
5547
85e94ced
MC
5548 /* If statement applies to 5705 and 5750 PCI devices only */
5549 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5550 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
5552 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5553 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5554 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5555 /* nothing */
5556 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5557 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5558 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5559 val |= WDMAC_MODE_RX_ACCEL;
5560 }
5561 }
5562
5563 tw32_f(WDMAC_MODE, val);
5564 udelay(40);
5565
5566 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5567 val = tr32(TG3PCI_X_CAPS);
5568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5569 val &= ~PCIX_CAPS_BURST_MASK;
5570 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5571 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5572 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5573 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5574 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5575 val |= (tp->split_mode_max_reqs <<
5576 PCIX_CAPS_SPLIT_SHIFT);
5577 }
5578 tw32(TG3PCI_X_CAPS, val);
5579 }
5580
5581 tw32_f(RDMAC_MODE, rdmac_mode);
5582 udelay(40);
5583
5584 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5585 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5586 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5587 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5588 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5589 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5590 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5591 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5592#if TG3_TSO_SUPPORT != 0
5593 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5594 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5595#endif
5596 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5597 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5598
5599 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5600 err = tg3_load_5701_a0_firmware_fix(tp);
5601 if (err)
5602 return err;
5603 }
5604
5605#if TG3_TSO_SUPPORT != 0
5606 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5607 err = tg3_load_tso_firmware(tp);
5608 if (err)
5609 return err;
5610 }
5611#endif
5612
5613 tp->tx_mode = TX_MODE_ENABLE;
5614 tw32_f(MAC_TX_MODE, tp->tx_mode);
5615 udelay(100);
5616
5617 tp->rx_mode = RX_MODE_ENABLE;
5618 tw32_f(MAC_RX_MODE, tp->rx_mode);
5619 udelay(10);
5620
5621 if (tp->link_config.phy_is_low_power) {
5622 tp->link_config.phy_is_low_power = 0;
5623 tp->link_config.speed = tp->link_config.orig_speed;
5624 tp->link_config.duplex = tp->link_config.orig_duplex;
5625 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5626 }
5627
5628 tp->mi_mode = MAC_MI_MODE_BASE;
5629 tw32_f(MAC_MI_MODE, tp->mi_mode);
5630 udelay(80);
5631
5632 tw32(MAC_LED_CTRL, tp->led_ctrl);
5633
5634 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5635 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5636 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5637 udelay(10);
5638 }
5639 tw32_f(MAC_RX_MODE, tp->rx_mode);
5640 udelay(10);
5641
5642 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5643 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5644 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5645 /* Set drive transmission level to 1.2V */
5646 /* only if the signal pre-emphasis bit is not set */
5647 val = tr32(MAC_SERDES_CFG);
5648 val &= 0xfffff000;
5649 val |= 0x880;
5650 tw32(MAC_SERDES_CFG, val);
5651 }
5652 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5653 tw32(MAC_SERDES_CFG, 0x616000);
5654 }
5655
5656 /* Prevent chip from dropping frames when flow control
5657 * is enabled.
5658 */
5659 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5660
5661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5662 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5663 /* Use hardware link auto-negotiation */
5664 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5665 }
5666
5667 err = tg3_setup_phy(tp, 1);
5668 if (err)
5669 return err;
5670
5671 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5672 u32 tmp;
5673
5674 /* Clear CRC stats. */
5675 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5676 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5677 tg3_readphy(tp, 0x14, &tmp);
5678 }
5679 }
5680
5681 __tg3_set_rx_mode(tp->dev);
5682
5683 /* Initialize receive rules. */
5684 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5685 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5686 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5687 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5688
5689 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5690 limit = 8;
5691 else
5692 limit = 16;
5693 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5694 limit -= 4;
5695 switch (limit) {
5696 case 16:
5697 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
5698 case 15:
5699 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
5700 case 14:
5701 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
5702 case 13:
5703 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
5704 case 12:
5705 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
5706 case 11:
5707 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
5708 case 10:
5709 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
5710 case 9:
5711 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
5712 case 8:
5713 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
5714 case 7:
5715 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
5716 case 6:
5717 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
5718 case 5:
5719 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
5720 case 4:
5721 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
5722 case 3:
5723 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
5724 case 2:
5725 case 1:
5726
5727 default:
5728 break;
5729 };
5730
5731 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5732
5733 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5734 tg3_enable_ints(tp);
5735
5736 return 0;
5737}
5738
5739/* Called at device open time to get the chip ready for
5740 * packet processing. Invoked with tp->lock held.
5741 */
5742static int tg3_init_hw(struct tg3 *tp)
5743{
5744 int err;
5745
5746 /* Force the chip into D0. */
5747 err = tg3_set_power_state(tp, 0);
5748 if (err)
5749 goto out;
5750
5751 tg3_switch_clocks(tp);
5752
5753 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5754
5755 err = tg3_reset_hw(tp);
5756
5757out:
5758 return err;
5759}
5760
/* Accumulate a 32-bit hardware statistics register into a 64-bit
 * software counter (PSTAT, a tg3_stat64_t).  If the low word after
 * the add is smaller than the value just added, the 32-bit add
 * wrapped, so carry one into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
5767
/* Read the MAC TX/RX statistics registers and fold them into the
 * 64-bit software statistics block.  Called once per second from
 * tg3_timer() on 5705-and-later chips (see the TG3_FLG2_5705_PLUS
 * check at the call site).
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Skip the register reads while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
}
5804
/* Slow-path kernel timer, re-armed at the end of every run (it does
 * not re-arm when it hands off to the reset task).  Fires every
 * tp->timer_offset jiffies and drives: the lost-interrupt watchdog
 * for non-tagged status mode, once-per-second statistics fetch and
 * link polling, and the 120-second ASF firmware heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	spin_lock(&tp->tx_lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block was updated but the interrupt may
			 * have been lost; force the chip to raise one.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Force an immediate status block DMA. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped: schedule a full chip
			 * reset from process context.  Do not re-arm the
			 * timer here; the reset task does (RESTART_TIMER).
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->tx_lock);
			spin_unlock_irqrestore(&tp->lock, flags);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Re-run link setup when carrier state and the
			 * MAC's view of the link disagree.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode before redoing
				 * link setup.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 120 seconds. */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is alive. */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->tx_lock);
	spin_unlock_irqrestore(&tp->lock, flags);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
5903
7938109f
MC
/* Verify that the hardware can deliver an interrupt via the currently
 * configured mechanism (MSI or INTx).  Temporarily installs
 * tg3_test_isr, forces host coalescing to generate a status update
 * NOW, and polls the interrupt mailbox (up to 5 x 10ms) for a
 * non-zero value.  The production ISR is reinstalled before
 * returning.  Returns 0 on success, -EIO if no interrupt was
 * observed, -ENODEV if the device is down, or a request_irq error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the production handler for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tg3_enable_ints(tp);

	/* Force an immediate status block update to latch an interrupt
	 * into the mailbox.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the real interrupt handler (MSI, tagged or plain
	 * INTx, matching the device's current configuration).
	 */
	free_irq(tp->pdev->irq, dev);

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else {
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
5957
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-request the line with the appropriate INTx handler
	 * (tagged-status variant where supported).
	 */
	{
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;

		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	/* On reinit failure drop the IRQ we just acquired. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6027
1da177e4
LT
/* net_device open() hook.  Allocates the DMA-consistent rings,
 * selects the interrupt mechanism (MSI on eligible 5750+ chips,
 * otherwise tagged or plain INTx), initializes the hardware,
 * verifies MSI delivery via tg3_test_msi(), and starts the slow-path
 * timer before enabling interrupts and the TX queue.  Every error
 * path unwinds whatever was set up before it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	/* MSI is attempted only on 5750-and-later chips, excluding the
	 * 5750 AX/BX steppings.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
	    (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		err = request_irq(tp->pdev->irq, tg3_msi,
				  SA_SAMPLE_RANDOM, dev->name, dev);
	else {
		irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;

		err = request_irq(tp->pdev->irq, fn,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	}

	if (err) {
		/* IRQ setup failed: undo MSI enable and ring allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	err = tg3_init_hw(tp);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status needs only a 1 Hz timer; the race-prone
		 * non-tagged protocol is polled at 10 Hz (see tg3_timer).
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 120);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	if (err) {
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			/* MSI test failed and the INTx fallback failed
			 * too: tear the whole device back down.
			 */
			spin_lock_irq(&tp->lock);
			spin_lock(&tp->tx_lock);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			spin_unlock(&tp->tx_lock);
			spin_unlock_irq(&tp->lock);

			return err;
		}
	}

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	netif_start_queue(dev);

	return 0;
}
6161
#if 0
/* Debug-only helper: dumps PCI status, the mode/status registers of
 * every MAC, DMA, receive/send and host-coalescing block, the
 * NIC-side ring control blocks and descriptors, and the host status
 * and statistics blocks to the kernel log.  Compiled out; flip this
 * #if (and the one at the call site in tg3_close) to enable it when
 * diagnosing chip lockups.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
6389
6390static struct net_device_stats *tg3_get_stats(struct net_device *);
6391static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6392
/* net_device stop() hook: quiesce the TX queue and slow-path timer,
 * halt the chip, release the IRQ (and MSI vector), snapshot the
 * statistics, and free all ring and DMA memory.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Wait for any in-flight tg3_timer() run to finish. */
	del_timer_sync(&tp->timer);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);
	netif_carrier_off(tp->dev);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the final counters into the _prev blocks before
	 * hw_stats is freed; tg3_get_stats()/tg3_get_estats() return
	 * these snapshots while the device is down.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	return 0;
}
6434
6435static inline unsigned long get_stat64(tg3_stat64_t *val)
6436{
6437 unsigned long ret;
6438
6439#if (BITS_PER_LONG == 32)
6440 ret = val->low;
6441#else
6442 ret = ((u64)val->high << 32) | ((u64)val->low);
6443#endif
6444 return ret;
6445}
6446
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * copper PHY the count comes from the PHY itself: register 0x1e is
 * read-modify-written with bit 15 set and register 0x14 is then read
 * (NOTE(review): presumably the Broadcom PHY's CRC error counter —
 * confirm against the PHY datasheet) and accumulated in software.
 * All other chips use the MAC's rx_fcs_errors hardware statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		unsigned long flags;
		u32 val;

		/* PHY access must be serialized under tp->lock. */
		spin_lock_irqsave(&tp->lock, flags);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_irqrestore(&tp->lock, flags);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
6472
/* estats, old_estats and hw_stats are locals of the enclosing
 * function: add the counter accumulated since the last chip reset
 * (hw_stats) to the pre-reset snapshot (old_estats).
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
6476
/* Refresh and return the cumulative ethtool statistics: each counter
 * is the pre-teardown snapshot (tp->estats_prev, saved by tg3_close)
 * plus the delta in the live hardware stats block.  If the stats
 * block is not allocated (device closed), the snapshot is returned
 * unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
6564
/* net_device get_stats hook.  Maps the chip's hardware statistics
 * onto the generic net_device_stats counters, adding each to the
 * pre-teardown snapshot (tp->net_stats_prev, saved by tg3_close).
 * Returns the snapshot unchanged when the stats block is not
 * allocated (device closed).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the ucast/mcast/bcast counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors) +
		get_stat64(&hw_stats->rx_discards);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may need a PHY round-trip on 5700/5701. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	return stats;
}
6622
6623static inline u32 calc_crc(unsigned char *buf, int len)
6624{
6625 u32 reg;
6626 u32 tmp;
6627 int j, k;
6628
6629 reg = 0xffffffff;
6630
6631 for (j = 0; j < len; j++) {
6632 reg ^= buf[j];
6633
6634 for (k = 0; k < 8; k++) {
6635 tmp = reg & 0x01;
6636
6637 reg >>= 1;
6638
6639 if (tmp) {
6640 reg ^= 0xedb88320;
6641 }
6642 }
6643 }
6644
6645 return ~reg;
6646}
6647
6648static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6649{
6650 /* accept or reject all multicast frames */
6651 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6652 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6653 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6654 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6655}
6656
/* Program the MAC receive mode and multicast hash filter from
 * dev->flags and the device multicast list.  Caller must hold
 * tp->lock and tp->tx_lock; see tg3_set_rx_mode().
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash the address with CRC-32: the low 7 bits of
			 * the inverted CRC select one of the 128 filter
			 * bits spread over four 32-bit hash registers. */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when something actually changed; the
	 * flushed write is given 10us to settle. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
6720
/* netdev set_rx_mode hook: take tp->lock then tp->tx_lock (this exact
 * order is required throughout the driver) and delegate to
 * __tg3_set_rx_mode() for the real work.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	__tg3_set_rx_mode(dev);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
}
6731
6732#define TG3_REGDUMP_LEN (32 * 1024)
6733
/* ethtool get_regs_len hook: the register dump is always the fixed
 * 32K register window (TG3_REGDUMP_LEN). */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
6738
/* ethtool get_regs hook: snapshot the chip's register space into the
 * caller-provided buffer.
 *
 * The dump is sparse: each GET_REG32_LOOP/GET_REG32_1 positions the
 * output cursor at the register's own offset within the 32K buffer
 * (via orig_p + base), so unread ranges stay zeroed by the memset.
 * Both driver locks are held so the chip is quiescent while reading.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

/* Read one register into the buffer and advance the cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump `len' bytes of registers starting at `base', placed at the
 * matching offset in the output buffer. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its matching buffer offset. */
#define GET_REG32_1(reg)		\
do {	p = (u32 *)(orig_p + (reg));	\
	__GET_REG32((reg));		\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_LOOP(RX_CPU_BASE, 0x280);
	GET_REG32_LOOP(TX_CPU_BASE, 0x280);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist when flash is attached. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
}
6805
/* ethtool get_eeprom_len hook: report the size of the attached NVRAM
 * as probed earlier into tp->nvram_size. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
6812
6813static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6814
/* ethtool get_eeprom hook: read an arbitrary byte range from NVRAM.
 *
 * NVRAM is only word (4 byte) addressable, so the unaligned head and
 * tail of the request are handled by reading the containing word and
 * copying out just the wanted bytes.  eeprom->len tracks how many
 * bytes have actually been produced, so partial data is reported
 * correctly when a read fails mid-way.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, val, b_offset, b_count;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NVRAM words are presented little-endian to userspace. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report how much was read before the failure. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
6873
6874static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
6875
6876static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6877{
6878 struct tg3 *tp = netdev_priv(dev);
6879 int ret;
6880 u32 offset, len, b_offset, odd_len, start, end;
6881 u8 *buf;
6882
6883 if (eeprom->magic != TG3_EEPROM_MAGIC)
6884 return -EINVAL;
6885
6886 offset = eeprom->offset;
6887 len = eeprom->len;
6888
6889 if ((b_offset = (offset & 3))) {
6890 /* adjustments to start on required 4 byte boundary */
6891 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6892 if (ret)
6893 return ret;
6894 start = cpu_to_le32(start);
6895 len += b_offset;
6896 offset &= ~3;
1c8594b4
MC
6897 if (len < 4)
6898 len = 4;
1da177e4
LT
6899 }
6900
6901 odd_len = 0;
1c8594b4 6902 if (len & 3) {
1da177e4
LT
6903 /* adjustments to end on required 4 byte boundary */
6904 odd_len = 1;
6905 len = (len + 3) & ~3;
6906 ret = tg3_nvram_read(tp, offset+len-4, &end);
6907 if (ret)
6908 return ret;
6909 end = cpu_to_le32(end);
6910 }
6911
6912 buf = data;
6913 if (b_offset || odd_len) {
6914 buf = kmalloc(len, GFP_KERNEL);
6915 if (buf == 0)
6916 return -ENOMEM;
6917 if (b_offset)
6918 memcpy(buf, &start, 4);
6919 if (odd_len)
6920 memcpy(buf+len-4, &end, 4);
6921 memcpy(buf + b_offset, data, eeprom->len);
6922 }
6923
6924 ret = tg3_nvram_write_block(tp, offset, len, buf);
6925
6926 if (buf != data)
6927 kfree(buf);
6928
6929 return ret;
6930}
6931
6932static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6933{
6934 struct tg3 *tp = netdev_priv(dev);
6935
6936 cmd->supported = (SUPPORTED_Autoneg);
6937
6938 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6939 cmd->supported |= (SUPPORTED_1000baseT_Half |
6940 SUPPORTED_1000baseT_Full);
6941
6942 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6943 cmd->supported |= (SUPPORTED_100baseT_Half |
6944 SUPPORTED_100baseT_Full |
6945 SUPPORTED_10baseT_Half |
6946 SUPPORTED_10baseT_Full |
6947 SUPPORTED_MII);
6948 else
6949 cmd->supported |= SUPPORTED_FIBRE;
6950
6951 cmd->advertising = tp->link_config.advertising;
6952 if (netif_running(dev)) {
6953 cmd->speed = tp->link_config.active_speed;
6954 cmd->duplex = tp->link_config.active_duplex;
6955 }
6956 cmd->port = 0;
6957 cmd->phy_address = PHY_ADDR;
6958 cmd->transceiver = 0;
6959 cmd->autoneg = tp->link_config.autoneg;
6960 cmd->maxtxpkt = 0;
6961 cmd->maxrxpkt = 0;
6962 return 0;
6963}
6964
/* ethtool set_settings hook: apply autoneg/speed/duplex configuration.
 *
 * SerDes parts may only advertise 1000bT half/full over fibre.  Both
 * tp->lock and tp->tx_lock are taken, in that order, around the link
 * configuration update; the PHY is only reprogrammed when the
 * interface is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		/* These are the only valid advertisement bits allowed. */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
	}

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = cmd->advertising;
		/* Forced speed/duplex are meaningless while autonegotiating. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
7001
/* ethtool get_drvinfo hook: report driver name, version and the PCI
 * bus address of the device. */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
7010
7011static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7012{
7013 struct tg3 *tp = netdev_priv(dev);
7014
7015 wol->supported = WAKE_MAGIC;
7016 wol->wolopts = 0;
7017 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7018 wol->wolopts = WAKE_MAGIC;
7019 memset(&wol->sopass, 0, sizeof(wol->sopass));
7020}
7021
/* ethtool set_wol hook: enable/disable magic-packet wake-up.
 *
 * Only WAKE_MAGIC is accepted, and it is refused on SerDes parts that
 * lack the SERDES_WOL_CAP capability.  The flag update is done under
 * tp->lock.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
		return -EINVAL;

	spin_lock_irq(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_irq(&tp->lock);

	return 0;
}
7042
/* ethtool get_msglevel hook: return the current message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
7048
/* ethtool set_msglevel hook: set the message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
7054
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso hook: chips without TSO capability may only be set
 * to "disabled"; capable chips defer to the generic helper. */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
7068
7069static int tg3_nway_reset(struct net_device *dev)
7070{
7071 struct tg3 *tp = netdev_priv(dev);
7072 u32 bmcr;
7073 int r;
7074
7075 if (!netif_running(dev))
7076 return -EAGAIN;
7077
7078 spin_lock_irq(&tp->lock);
7079 r = -EINVAL;
7080 tg3_readphy(tp, MII_BMCR, &bmcr);
7081 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7082 (bmcr & BMCR_ANENABLE)) {
7083 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7084 r = 0;
7085 }
7086 spin_unlock_irq(&tp->lock);
7087
7088 return r;
7089}
7090
7091static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7092{
7093 struct tg3 *tp = netdev_priv(dev);
7094
7095 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7096 ering->rx_mini_max_pending = 0;
7097 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7098
7099 ering->rx_pending = tp->rx_pending;
7100 ering->rx_mini_pending = 0;
7101 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7102 ering->tx_pending = tp->tx_pending;
7103}
7104
/* ethtool set_ringparam hook: resize the RX, RX-jumbo and TX rings.
 *
 * The NIC is quiesced, the new sizes recorded (5705-class chips cap
 * rx_pending at 63), and if the interface is up the chip is halted
 * and fully reinitialized so the new ring sizes take effect.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	if (netif_running(dev))
		tg3_netif_stop(tp);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->rx_pending = ering->rx_pending;

	/* 5705-class hardware supports at most 64 standard RX BDs. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
7139
7140static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7141{
7142 struct tg3 *tp = netdev_priv(dev);
7143
7144 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7145 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7146 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7147}
7148
/* ethtool set_pauseparam hook: record the new flow-control settings
 * in tp->tg3_flags, then halt and reinitialize the chip (when up) so
 * they take effect.  Always returns 0.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	/* A full reinit is required for the MAC to pick up the new
	 * pause configuration. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
7181
/* ethtool get_rx_csum hook: report whether RX checksum offload is on. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
7187
/* ethtool set_rx_csum hook: toggle RX checksum offload.  Chips with
 * broken checksum hardware may only have it disabled; attempts to
 * enable it there fail with -EINVAL. */
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	spin_lock_irq(&tp->lock);
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_irq(&tp->lock);

	return 0;
}
7207
7208static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7209{
7210 struct tg3 *tp = netdev_priv(dev);
7211
7212 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7213 if (data != 0)
7214 return -EINVAL;
7215 return 0;
7216 }
7217
7218 if (data)
7219 dev->features |= NETIF_F_IP_CSUM;
7220 else
7221 dev->features &= ~NETIF_F_IP_CSUM;
7222
7223 return 0;
7224}
7225
/* ethtool get_stats_count hook: number of u64 statistics exported. */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
7230
4cafd3f5
MC
/* ethtool self_test_count hook: number of self-test result slots. */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
7235
1da177e4
LT
/* ethtool get_strings hook: copy out the statistics or self-test
 * name tables.  Any other string set is a driver bug. */
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
7250
/* ethtool get_ethtool_stats hook: refresh the accumulated estats via
 * tg3_get_estats() and copy them into the caller's u64 array. */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
7257
566f86ad
MC
#define NVRAM_TEST_SIZE 0x100

/* Self-test: validate the first 256 bytes of NVRAM.  Checks the magic
 * signature at offset 0, the bootstrap checksum at 0x10 (CRC-32 of
 * bytes 0x00-0x0f) and the manufacturing-block checksum at 0xfc
 * (CRC-32 of bytes 0x74-0xfb).  Returns 0 on success, the NVRAM read
 * error, or -EIO on a signature/checksum mismatch.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum;
	int i, j, err = 0;

	buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	/* A short read means tg3_nvram_read failed; err holds its code. */
	if (i < NVRAM_TEST_SIZE)
		goto out;

	err = -EIO;
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
		goto out;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
7299
ca43007a
MC
7300#define TG3_SERDES_TIMEOUT_SEC 2
7301#define TG3_COPPER_TIMEOUT_SEC 6
7302
7303static int tg3_test_link(struct tg3 *tp)
7304{
7305 int i, max;
7306
7307 if (!netif_running(tp->dev))
7308 return -ENODEV;
7309
7310 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7311 max = TG3_SERDES_TIMEOUT_SEC;
7312 else
7313 max = TG3_COPPER_TIMEOUT_SEC;
7314
7315 for (i = 0; i < max; i++) {
7316 if (netif_carrier_ok(tp->dev))
7317 return 0;
7318
7319 if (msleep_interruptible(1000))
7320 break;
7321 }
7322
7323 return -EIO;
7324}
7325
a71116d1
MC
/* Only test the commonly used registers */
/* Self-test: walk reg_tbl and verify, for each register applicable to
 * this chip class, that writing 0 and then all-testable-ones leaves
 * the read-only bits (read_mask) untouched and the read/write bits
 * (write_mask) holding exactly what was written.  The original value
 * is restored after each register.  Returns 0 on success, -EIO (after
 * logging the offending offset) on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
		/* Entry applicability flags: */
#define TG3_FL_5705	0x1	/* only on 5705-class chips */
#define TG3_FL_NOT_5705	0x2	/* only on pre-5705 chips */
#define TG3_FL_NOT_5788	0x4	/* skipped on 5788 */
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
		       	0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel: end of table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip class. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
7538
7942e1db
MC
7539static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7540{
7541 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7542 int i;
7543 u32 j;
7544
7545 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7546 for (j = 0; j < len; j += 4) {
7547 u32 val;
7548
7549 tg3_write_mem(tp, offset + j, test_pattern[i]);
7550 tg3_read_mem(tp, offset + j, &val);
7551 if (val != test_pattern[i])
7552 return -EIO;
7553 }
7554 }
7555 return 0;
7556}
7557
7558static int tg3_test_memory(struct tg3 *tp)
7559{
7560 static struct mem_entry {
7561 u32 offset;
7562 u32 len;
7563 } mem_tbl_570x[] = {
7564 { 0x00000000, 0x01000},
7565 { 0x00002000, 0x1c000},
7566 { 0xffffffff, 0x00000}
7567 }, mem_tbl_5705[] = {
7568 { 0x00000100, 0x0000c},
7569 { 0x00000200, 0x00008},
7570 { 0x00000b50, 0x00400},
7571 { 0x00004000, 0x00800},
7572 { 0x00006000, 0x01000},
7573 { 0x00008000, 0x02000},
7574 { 0x00010000, 0x0e000},
7575 { 0xffffffff, 0x00000}
7576 };
7577 struct mem_entry *mem_tbl;
7578 int err = 0;
7579 int i;
7580
7581 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7582 mem_tbl = mem_tbl_5705;
7583 else
7584 mem_tbl = mem_tbl_570x;
7585
7586 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7587 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7588 mem_tbl[i].len)) != 0)
7589 break;
7590 }
7591
7592 return err;
7593}
7594
c76949a6
MC
7595static int tg3_test_loopback(struct tg3 *tp)
7596{
7597 u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7598 u32 desc_idx;
7599 struct sk_buff *skb, *rx_skb;
7600 u8 *tx_data;
7601 dma_addr_t map;
7602 int num_pkts, tx_len, rx_len, i, err;
7603 struct tg3_rx_buffer_desc *desc;
7604
7605 if (!netif_running(tp->dev))
7606 return -ENODEV;
7607
7608 err = -EIO;
7609
7610 tg3_abort_hw(tp, 1);
7611
7612 /* Clearing this flag to keep interrupts disabled */
7613 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7614 tg3_reset_hw(tp);
7615
7616 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7617 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7618 MAC_MODE_PORT_MODE_GMII;
7619 tw32(MAC_MODE, mac_mode);
7620
7621 tx_len = 1514;
7622 skb = dev_alloc_skb(tx_len);
7623 tx_data = skb_put(skb, tx_len);
7624 memcpy(tx_data, tp->dev->dev_addr, 6);
7625 memset(tx_data + 6, 0x0, 8);
7626
7627 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7628
7629 for (i = 14; i < tx_len; i++)
7630 tx_data[i] = (u8) (i & 0xff);
7631
7632 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7633
7634 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7635 HOSTCC_MODE_NOW);
7636
7637 udelay(10);
7638
7639 rx_start_idx = tp->hw_status->idx[0].rx_producer;
7640
7641 send_idx = 0;
7642 num_pkts = 0;
7643
7644 tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7645
7646 send_idx++;
7647 num_pkts++;
7648
7649 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7650 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7651
7652 udelay(10);
7653
7654 for (i = 0; i < 10; i++) {
7655 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7656 HOSTCC_MODE_NOW);
7657
7658 udelay(10);
7659
7660 tx_idx = tp->hw_status->idx[0].tx_consumer;
7661 rx_idx = tp->hw_status->idx[0].rx_producer;
7662 if ((tx_idx == send_idx) &&
7663 (rx_idx == (rx_start_idx + num_pkts)))
7664 break;
7665 }
7666
7667 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7668 dev_kfree_skb(skb);
7669
7670 if (tx_idx != send_idx)
7671 goto out;
7672
7673 if (rx_idx != rx_start_idx + num_pkts)
7674 goto out;
7675
7676 desc = &tp->rx_rcb[rx_start_idx];
7677 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7678 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7679 if (opaque_key != RXD_OPAQUE_RING_STD)
7680 goto out;
7681
7682 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7683 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7684 goto out;
7685
7686 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7687 if (rx_len != tx_len)
7688 goto out;
7689
7690 rx_skb = tp->rx_std_buffers[desc_idx].skb;
7691
7692 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7693 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7694
7695 for (i = 14; i < tx_len; i++) {
7696 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7697 goto out;
7698 }
7699 err = 0;
7700
7701 /* tg3_free_rings will unmap and free the rx_skb */
7702out:
7703 return err;
7704}
7705
4cafd3f5
MC
/* ethtool self_test hook.  data[] slots: 0=nvram, 1=link, 2=registers,
 * 3=memory, 4=loopback, 5=interrupt; a slot is set to 1 and
 * ETH_TEST_FL_FAILED raised on each failure.
 *
 * NVRAM and link tests are always run.  The offline tests halt the
 * chip and its on-board CPUs under both driver locks; the interrupt
 * test alone needs interrupts, so the locks are dropped around it and
 * reacquired before the final halt/reinit restores normal operation.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev))
			tg3_netif_stop(tp);

		spin_lock_irq(&tp->lock);
		spin_lock(&tp->tx_lock);

		/* Quiesce the chip and both embedded CPUs before
		 * poking at registers and internal memory. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		tg3_nvram_unlock(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if (tg3_test_loopback(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[4] = 1;
		}

		/* The interrupt test needs interrupts delivered, so it
		 * runs with the locks released. */
		spin_unlock(&tp->tx_lock);
		spin_unlock_irq(&tp->lock);
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}
		spin_lock_irq(&tp->lock);
		spin_lock(&tp->tx_lock);

		/* Restore the chip to normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp);
			tg3_netif_start(tp);
		}
		spin_unlock(&tp->tx_lock);
		spin_unlock_irq(&tp->lock);
	}
}
7767
1da177e4
LT
7768static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7769{
7770 struct mii_ioctl_data *data = if_mii(ifr);
7771 struct tg3 *tp = netdev_priv(dev);
7772 int err;
7773
7774 switch(cmd) {
7775 case SIOCGMIIPHY:
7776 data->phy_id = PHY_ADDR;
7777
7778 /* fallthru */
7779 case SIOCGMIIREG: {
7780 u32 mii_regval;
7781
7782 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7783 break; /* We have no PHY */
7784
7785 spin_lock_irq(&tp->lock);
7786 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7787 spin_unlock_irq(&tp->lock);
7788
7789 data->val_out = mii_regval;
7790
7791 return err;
7792 }
7793
7794 case SIOCSMIIREG:
7795 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7796 break; /* We have no PHY */
7797
7798 if (!capable(CAP_NET_ADMIN))
7799 return -EPERM;
7800
7801 spin_lock_irq(&tp->lock);
7802 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7803 spin_unlock_irq(&tp->lock);
7804
7805 return err;
7806
7807 default:
7808 /* do nothing */
7809 break;
7810 }
7811 return -EOPNOTSUPP;
7812}
7813
7814#if TG3_VLAN_TAG_USED
7815static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7816{
7817 struct tg3 *tp = netdev_priv(dev);
7818
7819 spin_lock_irq(&tp->lock);
7820 spin_lock(&tp->tx_lock);
7821
7822 tp->vlgrp = grp;
7823
7824 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7825 __tg3_set_rx_mode(dev);
7826
7827 spin_unlock(&tp->tx_lock);
7828 spin_unlock_irq(&tp->lock);
7829}
7830
7831static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7832{
7833 struct tg3 *tp = netdev_priv(dev);
7834
7835 spin_lock_irq(&tp->lock);
7836 spin_lock(&tp->tx_lock);
7837 if (tp->vlgrp)
7838 tp->vlgrp->vlan_devices[vid] = NULL;
7839 spin_unlock(&tp->tx_lock);
7840 spin_unlock_irq(&tp->lock);
7841}
7842#endif
7843
15f9850d
DM
7844static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7845{
7846 struct tg3 *tp = netdev_priv(dev);
7847
7848 memcpy(ec, &tp->coal, sizeof(*ec));
7849 return 0;
7850}
7851
1da177e4
LT
7852static struct ethtool_ops tg3_ethtool_ops = {
7853 .get_settings = tg3_get_settings,
7854 .set_settings = tg3_set_settings,
7855 .get_drvinfo = tg3_get_drvinfo,
7856 .get_regs_len = tg3_get_regs_len,
7857 .get_regs = tg3_get_regs,
7858 .get_wol = tg3_get_wol,
7859 .set_wol = tg3_set_wol,
7860 .get_msglevel = tg3_get_msglevel,
7861 .set_msglevel = tg3_set_msglevel,
7862 .nway_reset = tg3_nway_reset,
7863 .get_link = ethtool_op_get_link,
7864 .get_eeprom_len = tg3_get_eeprom_len,
7865 .get_eeprom = tg3_get_eeprom,
7866 .set_eeprom = tg3_set_eeprom,
7867 .get_ringparam = tg3_get_ringparam,
7868 .set_ringparam = tg3_set_ringparam,
7869 .get_pauseparam = tg3_get_pauseparam,
7870 .set_pauseparam = tg3_set_pauseparam,
7871 .get_rx_csum = tg3_get_rx_csum,
7872 .set_rx_csum = tg3_set_rx_csum,
7873 .get_tx_csum = ethtool_op_get_tx_csum,
7874 .set_tx_csum = tg3_set_tx_csum,
7875 .get_sg = ethtool_op_get_sg,
7876 .set_sg = ethtool_op_set_sg,
7877#if TG3_TSO_SUPPORT != 0
7878 .get_tso = ethtool_op_get_tso,
7879 .set_tso = tg3_set_tso,
7880#endif
4cafd3f5
MC
7881 .self_test_count = tg3_get_test_count,
7882 .self_test = tg3_self_test,
1da177e4
LT
7883 .get_strings = tg3_get_strings,
7884 .get_stats_count = tg3_get_stats_count,
7885 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 7886 .get_coalesce = tg3_get_coalesce,
1da177e4
LT
7887};
7888
7889static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7890{
7891 u32 cursize, val;
7892
7893 tp->nvram_size = EEPROM_CHIP_SIZE;
7894
7895 if (tg3_nvram_read(tp, 0, &val) != 0)
7896 return;
7897
7898 if (swab32(val) != TG3_EEPROM_MAGIC)
7899 return;
7900
7901 /*
7902 * Size the chip by reading offsets at increasing powers of two.
7903 * When we encounter our validation signature, we know the addressing
7904 * has wrapped around, and thus have our chip size.
7905 */
7906 cursize = 0x800;
7907
7908 while (cursize < tp->nvram_size) {
7909 if (tg3_nvram_read(tp, cursize, &val) != 0)
7910 return;
7911
7912 if (swab32(val) == TG3_EEPROM_MAGIC)
7913 break;
7914
7915 cursize <<= 1;
7916 }
7917
7918 tp->nvram_size = cursize;
7919}
7920
7921static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7922{
7923 u32 val;
7924
7925 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7926 if (val != 0) {
7927 tp->nvram_size = (val >> 16) * 1024;
7928 return;
7929 }
7930 }
7931 tp->nvram_size = 0x20000;
7932}
7933
7934static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7935{
7936 u32 nvcfg1;
7937
7938 nvcfg1 = tr32(NVRAM_CFG1);
7939 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7940 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7941 }
7942 else {
7943 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7944 tw32(NVRAM_CFG1, nvcfg1);
7945 }
7946
85e94ced 7947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
7948 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7949 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7950 tp->nvram_jedecnum = JEDEC_ATMEL;
7951 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7952 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7953 break;
7954 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7955 tp->nvram_jedecnum = JEDEC_ATMEL;
7956 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7957 break;
7958 case FLASH_VENDOR_ATMEL_EEPROM:
7959 tp->nvram_jedecnum = JEDEC_ATMEL;
7960 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7961 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7962 break;
7963 case FLASH_VENDOR_ST:
7964 tp->nvram_jedecnum = JEDEC_ST;
7965 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7966 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7967 break;
7968 case FLASH_VENDOR_SAIFUN:
7969 tp->nvram_jedecnum = JEDEC_SAIFUN;
7970 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7971 break;
7972 case FLASH_VENDOR_SST_SMALL:
7973 case FLASH_VENDOR_SST_LARGE:
7974 tp->nvram_jedecnum = JEDEC_SST;
7975 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7976 break;
7977 }
7978 }
7979 else {
7980 tp->nvram_jedecnum = JEDEC_ATMEL;
7981 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7982 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7983 }
7984}
7985
361b4ac2
MC
7986static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
7987{
7988 u32 nvcfg1;
7989
7990 nvcfg1 = tr32(NVRAM_CFG1);
7991
e6af301b
MC
7992 /* NVRAM protection for TPM */
7993 if (nvcfg1 & (1 << 27))
7994 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
7995
361b4ac2
MC
7996 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
7997 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
7998 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
7999 tp->nvram_jedecnum = JEDEC_ATMEL;
8000 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8001 break;
8002 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8003 tp->nvram_jedecnum = JEDEC_ATMEL;
8004 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8005 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8006 break;
8007 case FLASH_5752VENDOR_ST_M45PE10:
8008 case FLASH_5752VENDOR_ST_M45PE20:
8009 case FLASH_5752VENDOR_ST_M45PE40:
8010 tp->nvram_jedecnum = JEDEC_ST;
8011 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8012 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8013 break;
8014 }
8015
8016 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8017 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8018 case FLASH_5752PAGE_SIZE_256:
8019 tp->nvram_pagesize = 256;
8020 break;
8021 case FLASH_5752PAGE_SIZE_512:
8022 tp->nvram_pagesize = 512;
8023 break;
8024 case FLASH_5752PAGE_SIZE_1K:
8025 tp->nvram_pagesize = 1024;
8026 break;
8027 case FLASH_5752PAGE_SIZE_2K:
8028 tp->nvram_pagesize = 2048;
8029 break;
8030 case FLASH_5752PAGE_SIZE_4K:
8031 tp->nvram_pagesize = 4096;
8032 break;
8033 case FLASH_5752PAGE_SIZE_264:
8034 tp->nvram_pagesize = 264;
8035 break;
8036 }
8037 }
8038 else {
8039 /* For eeprom, set pagesize to maximum eeprom size */
8040 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8041
8042 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8043 tw32(NVRAM_CFG1, nvcfg1);
8044 }
8045}
8046
1da177e4
LT
8047/* Chips other than 5700/5701 use the NVRAM for fetching info. */
8048static void __devinit tg3_nvram_init(struct tg3 *tp)
8049{
8050 int j;
8051
8052 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8053 return;
8054
8055 tw32_f(GRC_EEPROM_ADDR,
8056 (EEPROM_ADDR_FSM_RESET |
8057 (EEPROM_DEFAULT_CLOCK_PERIOD <<
8058 EEPROM_ADDR_CLKPERD_SHIFT)));
8059
8060 /* XXX schedule_timeout() ... */
8061 for (j = 0; j < 100; j++)
8062 udelay(10);
8063
8064 /* Enable seeprom accesses. */
8065 tw32_f(GRC_LOCAL_CTRL,
8066 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8067 udelay(100);
8068
8069 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8071 tp->tg3_flags |= TG3_FLAG_NVRAM;
8072
e6af301b 8073 tg3_enable_nvram_access(tp);
1da177e4 8074
361b4ac2
MC
8075 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8076 tg3_get_5752_nvram_info(tp);
8077 else
8078 tg3_get_nvram_info(tp);
8079
1da177e4
LT
8080 tg3_get_nvram_size(tp);
8081
e6af301b 8082 tg3_disable_nvram_access(tp);
1da177e4
LT
8083
8084 } else {
8085 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8086
8087 tg3_get_eeprom_size(tp);
8088 }
8089}
8090
8091static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8092 u32 offset, u32 *val)
8093{
8094 u32 tmp;
8095 int i;
8096
8097 if (offset > EEPROM_ADDR_ADDR_MASK ||
8098 (offset % 4) != 0)
8099 return -EINVAL;
8100
8101 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8102 EEPROM_ADDR_DEVID_MASK |
8103 EEPROM_ADDR_READ);
8104 tw32(GRC_EEPROM_ADDR,
8105 tmp |
8106 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8107 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8108 EEPROM_ADDR_ADDR_MASK) |
8109 EEPROM_ADDR_READ | EEPROM_ADDR_START);
8110
8111 for (i = 0; i < 10000; i++) {
8112 tmp = tr32(GRC_EEPROM_ADDR);
8113
8114 if (tmp & EEPROM_ADDR_COMPLETE)
8115 break;
8116 udelay(100);
8117 }
8118 if (!(tmp & EEPROM_ADDR_COMPLETE))
8119 return -EBUSY;
8120
8121 *val = tr32(GRC_EEPROM_DATA);
8122 return 0;
8123}
8124
8125#define NVRAM_CMD_TIMEOUT 10000
8126
8127static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8128{
8129 int i;
8130
8131 tw32(NVRAM_CMD, nvram_cmd);
8132 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8133 udelay(10);
8134 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8135 udelay(10);
8136 break;
8137 }
8138 }
8139 if (i == NVRAM_CMD_TIMEOUT) {
8140 return -EBUSY;
8141 }
8142 return 0;
8143}
8144
8145static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8146{
8147 int ret;
8148
8149 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8150 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8151 return -EINVAL;
8152 }
8153
8154 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8155 return tg3_nvram_read_using_eeprom(tp, offset, val);
8156
8157 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8158 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8159 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8160
8161 offset = ((offset / tp->nvram_pagesize) <<
8162 ATMEL_AT45DB0X1B_PAGE_POS) +
8163 (offset % tp->nvram_pagesize);
8164 }
8165
8166 if (offset > NVRAM_ADDR_MSK)
8167 return -EINVAL;
8168
8169 tg3_nvram_lock(tp);
8170
e6af301b 8171 tg3_enable_nvram_access(tp);
1da177e4
LT
8172
8173 tw32(NVRAM_ADDR, offset);
8174 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8175 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8176
8177 if (ret == 0)
8178 *val = swab32(tr32(NVRAM_RDDATA));
8179
8180 tg3_nvram_unlock(tp);
8181
e6af301b 8182 tg3_disable_nvram_access(tp);
1da177e4
LT
8183
8184 return ret;
8185}
8186
8187static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8188 u32 offset, u32 len, u8 *buf)
8189{
8190 int i, j, rc = 0;
8191 u32 val;
8192
8193 for (i = 0; i < len; i += 4) {
8194 u32 addr, data;
8195
8196 addr = offset + i;
8197
8198 memcpy(&data, buf + i, 4);
8199
8200 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8201
8202 val = tr32(GRC_EEPROM_ADDR);
8203 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8204
8205 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8206 EEPROM_ADDR_READ);
8207 tw32(GRC_EEPROM_ADDR, val |
8208 (0 << EEPROM_ADDR_DEVID_SHIFT) |
8209 (addr & EEPROM_ADDR_ADDR_MASK) |
8210 EEPROM_ADDR_START |
8211 EEPROM_ADDR_WRITE);
8212
8213 for (j = 0; j < 10000; j++) {
8214 val = tr32(GRC_EEPROM_ADDR);
8215
8216 if (val & EEPROM_ADDR_COMPLETE)
8217 break;
8218 udelay(100);
8219 }
8220 if (!(val & EEPROM_ADDR_COMPLETE)) {
8221 rc = -EBUSY;
8222 break;
8223 }
8224 }
8225
8226 return rc;
8227}
8228
8229/* offset and length are dword aligned */
8230static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8231 u8 *buf)
8232{
8233 int ret = 0;
8234 u32 pagesize = tp->nvram_pagesize;
8235 u32 pagemask = pagesize - 1;
8236 u32 nvram_cmd;
8237 u8 *tmp;
8238
8239 tmp = kmalloc(pagesize, GFP_KERNEL);
8240 if (tmp == NULL)
8241 return -ENOMEM;
8242
8243 while (len) {
8244 int j;
e6af301b 8245 u32 phy_addr, page_off, size;
1da177e4
LT
8246
8247 phy_addr = offset & ~pagemask;
8248
8249 for (j = 0; j < pagesize; j += 4) {
8250 if ((ret = tg3_nvram_read(tp, phy_addr + j,
8251 (u32 *) (tmp + j))))
8252 break;
8253 }
8254 if (ret)
8255 break;
8256
8257 page_off = offset & pagemask;
8258 size = pagesize;
8259 if (len < size)
8260 size = len;
8261
8262 len -= size;
8263
8264 memcpy(tmp + page_off, buf, size);
8265
8266 offset = offset + (pagesize - page_off);
8267
e6af301b 8268 tg3_enable_nvram_access(tp);
1da177e4
LT
8269
8270 /*
8271 * Before we can erase the flash page, we need
8272 * to issue a special "write enable" command.
8273 */
8274 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8275
8276 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8277 break;
8278
8279 /* Erase the target page */
8280 tw32(NVRAM_ADDR, phy_addr);
8281
8282 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8283 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8284
8285 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8286 break;
8287
8288 /* Issue another write enable to start the write. */
8289 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8290
8291 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8292 break;
8293
8294 for (j = 0; j < pagesize; j += 4) {
8295 u32 data;
8296
8297 data = *((u32 *) (tmp + j));
8298 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8299
8300 tw32(NVRAM_ADDR, phy_addr + j);
8301
8302 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8303 NVRAM_CMD_WR;
8304
8305 if (j == 0)
8306 nvram_cmd |= NVRAM_CMD_FIRST;
8307 else if (j == (pagesize - 4))
8308 nvram_cmd |= NVRAM_CMD_LAST;
8309
8310 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8311 break;
8312 }
8313 if (ret)
8314 break;
8315 }
8316
8317 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8318 tg3_nvram_exec_cmd(tp, nvram_cmd);
8319
8320 kfree(tmp);
8321
8322 return ret;
8323}
8324
8325/* offset and length are dword aligned */
8326static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8327 u8 *buf)
8328{
8329 int i, ret = 0;
8330
8331 for (i = 0; i < len; i += 4, offset += 4) {
8332 u32 data, page_off, phy_addr, nvram_cmd;
8333
8334 memcpy(&data, buf + i, 4);
8335 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8336
8337 page_off = offset % tp->nvram_pagesize;
8338
8339 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8340 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8341
8342 phy_addr = ((offset / tp->nvram_pagesize) <<
8343 ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8344 }
8345 else {
8346 phy_addr = offset;
8347 }
8348
8349 tw32(NVRAM_ADDR, phy_addr);
8350
8351 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8352
8353 if ((page_off == 0) || (i == 0))
8354 nvram_cmd |= NVRAM_CMD_FIRST;
8355 else if (page_off == (tp->nvram_pagesize - 4))
8356 nvram_cmd |= NVRAM_CMD_LAST;
8357
8358 if (i == (len - 4))
8359 nvram_cmd |= NVRAM_CMD_LAST;
8360
8361 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8362 (nvram_cmd & NVRAM_CMD_FIRST)) {
8363
8364 if ((ret = tg3_nvram_exec_cmd(tp,
8365 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8366 NVRAM_CMD_DONE)))
8367
8368 break;
8369 }
8370 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8371 /* We always do complete word writes to eeprom. */
8372 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8373 }
8374
8375 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8376 break;
8377 }
8378 return ret;
8379}
8380
8381/* offset and length are dword aligned */
8382static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8383{
8384 int ret;
8385
8386 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8387 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8388 return -EINVAL;
8389 }
8390
8391 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
8392 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8393 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
8394 udelay(40);
8395 }
8396
8397 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8398 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8399 }
8400 else {
8401 u32 grc_mode;
8402
8403 tg3_nvram_lock(tp);
8404
e6af301b
MC
8405 tg3_enable_nvram_access(tp);
8406 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8407 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 8408 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
8409
8410 grc_mode = tr32(GRC_MODE);
8411 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8412
8413 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8414 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8415
8416 ret = tg3_nvram_write_block_buffered(tp, offset, len,
8417 buf);
8418 }
8419 else {
8420 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8421 buf);
8422 }
8423
8424 grc_mode = tr32(GRC_MODE);
8425 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8426
e6af301b 8427 tg3_disable_nvram_access(tp);
1da177e4
LT
8428 tg3_nvram_unlock(tp);
8429 }
8430
8431 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 8432 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
8433 udelay(40);
8434 }
8435
8436 return ret;
8437}
8438
8439struct subsys_tbl_ent {
8440 u16 subsys_vendor, subsys_devid;
8441 u32 phy_id;
8442};
8443
8444static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8445 /* Broadcom boards. */
8446 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8447 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8448 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8449 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
8450 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8451 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8452 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
8453 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8454 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8455 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8456 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8457
8458 /* 3com boards. */
8459 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8460 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8461 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
8462 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8463 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8464
8465 /* DELL boards. */
8466 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8467 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8468 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8469 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8470
8471 /* Compaq boards. */
8472 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8473 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8474 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
8475 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8476 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8477
8478 /* IBM boards. */
8479 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8480};
8481
8482static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8483{
8484 int i;
8485
8486 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8487 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8488 tp->pdev->subsystem_vendor) &&
8489 (subsys_id_to_phy_id[i].subsys_devid ==
8490 tp->pdev->subsystem_device))
8491 return &subsys_id_to_phy_id[i];
8492 }
8493 return NULL;
8494}
8495
7d0c41ef
MC
8496/* Since this function may be called in D3-hot power state during
8497 * tg3_init_one(), only config cycles are allowed.
8498 */
8499static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 8500{
1da177e4 8501 u32 val;
7d0c41ef
MC
8502
8503 /* Make sure register accesses (indirect or otherwise)
8504 * will function correctly.
8505 */
8506 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8507 tp->misc_host_ctrl);
1da177e4
LT
8508
8509 tp->phy_id = PHY_ID_INVALID;
7d0c41ef
MC
8510 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8511
1da177e4
LT
8512 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8513 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8514 u32 nic_cfg, led_cfg;
7d0c41ef
MC
8515 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8516 int eeprom_phy_serdes = 0;
1da177e4
LT
8517
8518 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8519 tp->nic_sram_data_cfg = nic_cfg;
8520
8521 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8522 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8523 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8524 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8525 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8526 (ver > 0) && (ver < 0x100))
8527 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8528
1da177e4
LT
8529 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8530 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8531 eeprom_phy_serdes = 1;
8532
8533 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8534 if (nic_phy_id != 0) {
8535 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8536 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8537
8538 eeprom_phy_id = (id1 >> 16) << 10;
8539 eeprom_phy_id |= (id2 & 0xfc00) << 16;
8540 eeprom_phy_id |= (id2 & 0x03ff) << 0;
8541 } else
8542 eeprom_phy_id = 0;
8543
7d0c41ef
MC
8544 tp->phy_id = eeprom_phy_id;
8545 if (eeprom_phy_serdes)
8546 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8547
cbf46853 8548 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
8549 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8550 SHASTA_EXT_LED_MODE_MASK);
cbf46853 8551 else
1da177e4
LT
8552 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8553
8554 switch (led_cfg) {
8555 default:
8556 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8557 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8558 break;
8559
8560 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8561 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8562 break;
8563
8564 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8565 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
8566
8567 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8568 * read on some older 5700/5701 bootcode.
8569 */
8570 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8571 ASIC_REV_5700 ||
8572 GET_ASIC_REV(tp->pci_chip_rev_id) ==
8573 ASIC_REV_5701)
8574 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8575
1da177e4
LT
8576 break;
8577
8578 case SHASTA_EXT_LED_SHARED:
8579 tp->led_ctrl = LED_CTRL_MODE_SHARED;
8580 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8581 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8582 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8583 LED_CTRL_MODE_PHY_2);
8584 break;
8585
8586 case SHASTA_EXT_LED_MAC:
8587 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8588 break;
8589
8590 case SHASTA_EXT_LED_COMBO:
8591 tp->led_ctrl = LED_CTRL_MODE_COMBO;
8592 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8593 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8594 LED_CTRL_MODE_PHY_2);
8595 break;
8596
8597 };
8598
8599 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8600 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8601 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8602 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8603
8604 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8605 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8606 (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8607 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8608
8609 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8610 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
cbf46853 8611 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
1da177e4
LT
8612 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8613 }
8614 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8615 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8616
8617 if (cfg2 & (1 << 17))
8618 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8619
8620 /* serdes signal pre-emphasis in register 0x590 set by */
8621 /* bootcode if bit 18 is set */
8622 if (cfg2 & (1 << 18))
8623 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8624 }
7d0c41ef
MC
8625}
8626
8627static int __devinit tg3_phy_probe(struct tg3 *tp)
8628{
8629 u32 hw_phy_id_1, hw_phy_id_2;
8630 u32 hw_phy_id, hw_phy_id_masked;
8631 int err;
1da177e4
LT
8632
8633 /* Reading the PHY ID register can conflict with ASF
8634 * firwmare access to the PHY hardware.
8635 */
8636 err = 0;
8637 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8638 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8639 } else {
8640 /* Now read the physical PHY_ID from the chip and verify
8641 * that it is sane. If it doesn't look good, we fall back
8642 * to either the hard-coded table based PHY_ID and failing
8643 * that the value found in the eeprom area.
8644 */
8645 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8646 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8647
8648 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
8649 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8650 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
8651
8652 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8653 }
8654
8655 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8656 tp->phy_id = hw_phy_id;
8657 if (hw_phy_id_masked == PHY_ID_BCM8002)
8658 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8659 } else {
7d0c41ef
MC
8660 if (tp->phy_id != PHY_ID_INVALID) {
8661 /* Do nothing, phy ID already set up in
8662 * tg3_get_eeprom_hw_cfg().
8663 */
1da177e4
LT
8664 } else {
8665 struct subsys_tbl_ent *p;
8666
8667 /* No eeprom signature? Try the hardcoded
8668 * subsys device table.
8669 */
8670 p = lookup_by_subsys(tp);
8671 if (!p)
8672 return -ENODEV;
8673
8674 tp->phy_id = p->phy_id;
8675 if (!tp->phy_id ||
8676 tp->phy_id == PHY_ID_BCM8002)
8677 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8678 }
8679 }
8680
8681 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8682 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8683 u32 bmsr, adv_reg, tg3_ctrl;
8684
8685 tg3_readphy(tp, MII_BMSR, &bmsr);
8686 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8687 (bmsr & BMSR_LSTATUS))
8688 goto skip_phy_reset;
8689
8690 err = tg3_phy_reset(tp);
8691 if (err)
8692 return err;
8693
8694 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8695 ADVERTISE_100HALF | ADVERTISE_100FULL |
8696 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8697 tg3_ctrl = 0;
8698 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8699 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8700 MII_TG3_CTRL_ADV_1000_FULL);
8701 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8702 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8703 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8704 MII_TG3_CTRL_ENABLE_AS_MASTER);
8705 }
8706
8707 if (!tg3_copper_is_advertising_all(tp)) {
8708 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8709
8710 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8711 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8712
8713 tg3_writephy(tp, MII_BMCR,
8714 BMCR_ANENABLE | BMCR_ANRESTART);
8715 }
8716 tg3_phy_set_wirespeed(tp);
8717
8718 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8719 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8720 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8721 }
8722
8723skip_phy_reset:
8724 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8725 err = tg3_init_5401phy_dsp(tp);
8726 if (err)
8727 return err;
8728 }
8729
8730 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8731 err = tg3_init_5401phy_dsp(tp);
8732 }
8733
1da177e4
LT
8734 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8735 tp->link_config.advertising =
8736 (ADVERTISED_1000baseT_Half |
8737 ADVERTISED_1000baseT_Full |
8738 ADVERTISED_Autoneg |
8739 ADVERTISED_FIBRE);
8740 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8741 tp->link_config.advertising &=
8742 ~(ADVERTISED_1000baseT_Half |
8743 ADVERTISED_1000baseT_Full);
8744
8745 return err;
8746}
8747
8748static void __devinit tg3_read_partno(struct tg3 *tp)
8749{
8750 unsigned char vpd_data[256];
8751 int i;
8752
8753 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8754 /* Sun decided not to put the necessary bits in the
8755 * NVRAM of their onboard tg3 parts :(
8756 */
8757 strcpy(tp->board_part_number, "Sun 570X");
8758 return;
8759 }
8760
8761 for (i = 0; i < 256; i += 4) {
8762 u32 tmp;
8763
8764 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8765 goto out_not_found;
8766
8767 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
8768 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
8769 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8770 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8771 }
8772
8773 /* Now parse and find the part number. */
8774 for (i = 0; i < 256; ) {
8775 unsigned char val = vpd_data[i];
8776 int block_end;
8777
8778 if (val == 0x82 || val == 0x91) {
8779 i = (i + 3 +
8780 (vpd_data[i + 1] +
8781 (vpd_data[i + 2] << 8)));
8782 continue;
8783 }
8784
8785 if (val != 0x90)
8786 goto out_not_found;
8787
8788 block_end = (i + 3 +
8789 (vpd_data[i + 1] +
8790 (vpd_data[i + 2] << 8)));
8791 i += 3;
8792 while (i < block_end) {
8793 if (vpd_data[i + 0] == 'P' &&
8794 vpd_data[i + 1] == 'N') {
8795 int partno_len = vpd_data[i + 2];
8796
8797 if (partno_len > 24)
8798 goto out_not_found;
8799
8800 memcpy(tp->board_part_number,
8801 &vpd_data[i + 3],
8802 partno_len);
8803
8804 /* Success. */
8805 return;
8806 }
8807 }
8808
8809 /* Part number not found. */
8810 goto out_not_found;
8811 }
8812
8813out_not_found:
8814 strcpy(tp->board_part_number, "none");
8815}
8816
#ifdef CONFIG_SPARC64
/* Detect Sun onboard 570X parts by reading the OBP
 * "subsystem-vendor-id" property for this PCI node.
 */
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
{
	struct pci_dev *pdev = tp->pdev;
	struct pcidev_cookie *pcp = pdev->sysdata;

	if (pcp != NULL) {
		int node = pcp->prom_node;
		u32 venid;
		int err;

		err = prom_getproperty(node, "subsystem-vendor-id",
				       (char *) &venid, sizeof(venid));
		if (err == 0 || err == -1)
			return 0;
		if (venid == PCI_VENDOR_ID_SUN)
			return 1;
	}
	return 0;
}
#endif
8838
/* One-time probe of chip revision, bus mode, and board identity.
 * Populates tp->tg3_flags/tg3_flags2 with the feature and bug-workaround
 * bits that the rest of the driver keys on, brings the chip into D0,
 * initializes NVRAM access, and probes the PHY.  Must run before the
 * netdevice is registered.  Returns 0 on success or a negative errno;
 * a PHY probe failure is reported but deliberately non-fatal here.
 */
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	/* Host bridges known to reorder posted writes to the mailbox
	 * registers; if one is present every mailbox write is read back
	 * to force ordering.
	 */
	static struct pci_device_id write_reorder_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801AA_8) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801AB_8) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801BA_11) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801BA_6) },
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
		{ },
	};
	u32 misc_ctrl_reg;
	u32 cacheline_sz_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

#ifdef CONFIG_SPARC64
	if (tg3_is_sun_570X(tp))
		tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
#endif

	/* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers.  It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Unpack the packed byte fields of the cacheline-size dword
	 * (PCI config 0x0c): cacheline size, latency timer, header
	 * type, BIST.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
			      &cacheline_sz_reg);

	tp->pci_cacheline_sz = (cacheline_sz_reg >> 0)  & 0xff;
	tp->pci_lat_timer    = (cacheline_sz_reg >> 8)  & 0xff;
	tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
	tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

	/* 5750 and newer have TSO offloaded in hardware. */
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO;

	if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

	/* Bump a too-small latency timer on 5703 and write the packed
	 * byte fields back out.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;

		cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) << 0);
		cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) << 8);
		cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
		cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);

		pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
				       cacheline_sz_reg);
	}

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;

		/* If this is a 5700 BX chipset, and we are in PCI-X
		 * mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
			u32 pm_reg;
			u16 pci_cmd;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	/* Back to back register writes can cause problems on this chip,
	 * the workaround is to read back all reg writes except those to
	 * mailbox regs.  See tg3_write_indirect_reg32().
	 *
	 * PCI Express 5750_A0 rev chips need this workaround too.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	     tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
		tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, 0);
	if (err) {
		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
		       pci_name(tp->pdev));
		return err;
	}

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
	tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN)
		tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;

	/* Host coalescing: 32-byte status block updates are usable on
	 * everything except the earliest 5700 revisions.
	 */
	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Initialize MAC MI mode, polling disabled. */
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	/* Broadcom's driver says that CIOBE multisplit has a bug */
#if 0
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
		tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
		tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
	}
#endif
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	/* Tagged status avoids spurious interrupts; 5700 and 5788
	 * parts don't use it.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
		       pci_name(tp->pdev), err);
		/* ... but do not return immediately ... */
	}

	tg3_read_partno(tp);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
		else
			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
				  TG3_FLAG_USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	/* 5700 BX chips need to have their TX producer index mailboxes
	 * written twice to workaround a bug.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
	else
		tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;

	/* It seems all chips can get confused if TX buffers
	 * straddle the 4GB address boundary in some cases.
	 */
	tp->dev->hard_start_xmit = tg3_start_xmit;

	/* RX offset of 2 aligns the IP header; the 5701 in PCI-X mode
	 * cannot use the unaligned DMA and must use offset 0.
	 */
	tp->rx_offset = 2;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;

	/* By default, disable wake-on-lan.  User can change this
	 * using ETHTOOL_SWOL.
	 */
	tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;

	return err;
}
9241
9242#ifdef CONFIG_SPARC64
9243static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9244{
9245 struct net_device *dev = tp->dev;
9246 struct pci_dev *pdev = tp->pdev;
9247 struct pcidev_cookie *pcp = pdev->sysdata;
9248
9249 if (pcp != NULL) {
9250 int node = pcp->prom_node;
9251
9252 if (prom_getproplen(node, "local-mac-address") == 6) {
9253 prom_getproperty(node, "local-mac-address",
9254 dev->dev_addr, 6);
9255 return 0;
9256 }
9257 }
9258 return -ENODEV;
9259}
9260
9261static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9262{
9263 struct net_device *dev = tp->dev;
9264
9265 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9266 return 0;
9267}
9268#endif
9269
9270static int __devinit tg3_get_device_address(struct tg3 *tp)
9271{
9272 struct net_device *dev = tp->dev;
9273 u32 hi, lo, mac_offset;
9274
9275#ifdef CONFIG_SPARC64
9276 if (!tg3_get_macaddr_sparc(tp))
9277 return 0;
9278#endif
9279
9280 mac_offset = 0x7c;
9281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9282 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) {
9283 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9284 mac_offset = 0xcc;
9285 if (tg3_nvram_lock(tp))
9286 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9287 else
9288 tg3_nvram_unlock(tp);
9289 }
9290
9291 /* First try to get it from MAC address mailbox. */
9292 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9293 if ((hi >> 16) == 0x484b) {
9294 dev->dev_addr[0] = (hi >> 8) & 0xff;
9295 dev->dev_addr[1] = (hi >> 0) & 0xff;
9296
9297 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9298 dev->dev_addr[2] = (lo >> 24) & 0xff;
9299 dev->dev_addr[3] = (lo >> 16) & 0xff;
9300 dev->dev_addr[4] = (lo >> 8) & 0xff;
9301 dev->dev_addr[5] = (lo >> 0) & 0xff;
9302 }
9303 /* Next, try NVRAM. */
9304 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
9305 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9306 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9307 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9308 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9309 dev->dev_addr[2] = ((lo >> 0) & 0xff);
9310 dev->dev_addr[3] = ((lo >> 8) & 0xff);
9311 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9312 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9313 }
9314 /* Finally just fetch it out of the MAC control regs. */
9315 else {
9316 hi = tr32(MAC_ADDR_0_HIGH);
9317 lo = tr32(MAC_ADDR_0_LOW);
9318
9319 dev->dev_addr[5] = lo & 0xff;
9320 dev->dev_addr[4] = (lo >> 8) & 0xff;
9321 dev->dev_addr[3] = (lo >> 16) & 0xff;
9322 dev->dev_addr[2] = (lo >> 24) & 0xff;
9323 dev->dev_addr[1] = hi & 0xff;
9324 dev->dev_addr[0] = (hi >> 8) & 0xff;
9325 }
9326
9327 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9328#ifdef CONFIG_SPARC64
9329 if (!tg3_get_default_macaddr_sparc(tp))
9330 return 0;
9331#endif
9332 return -EINVAL;
9333 }
9334 return 0;
9335}
9336
59e6b434
DM
9337#define BOUNDARY_SINGLE_CACHELINE 1
9338#define BOUNDARY_MULTI_CACHELINE 2
9339
9340static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9341{
9342 int cacheline_size;
9343 u8 byte;
9344 int goal;
9345
9346 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9347 if (byte == 0)
9348 cacheline_size = 1024;
9349 else
9350 cacheline_size = (int) byte * 4;
9351
9352 /* On 5703 and later chips, the boundary bits have no
9353 * effect.
9354 */
9355 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9356 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9357 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9358 goto out;
9359
9360#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9361 goal = BOUNDARY_MULTI_CACHELINE;
9362#else
9363#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9364 goal = BOUNDARY_SINGLE_CACHELINE;
9365#else
9366 goal = 0;
9367#endif
9368#endif
9369
9370 if (!goal)
9371 goto out;
9372
9373 /* PCI controllers on most RISC systems tend to disconnect
9374 * when a device tries to burst across a cache-line boundary.
9375 * Therefore, letting tg3 do so just wastes PCI bandwidth.
9376 *
9377 * Unfortunately, for PCI-E there are only limited
9378 * write-side controls for this, and thus for reads
9379 * we will still get the disconnects. We'll also waste
9380 * these PCI cycles for both read and write for chips
9381 * other than 5700 and 5701 which do not implement the
9382 * boundary bits.
9383 */
9384 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9385 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9386 switch (cacheline_size) {
9387 case 16:
9388 case 32:
9389 case 64:
9390 case 128:
9391 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9392 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9393 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9394 } else {
9395 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9396 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9397 }
9398 break;
9399
9400 case 256:
9401 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9402 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9403 break;
9404
9405 default:
9406 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9407 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9408 break;
9409 };
9410 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9411 switch (cacheline_size) {
9412 case 16:
9413 case 32:
9414 case 64:
9415 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9416 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9417 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9418 break;
9419 }
9420 /* fallthrough */
9421 case 128:
9422 default:
9423 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9424 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9425 break;
9426 };
9427 } else {
9428 switch (cacheline_size) {
9429 case 16:
9430 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9431 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9432 DMA_RWCTRL_WRITE_BNDRY_16);
9433 break;
9434 }
9435 /* fallthrough */
9436 case 32:
9437 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9438 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9439 DMA_RWCTRL_WRITE_BNDRY_32);
9440 break;
9441 }
9442 /* fallthrough */
9443 case 64:
9444 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9445 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9446 DMA_RWCTRL_WRITE_BNDRY_64);
9447 break;
9448 }
9449 /* fallthrough */
9450 case 128:
9451 if (goal == BOUNDARY_SINGLE_CACHELINE) {
9452 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9453 DMA_RWCTRL_WRITE_BNDRY_128);
9454 break;
9455 }
9456 /* fallthrough */
9457 case 256:
9458 val |= (DMA_RWCTRL_READ_BNDRY_256 |
9459 DMA_RWCTRL_WRITE_BNDRY_256);
9460 break;
9461 case 512:
9462 val |= (DMA_RWCTRL_READ_BNDRY_512 |
9463 DMA_RWCTRL_WRITE_BNDRY_512);
9464 break;
9465 case 1024:
9466 default:
9467 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9468 DMA_RWCTRL_WRITE_BNDRY_1024);
9469 break;
9470 };
9471 }
9472
9473out:
9474 return val;
9475}
9476
1da177e4
LT
/* Run one host<->NIC DMA transfer of @size bytes through the chip's
 * internal DMA engines using a buffer descriptor placed in NIC SRAM,
 * then poll the completion FIFO.
 * @buf/@buf_dma: host test buffer and its DMA address.
 * @to_device: non-zero = host -> chip (read-DMA engine),
 *             zero = chip -> host (write-DMA engine).
 * Returns 0 on completion, -ENODEV if the transfer does not complete
 * within ~4ms (40 polls x 100us).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA engines and their completion FIFOs before
	 * starting the test.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* NOTE(review): 0x00002100 is the NIC-local buffer address used
	 * for the test — presumably scratch SRAM (the disabled read-back
	 * check in tg3_test_dma() reads memory at 0x2100); confirm
	 * against chip documentation.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* cqid/sqid select the completion and send queues for
		 * each direction — values per Broadcom reference code.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word-by-word through the
	 * PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for our descriptor address. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
9557
ded7340d 9558#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
9559
/* Choose DMA read/write control settings for this chip/bus combination
 * and, on 5700/5701, verify them with a host<->NIC DMA loopback test,
 * tightening the write boundary if corruption is observed.  Leaves the
 * final value in tp->dma_rwctrl and in the TG3PCI_DMA_RW_CTRL register.
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA fails even with the safest settings.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes, then merge in the
	 * arch/bus-dependent boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Watermark / tuning bits differ per bus type; the magic
	 * constants below come from Broadcom reference code.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 have the write-DMA bug that needs the
	 * loopback verification below; all other chips are done.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (word == index). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption seen: retry once at the safest
			 * (16-byte) write boundary before giving up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
9727
9728static void __devinit tg3_init_link_config(struct tg3 *tp)
9729{
9730 tp->link_config.advertising =
9731 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
9732 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
9733 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
9734 ADVERTISED_Autoneg | ADVERTISED_MII);
9735 tp->link_config.speed = SPEED_INVALID;
9736 tp->link_config.duplex = DUPLEX_INVALID;
9737 tp->link_config.autoneg = AUTONEG_ENABLE;
9738 netif_carrier_off(tp->dev);
9739 tp->link_config.active_speed = SPEED_INVALID;
9740 tp->link_config.active_duplex = DUPLEX_INVALID;
9741 tp->link_config.phy_is_low_power = 0;
9742 tp->link_config.orig_speed = SPEED_INVALID;
9743 tp->link_config.orig_duplex = DUPLEX_INVALID;
9744 tp->link_config.orig_autoneg = AUTONEG_INVALID;
9745}
9746
9747static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
9748{
9749 tp->bufmgr_config.mbuf_read_dma_low_water =
9750 DEFAULT_MB_RDMA_LOW_WATER;
9751 tp->bufmgr_config.mbuf_mac_rx_low_water =
9752 DEFAULT_MB_MACRX_LOW_WATER;
9753 tp->bufmgr_config.mbuf_high_water =
9754 DEFAULT_MB_HIGH_WATER;
9755
9756 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
9757 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
9758 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
9759 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
9760 tp->bufmgr_config.mbuf_high_water_jumbo =
9761 DEFAULT_MB_HIGH_WATER_JUMBO;
9762
9763 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
9764 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
9765}
9766
9767static char * __devinit tg3_phy_string(struct tg3 *tp)
9768{
9769 switch (tp->phy_id & PHY_ID_MASK) {
9770 case PHY_ID_BCM5400: return "5400";
9771 case PHY_ID_BCM5401: return "5401";
9772 case PHY_ID_BCM5411: return "5411";
9773 case PHY_ID_BCM5701: return "5701";
9774 case PHY_ID_BCM5703: return "5703";
9775 case PHY_ID_BCM5704: return "5704";
9776 case PHY_ID_BCM5705: return "5705";
9777 case PHY_ID_BCM5750: return "5750";
85e94ced 9778 case PHY_ID_BCM5752: return "5752";
1da177e4
LT
9779 case PHY_ID_BCM8002: return "8002/serdes";
9780 case 0: return "serdes";
9781 default: return "unknown";
9782 };
9783}
9784
9785static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
9786{
9787 struct pci_dev *peer;
9788 unsigned int func, devnr = tp->pdev->devfn & ~7;
9789
9790 for (func = 0; func < 8; func++) {
9791 peer = pci_get_slot(tp->pdev->bus, devnr | func);
9792 if (peer && peer != tp->pdev)
9793 break;
9794 pci_dev_put(peer);
9795 }
9796 if (!peer || peer == tp->pdev)
9797 BUG();
9798
9799 /*
9800 * We don't need to keep the refcount elevated; there's no way
9801 * to remove one half of this device without removing the other
9802 */
9803 pci_dev_put(peer);
9804
9805 return peer;
9806}
9807
15f9850d
DM
9808static void __devinit tg3_init_coal(struct tg3 *tp)
9809{
9810 struct ethtool_coalesce *ec = &tp->coal;
9811
9812 memset(ec, 0, sizeof(*ec));
9813 ec->cmd = ETHTOOL_GCOALESCE;
9814 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
9815 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
9816 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
9817 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
9818 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
9819 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
9820 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
9821 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
9822 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
9823
9824 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
9825 HOSTCC_MODE_CLRTICK_TXBD)) {
9826 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
9827 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
9828 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
9829 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
9830 }
9831}
9832
1da177e4
LT
/* PCI probe routine.  Called once per matching device; brings the chip
 * from whatever state the firmware/previous driver left it in to a
 * registered, ready-to-open net_device.
 *
 * Ordering here matters: PCI resources and DMA masks must be set up
 * before the netdev is allocated; registers must be mapped before
 * tg3_get_invariants() reads chip IDs; the chip is conditionally reset
 * before the DMA self-test so a stale WDMAC doesn't produce spurious
 * bus activity; register_netdev() comes last, after which the PCI
 * config space snapshot is saved for later core-clock resets/resume.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released via the goto-cleanup ladder at the end.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pci_using_dac, pm_cap;

	/* Print the driver banner only for the first device probed. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* Configure DMA attributes.  Try 64-bit first; fall back to
	 * 32-bit addressing, remembering which one succeeded so the
	 * HIGHDMA feature flag can be set accordingly below.
	 */
	err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
	if (!err) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
		if (err < 0) {
			printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
			       "for consistent allocations\n");
			goto err_out_free_res;
		}
	} else {
		err = pci_set_dma_mask(pdev, 0xffffffffULL);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Initialize the software state in the private area. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	/* Map the register window; tr32/tw32 below depend on this. */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tg3_init_bufmgr_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Read chip revision, flags and quirks from the hardware. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* 5705+ chips use smaller buffer-manager watermarks. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
	}

#if TG3_TSO_SUPPORT != 0
	/* Decide TSO capability: HW TSO chips always can; 5700/5701,
	 * 5705 A0 and ASF-enabled chips cannot do firmware TSO.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is off by default, user can enable using ethtool.  */
#if 0
	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
		dev->features |= NETIF_F_TSO;
#endif

#endif

	/* 5705 A1 on a slow bus without TSO needs a smaller rx ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->pdev_peer = tg3_find_5704_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		dev->features &= ~NETIF_F_HIGHDMA;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
	       ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
		((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
		((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
	       ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
	       dev->name, tp->dma_rwctrl);

	return 0;

	/* Error cleanup ladder: unwind in reverse acquisition order. */
err_out_iounmap:
	iounmap(tp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
10141
10142static void __devexit tg3_remove_one(struct pci_dev *pdev)
10143{
10144 struct net_device *dev = pci_get_drvdata(pdev);
10145
10146 if (dev) {
10147 struct tg3 *tp = netdev_priv(dev);
10148
10149 unregister_netdev(dev);
10150 iounmap(tp->regs);
10151 free_netdev(dev);
10152 pci_release_regions(pdev);
10153 pci_disable_device(pdev);
10154 pci_set_drvdata(pdev, NULL);
10155 }
10156}
10157
/* PM suspend hook.  Quiesce the interface (NAPI/queue stop, timer,
 * interrupts), detach it, halt the chip, then drop into the requested
 * power state.  If the power transition fails, the chip is re-initialized
 * and the interface restarted so the device stays usable.
 *
 * Lock order throughout this driver: tp->lock (irq-safe) is always
 * taken before tp->tx_lock.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	tg3_disable_ints(tp);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	netif_device_detach(dev);

	/* Halt the chip under both locks before changing power state. */
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the device back up so
		 * the system is left in a working state.
		 */
		spin_lock_irq(&tp->lock);
		spin_lock(&tp->tx_lock);

		tg3_init_hw(tp);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		spin_unlock(&tp->tx_lock);
		spin_unlock_irq(&tp->lock);
	}

	return err;
}
10204
/* PM resume hook: restore PCI config space, return the chip to full
 * power, then re-initialize the hardware and restart the interface
 * under the usual tp->lock -> tp->tx_lock ordering.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* If the interface was down at suspend, nothing to restore. */
	if (!netif_running(dev))
		return 0;

	/* Config space was saved by tg3_init_one()/tg3_suspend(). */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, 0);
	if (err)
		return err;

	netif_device_attach(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tg3_init_hw(tp);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_enable_ints(tp);

	tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
10239
/* PCI driver glue: binds tg3_pci_tbl device IDs to the probe/remove
 * and power-management entry points above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
10248
10249static int __init tg3_init(void)
10250{
10251 return pci_module_init(&tg3_driver);
10252}
10253
/* Module exit point: unregistering the driver invokes
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
10258
/* Wire the init/exit functions into the module load/unload paths. */
module_init(tg3_init);
module_exit(tg3_cleanup);