/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif
#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			115
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"October 14, 2010"
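/* With TG3_MAJ_NUM == 3 and TG3_MIN_NUM == 115 above, DRV_MODULE_VERSION
 * stringifies to "3.115".
 */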
#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
	  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
	 RX_STD_MAX_SIZE_5717 : 512)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
	  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
	 1024 : 256)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
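/* Example of the mask trick described above: NEXT_TX(510) == 511 and
 * NEXT_TX(511) == 0, so the index wraps without a hw modulo.  This only
 * works while TG3_TX_RING_SIZE stays a power of two.
 */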
#define TG3_RX_DMA_ALIGN		16
#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
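/* Note: the DMA mapping is TG3_DMA_BYTE_ENAB bytes larger than the buffer
 * itself, presumably so the NIC can round its DMA bursts up to a full
 * 64-byte enable without touching unmapped memory.
 */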
#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
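/* e.g. with the default tx_pending of TG3_DEF_TX_RING_PENDING (511),
 * the queue is woken once at least 127 descriptors are free.
 */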
#define TG3_RAW_IP_ALIGN 2
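/* Two bytes of pad in front of the 14-byte Ethernet header leave the IP
 * header 4-byte aligned.
 */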
/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
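/* Usage sketch (illustrative, not part of the original source):
 *	modprobe tg3 tg3_debug=0x3f
 * enables NETIF_MSG_DRV through NETIF_MSG_IFUP; leaving the parameter at
 * -1 selects TG3_DEF_MSG_ENABLE above.
 */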
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },
	{ "ring_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);	/* read back to flush the posted write */
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
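/* The tp->read32/tp->write32 hooks behind these macros are bound at probe
 * time (elsewhere in this file) to the direct, flushing, or indirect
 * accessors defined above, depending on which chip bugs and workarounds
 * apply to the particular device.
 */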
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
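/* The APE (management processor) shares the NIC with the host.  It exports
 * a small bank of lock registers, 4 bytes per lock; the helpers below grab
 * and release them on behalf of the driver.
 */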
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
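/* The MI busy-wait loops below poll every 10 usec, so PHY_BUSY_LOOPS bounds
 * a PHY read/write at roughly 50 ms.
 */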
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
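	/* Each pass of the loop below polls once and then waits 8 usec,
	 * hence the divide-by-8 (rounded up) above.
	 */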
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");
		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
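/* Pause advertisement encoding used here and in the 1000X variant below:
 *	TX+RX -> PAUSE_CAP (symmetric pause only)
 *	TX    -> PAUSE_ASYM (asymmetric pause toward the link partner)
 *	RX    -> PAUSE_CAP | PAUSE_ASYM (symmetric or asymmetric)
 */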
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    (tp->link_config.active_speed == SPEED_1000 ||
	     (tp->link_config.active_speed == SPEED_100 &&
	      tp->link_config.active_duplex == DUPLEX_FULL))) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}
	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x000a, 0x310b);
		tg3_phydsp_write(tp, 0x201f, 0x9506);
		tg3_phydsp_write(tp, 0x401f, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2108 static void tg3_frob_aux_power(struct tg3 *tp)
2110 struct tg3 *tp_peer = tp;
2112 /* The GPIOs do something completely different on 57765. */
2113 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2114 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2115 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2118 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2119 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2120 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2121 struct net_device *dev_peer;
2123 dev_peer = pci_get_drvdata(tp->pdev_peer);
2124 /* remove_one() may have been run on the peer. */
2128 tp_peer = netdev_priv(dev_peer);
2131 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2132 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2133 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2134 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2137 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2138 (GRC_LCLCTRL_GPIO_OE0 |
2139 GRC_LCLCTRL_GPIO_OE1 |
2140 GRC_LCLCTRL_GPIO_OE2 |
2141 GRC_LCLCTRL_GPIO_OUTPUT0 |
2142 GRC_LCLCTRL_GPIO_OUTPUT1),
2144 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2145 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2146 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2147 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2148 GRC_LCLCTRL_GPIO_OE1 |
2149 GRC_LCLCTRL_GPIO_OE2 |
2150 GRC_LCLCTRL_GPIO_OUTPUT0 |
2151 GRC_LCLCTRL_GPIO_OUTPUT1 |
2153 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2155 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2156 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2158 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2159 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2162 u32 grc_local_ctrl = 0;
2164 if (tp_peer != tp &&
2165 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2168 /* Workaround to prevent overdrawing Amps. */
2169 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2171 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2172 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2173 grc_local_ctrl, 100);
2176 /* On 5753 and variants, GPIO2 cannot be used. */
2177 no_gpio2 = tp->nic_sram_data_cfg &
2178 NIC_SRAM_DATA_CFG_NO_GPIO2;
2180 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2181 GRC_LCLCTRL_GPIO_OE1 |
2182 GRC_LCLCTRL_GPIO_OE2 |
2183 GRC_LCLCTRL_GPIO_OUTPUT1 |
2184 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
2189 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2190 grc_local_ctrl, 100);
2192 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2194 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2195 grc_local_ctrl, 100);
			if (no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
2204 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2205 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2206 if (tp_peer != tp &&
2207 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2210 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2211 (GRC_LCLCTRL_GPIO_OE1 |
2212 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2214 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2215 GRC_LCLCTRL_GPIO_OE1, 100);
2217 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2218 (GRC_LCLCTRL_GPIO_OE1 |
2219 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2224 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2226 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2228 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2229 if (speed != SPEED_10)
2231 } else if (speed == SPEED_10)
2237 static int tg3_setup_phy(struct tg3 *, int);
2239 #define RESET_KIND_SHUTDOWN 0
2240 #define RESET_KIND_INIT 1
2241 #define RESET_KIND_SUSPEND 2
2243 static void tg3_write_sig_post_reset(struct tg3 *, int);
2244 static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;
2250 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2251 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2252 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2253 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2257 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2258 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2265 val = tr32(GRC_MISC_CFG);
2266 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2269 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;
2274 tg3_writephy(tp, MII_ADVERTISE, 0);
2275 tg3_writephy(tp, MII_BMCR,
2276 BMCR_ANENABLE | BMCR_ANRESTART);
2278 tg3_writephy(tp, MII_TG3_FET_TEST,
2279 phytest | MII_TG3_FET_SHADOW_EN);
2280 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2281 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
2286 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2289 } else if (do_low_power) {
2290 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2291 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2293 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2294 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2295 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2296 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2297 MII_TG3_AUXCTL_PCTL_VREG_11V);
	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
2303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2304 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2305 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2306 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2309 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2310 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2311 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2312 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2313 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2314 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2317 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2320 /* tp->lock is held. */
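/* NVRAM access is arbitrated between the driver and other on-chip agents
 * (e.g. firmware) through the SWARB register: the driver posts
 * SWARB_REQ_SET1 and polls for SWARB_GNT1 before touching the flash.
 * nvram_lock_cnt lets the lock nest, so only the outermost caller
 * performs the hardware handshake.
 */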
2321 static int tg3_nvram_lock(struct tg3 *tp)
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

2326 if (tp->nvram_lock_cnt == 0) {
2327 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2328 for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
2343 /* tp->lock is held. */
2344 static void tg3_nvram_unlock(struct tg3 *tp)
2346 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2347 if (tp->nvram_lock_cnt > 0)
2348 tp->nvram_lock_cnt--;
2349 if (tp->nvram_lock_cnt == 0)
2350 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2354 /* tp->lock is held. */
2355 static void tg3_enable_nvram_access(struct tg3 *tp)
2357 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2358 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2359 u32 nvaccess = tr32(NVRAM_ACCESS);
2361 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2365 /* tp->lock is held. */
2366 static void tg3_disable_nvram_access(struct tg3 *tp)
2368 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2369 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2370 u32 nvaccess = tr32(NVRAM_ACCESS);
2372 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2376 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2377 u32 offset, u32 *val)
{
	int i;
	u32 tmp;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;
2385 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2386 EEPROM_ADDR_DEVID_MASK |
2388 tw32(GRC_EEPROM_ADDR,
2390 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2391 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2392 EEPROM_ADDR_ADDR_MASK) |
2393 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2395 for (i = 0; i < 1000; i++) {
2396 tmp = tr32(GRC_EEPROM_ADDR);
2398 if (tmp & EEPROM_ADDR_COMPLETE)
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;
2405 tmp = tr32(GRC_EEPROM_DATA);
	/* The data will always be opposite the native endian
	 * format. Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2416 #define NVRAM_CMD_TIMEOUT 10000
2418 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2422 tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
2437 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2439 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2440 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2441 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2442 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2443 (tp->nvram_jedecnum == JEDEC_ATMEL))
2445 addr = ((addr / tp->nvram_pagesize) <<
2446 ATMEL_AT45DB0X1B_PAGE_POS) +
2447 (addr % tp->nvram_pagesize);
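	/* Worked illustration (numbers assumed for the example only):
	 * with a 264-byte page, byte address 600 lies in page 2 at
	 * offset 72, so the translated address is (2 << 9) + 72 = 1096.
	 */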
2452 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2454 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2455 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2456 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2457 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2458 (tp->nvram_jedecnum == JEDEC_ATMEL))
2460 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2461 tp->nvram_pagesize) +
2462 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2467 /* NOTE: Data read in from NVRAM is byteswapped according to
2468 * the byteswapping settings for all other register accesses.
2469 * tg3 devices are BE devices, so on a BE machine, the data
2470 * returned will be exactly as it is seen in NVRAM. On a LE
 * machine, the 32-bit value will be byteswapped.
 */
2473 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2477 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2478 return tg3_nvram_read_using_eeprom(tp, offset, val);
2480 offset = tg3_nvram_phys_addr(tp, offset);
	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;
2489 tg3_enable_nvram_access(tp);
2491 tw32(NVRAM_ADDR, offset);
2492 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2493 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);
2498 tg3_disable_nvram_access(tp);
	tg3_nvram_unlock(tp);

	return ret;
}
2505 /* Ensures NVRAM data is in bytestream format. */
2506 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);

	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
2515 /* tp->lock is held. */
2516 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2518 u32 addr_high, addr_low;
2521 addr_high = ((tp->dev->dev_addr[0] << 8) |
2522 tp->dev->dev_addr[1]);
2523 addr_low = ((tp->dev->dev_addr[2] << 24) |
2524 (tp->dev->dev_addr[3] << 16) |
2525 (tp->dev->dev_addr[4] << 8) |
2526 (tp->dev->dev_addr[5] << 0));
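	/* Illustration (example address only): for MAC 00:10:18:aa:bb:cc
	 * the hardware sees addr_high = 0x0010 (top two octets) and
	 * addr_low = 0x18aabbcc (remaining four octets).
	 */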
2527 for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
2530 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2531 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2534 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2535 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2536 for (i = 0; i < 12; i++) {
2537 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2538 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2542 addr_high = (tp->dev->dev_addr[0] +
2543 tp->dev->dev_addr[1] +
2544 tp->dev->dev_addr[2] +
2545 tp->dev->dev_addr[3] +
2546 tp->dev->dev_addr[4] +
2547 tp->dev->dev_addr[5]) &
2548 TX_BACKOFF_SEED_MASK;
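	/* The seed is just the byte-sum of the station address; for the
	 * example MAC above that is 0x259 before masking, giving each NIC
	 * a distinct transmit backoff sequence.
	 */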
2549 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2552 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2555 bool device_should_wake, do_low_power;
2557 /* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
2560 pci_write_config_dword(tp->pdev,
2561 TG3PCI_MISC_HOST_CTRL,
2562 tp->misc_host_ctrl);
2566 pci_enable_wake(tp->pdev, state, false);
2567 pci_set_power_state(tp->pdev, PCI_D0);
2569 /* Switch out of Vaux if it is a NIC */
2570 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2571 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
		netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
			   state);
		return -EINVAL;
	}
2586 /* Restore the CLKREQ setting. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}
2599 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2600 tw32(TG3PCI_MISC_HOST_CTRL,
2601 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
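	/* Wake-on-LAN is armed only when all three conditions hold: the
	 * device can assert PME# from the target power state, OS policy
	 * allows wakeup, and the user actually enabled WoL.
	 */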
2603 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2604 device_may_wakeup(&tp->pdev->dev) &&
2605 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2607 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2608 do_low_power = false;
2609 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2610 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2611 struct phy_device *phydev;
2612 u32 phyid, advertising;
2614 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2616 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2618 tp->link_config.orig_speed = phydev->speed;
2619 tp->link_config.orig_duplex = phydev->duplex;
2620 tp->link_config.orig_autoneg = phydev->autoneg;
2621 tp->link_config.orig_advertising = phydev->advertising;
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
2625 ADVERTISED_Autoneg |
2626 ADVERTISED_10baseT_Half;
2628 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2629 device_should_wake) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
2639 phydev->advertising = advertising;
2641 phy_start_aneg(phydev);
2643 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2644 if (phyid != PHY_ID_BCMAC131) {
2645 phyid &= PHY_BCM_OUI_MASK;
2646 if (phyid == PHY_BCM_OUI_1 ||
2647 phyid == PHY_BCM_OUI_2 ||
2648 phyid == PHY_BCM_OUI_3)
2649 do_low_power = true;
2653 do_low_power = true;
2655 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2656 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2657 tp->link_config.orig_speed = tp->link_config.speed;
2658 tp->link_config.orig_duplex = tp->link_config.duplex;
2659 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2662 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2663 tp->link_config.speed = SPEED_10;
2664 tp->link_config.duplex = DUPLEX_HALF;
2665 tp->link_config.autoneg = AUTONEG_ENABLE;
2666 tg3_setup_phy(tp, 0);
2670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2673 val = tr32(GRC_VCPU_EXT_CTRL);
2674 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
2686 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);
	if (device_should_wake) {
		u32 mac_mode;

2695 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}
2701 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2702 mac_mode = MAC_MODE_PORT_MODE_GMII;
2704 mac_mode = MAC_MODE_PORT_MODE_MII;
2706 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
2709 u32 speed = (tp->tg3_flags &
2710 TG3_FLAG_WOL_SPEED_100MB) ?
2711 SPEED_100 : SPEED_10;
2712 if (tg3_5700_link_polarity(tp, speed))
2713 mac_mode |= MAC_MODE_LINK_POLARITY;
2715 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2718 mac_mode = MAC_MODE_PORT_MODE_TBI;
2721 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2722 tw32(MAC_LED_CTRL, tp->led_ctrl);
		if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2725 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2726 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2727 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2728 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2729 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2731 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
2732 mac_mode |= MAC_MODE_APE_TX_EN |
2733 MAC_MODE_APE_RX_EN |
2734 MAC_MODE_TDE_ENABLE;
2736 tw32_f(MAC_MODE, mac_mode);
2739 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2743 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2744 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2748 base_val = tp->pci_clock_ctrl;
2749 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2750 CLOCK_CTRL_TXCLK_DISABLE);
2752 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2753 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2754 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2755 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2756 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2758 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2759 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2760 u32 newbits1, newbits2;
2762 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2763 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2764 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2765 CLOCK_CTRL_TXCLK_DISABLE |
2767 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2768 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2769 newbits1 = CLOCK_CTRL_625_CORE;
2770 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2772 newbits1 = CLOCK_CTRL_ALTCLK;
2773 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2776 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2779 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2782 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2787 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2788 CLOCK_CTRL_TXCLK_DISABLE |
2789 CLOCK_CTRL_44MHZ_CORE);
2791 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2794 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2795 tp->pci_clock_ctrl | newbits3, 40);
2799 if (!(device_should_wake) &&
2800 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2801 tg3_power_down_phy(tp, do_low_power);
2803 tg3_frob_aux_power(tp);
2805 /* Workaround for unstable PLL clock */
2806 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2807 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2808 u32 val = tr32(0x7d00);
		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
2812 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
2822 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2824 if (device_should_wake)
2825 pci_enable_wake(tp->pdev, state, true);
2827 /* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
2833 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2835 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;
2856 case MII_TG3_AUX_STAT_1000HALF:
2857 *speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

2861 case MII_TG3_AUX_STAT_1000FULL:
2862 *speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
2880 static void tg3_phy_copper_begin(struct tg3 *tp)
2885 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2886 /* Entering low power mode. Disable gigabit and
		 * 100baseT advertisements.
		 */
2889 tg3_writephy(tp, MII_TG3_CTRL, 0);
2891 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2892 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2893 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2894 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2896 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2897 } else if (tp->link_config.speed == SPEED_INVALID) {
2898 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2899 tp->link_config.advertising &=
2900 ~(ADVERTISED_1000baseT_Half |
2901 ADVERTISED_1000baseT_Full);
2903 new_adv = ADVERTISE_CSMA;
2904 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2905 new_adv |= ADVERTISE_10HALF;
2906 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2907 new_adv |= ADVERTISE_10FULL;
2908 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2909 new_adv |= ADVERTISE_100HALF;
2910 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2911 new_adv |= ADVERTISE_100FULL;
2913 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2915 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2917 if (tp->link_config.advertising &
2918 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2920 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2921 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2922 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2923 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2924 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2925 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2926 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2927 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2928 MII_TG3_CTRL_ENABLE_AS_MASTER);
2929 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2931 tg3_writephy(tp, MII_TG3_CTRL, 0);
2934 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2935 new_adv |= ADVERTISE_CSMA;
2937 /* Asking for a specific link mode. */
2938 if (tp->link_config.speed == SPEED_1000) {
2939 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2941 if (tp->link_config.duplex == DUPLEX_FULL)
2942 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2944 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2945 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2946 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2947 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2948 MII_TG3_CTRL_ENABLE_AS_MASTER);
2950 if (tp->link_config.speed == SPEED_100) {
2951 if (tp->link_config.duplex == DUPLEX_FULL)
2952 new_adv |= ADVERTISE_100FULL;
2954 new_adv |= ADVERTISE_100HALF;
2956 if (tp->link_config.duplex == DUPLEX_FULL)
2957 new_adv |= ADVERTISE_10FULL;
2959 new_adv |= ADVERTISE_10HALF;
2961 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2966 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2969 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
2972 tw32(TG3_CPMU_EEE_MODE,
2973 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2975 /* Enable SM_DSP clock and tx 6dB coding. */
2976 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
2977 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
2978 MII_TG3_AUXCTL_ACTL_TX_6DB;
2979 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2981 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2983 !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2984 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
2985 val | MII_TG3_DSP_CH34TP2_HIBW01);
2987 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2988 /* Advertise 100-BaseTX EEE ability */
2989 if (tp->link_config.advertising &
2990 (ADVERTISED_100baseT_Half |
2991 ADVERTISED_100baseT_Full))
2992 val |= TG3_CL45_D7_EEEADV_CAP_100TX;
2993 /* Advertise 1000-BaseT EEE ability */
2994 if (tp->link_config.advertising &
2995 (ADVERTISED_1000baseT_Half |
2996 ADVERTISED_1000baseT_Full))
2997 val |= TG3_CL45_D7_EEEADV_CAP_1000T;
2999 tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val);
3001 /* Turn off SM_DSP clock. */
3002 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
3003 MII_TG3_AUXCTL_ACTL_TX_6DB;
3004 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3007 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3008 tp->link_config.speed != SPEED_INVALID) {
3009 u32 bmcr, orig_bmcr;
3011 tp->link_config.active_speed = tp->link_config.speed;
3012 tp->link_config.active_duplex = tp->link_config.duplex;
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}
3029 if (tp->link_config.duplex == DUPLEX_FULL)
3030 bmcr |= BMCR_FULLDPLX;
3032 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3033 (bmcr != orig_bmcr)) {
3034 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
		for (i = 0; i < 1500; i++) {
			u32 tmp;

			udelay(10);
			if (tg3_readphy(tp, MII_BMSR, &tmp) ||
			    tg3_readphy(tp, MII_BMSR, &tmp))
				continue;
			if (!(tmp & BMSR_LSTATUS)) {
				udelay(40);
				break;
			}
		}
3047 tg3_writephy(tp, MII_BMCR, bmcr);
3051 tg3_writephy(tp, MII_BMCR,
3052 BMCR_ANENABLE | BMCR_ANRESTART);
3056 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3060 /* Turn off tap power management. */
3061 /* Set Extended packet length bit */
3062 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
3064 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3065 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3066 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3067 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3068 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3075 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3077 u32 adv_reg, all_mask = 0;
3079 if (mask & ADVERTISED_10baseT_Half)
3080 all_mask |= ADVERTISE_10HALF;
3081 if (mask & ADVERTISED_10baseT_Full)
3082 all_mask |= ADVERTISE_10FULL;
3083 if (mask & ADVERTISED_100baseT_Half)
3084 all_mask |= ADVERTISE_100HALF;
3085 if (mask & ADVERTISED_100baseT_Full)
3086 all_mask |= ADVERTISE_100FULL;
3088 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
	if ((adv_reg & all_mask) != all_mask)
		return 0;

3093 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3097 if (mask & ADVERTISED_1000baseT_Half)
3098 all_mask |= ADVERTISE_1000HALF;
3099 if (mask & ADVERTISED_1000baseT_Full)
3100 all_mask |= ADVERTISE_1000FULL;
3102 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
3111 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3115 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3118 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3119 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3121 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3122 if (curadv != reqadv)
3125 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3126 tg3_readphy(tp, MII_LPA, rmtadv);
3128 /* Reprogram the advertisement register, even if it
3129 * does not affect the current link. If the link
3130 * gets renegotiated in the future, we can save an
3131 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
3134 if (curadv != reqadv) {
3135 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3136 ADVERTISE_PAUSE_ASYM);
3137 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3144 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3146 int current_link_up;
3148 u32 lcl_adv, rmt_adv;
3156 (MAC_STATUS_SYNC_CHANGED |
3157 MAC_STATUS_CFG_CHANGED |
3158 MAC_STATUS_MI_COMPLETION |
3159 MAC_STATUS_LNKSTATE_CHANGED));
3162 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3164 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3168 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
3173 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3176 netif_carrier_ok(tp->dev)) {
3177 tg3_readphy(tp, MII_BMSR, &bmsr);
3178 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3179 !(bmsr & BMSR_LSTATUS))
3185 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3186 tg3_readphy(tp, MII_BMSR, &bmsr);
3187 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3188 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3191 if (!(bmsr & BMSR_LSTATUS)) {
3192 err = tg3_init_5401phy_dsp(tp);
3196 tg3_readphy(tp, MII_BMSR, &bmsr);
3197 for (i = 0; i < 1000; i++) {
3199 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3200 (bmsr & BMSR_LSTATUS)) {
3206 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3207 TG3_PHY_REV_BCM5401_B0 &&
3208 !(bmsr & BMSR_LSTATUS) &&
3209 tp->link_config.active_speed == SPEED_1000) {
3210 err = tg3_phy_reset(tp);
3212 err = tg3_init_5401phy_dsp(tp);
3217 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3218 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3219 /* 5701 {A0,B0} CRC bug workaround */
3220 tg3_writephy(tp, 0x15, 0x0a75);
3221 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3222 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3223 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3226 /* Clear pending interrupts... */
3227 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3228 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3230 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3231 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3232 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3233 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3235 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3236 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3237 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3238 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3239 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3241 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3244 current_link_up = 0;
3245 current_speed = SPEED_INVALID;
3246 current_duplex = DUPLEX_INVALID;
3248 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3249 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3250 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3251 if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3259 for (i = 0; i < 100; i++) {
3260 tg3_readphy(tp, MII_BMSR, &bmsr);
3261 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3262 (bmsr & BMSR_LSTATUS))
3267 if (bmsr & BMSR_LSTATUS) {
3270 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3271 for (i = 0; i < 2000; i++) {
3273 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3278 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3283 for (i = 0; i < 200; i++) {
3284 tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
3295 tp->link_config.active_speed = current_speed;
3296 tp->link_config.active_duplex = current_duplex;
3298 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3299 if ((bmcr & BMCR_ANENABLE) &&
3300 tg3_copper_is_advertising_all(tp,
3301 tp->link_config.advertising)) {
3302 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3304 current_link_up = 1;
3307 if (!(bmcr & BMCR_ANENABLE) &&
3308 tp->link_config.speed == current_speed &&
3309 tp->link_config.duplex == current_duplex &&
3310 tp->link_config.flowctrl ==
3311 tp->link_config.active_flowctrl) {
3312 current_link_up = 1;
3316 if (current_link_up == 1 &&
3317 tp->link_config.active_duplex == DUPLEX_FULL)
3318 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3322 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3323 tg3_phy_copper_begin(tp);
3325 tg3_readphy(tp, MII_BMSR, &bmsr);
3326 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3327 (bmsr & BMSR_LSTATUS))
3328 current_link_up = 1;
3331 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3332 if (current_link_up == 1) {
3333 if (tp->link_config.active_speed == SPEED_100 ||
3334 tp->link_config.active_speed == SPEED_10)
3335 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3337 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3338 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3339 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3341 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3343 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3344 if (tp->link_config.active_duplex == DUPLEX_HALF)
3345 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3348 if (current_link_up == 1 &&
3349 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3350 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3352 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3355 /* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
3358 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3359 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3360 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3361 tw32_f(MAC_MI_MODE, tp->mi_mode);
3365 tw32_f(MAC_MODE, tp->mac_mode);
3368 tg3_phy_eee_adjust(tp, current_link_up);
3370 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3371 /* Polled via timer. */
3372 tw32_f(MAC_EVENT, 0);
3374 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3379 current_link_up == 1 &&
3380 tp->link_config.active_speed == SPEED_1000 &&
3381 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3382 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3385 (MAC_STATUS_SYNC_CHANGED |
3386 MAC_STATUS_CFG_CHANGED));
3389 NIC_SRAM_FIRMWARE_MBOX,
3390 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3393 /* Prevent send BD corruption. */
3394 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3395 u16 oldlnkctl, newlnkctl;
3397 pci_read_config_word(tp->pdev,
3398 tp->pcie_cap + PCI_EXP_LNKCTL,
3400 if (tp->link_config.active_speed == SPEED_100 ||
3401 tp->link_config.active_speed == SPEED_10)
3402 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3404 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3405 if (newlnkctl != oldlnkctl)
3406 pci_write_config_word(tp->pdev,
3407 tp->pcie_cap + PCI_EXP_LNKCTL,
3411 if (current_link_up != netif_carrier_ok(tp->dev)) {
3412 if (current_link_up)
3413 netif_carrier_on(tp->dev);
3415 netif_carrier_off(tp->dev);
3416 tg3_link_report(tp);
3422 struct tg3_fiber_aneginfo {
3424 #define ANEG_STATE_UNKNOWN 0
3425 #define ANEG_STATE_AN_ENABLE 1
3426 #define ANEG_STATE_RESTART_INIT 2
3427 #define ANEG_STATE_RESTART 3
3428 #define ANEG_STATE_DISABLE_LINK_OK 4
3429 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3430 #define ANEG_STATE_ABILITY_DETECT 6
3431 #define ANEG_STATE_ACK_DETECT_INIT 7
3432 #define ANEG_STATE_ACK_DETECT 8
3433 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3434 #define ANEG_STATE_COMPLETE_ACK 10
3435 #define ANEG_STATE_IDLE_DETECT_INIT 11
3436 #define ANEG_STATE_IDLE_DETECT 12
3437 #define ANEG_STATE_LINK_OK 13
3438 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3439 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3442 #define MR_AN_ENABLE 0x00000001
3443 #define MR_RESTART_AN 0x00000002
3444 #define MR_AN_COMPLETE 0x00000004
3445 #define MR_PAGE_RX 0x00000008
3446 #define MR_NP_LOADED 0x00000010
3447 #define MR_TOGGLE_TX 0x00000020
3448 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3449 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3450 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3451 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3452 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3453 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3454 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3455 #define MR_TOGGLE_RX 0x00002000
3456 #define MR_NP_RX 0x00004000
3458 #define MR_LINK_OK 0x80000000
3460 unsigned long link_time, cur_time;
3462 u32 ability_match_cfg;
3463 int ability_match_count;
3465 char ability_match, idle_match, ack_match;
3467 u32 txconfig, rxconfig;
3468 #define ANEG_CFG_NP 0x00000080
3469 #define ANEG_CFG_ACK 0x00000040
3470 #define ANEG_CFG_RF2 0x00000020
3471 #define ANEG_CFG_RF1 0x00000010
3472 #define ANEG_CFG_PS2 0x00000001
3473 #define ANEG_CFG_PS1 0x00008000
3474 #define ANEG_CFG_HD 0x00004000
3475 #define ANEG_CFG_FD 0x00002000
3476 #define ANEG_CFG_INVAL 0x00001f06
3481 #define ANEG_TIMER_ENAB 2
3482 #define ANEG_FAILED -1
3484 #define ANEG_STATE_SETTLE_TIME 10000
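/* The state machine below is, in effect, a software implementation of
 * the IEEE 802.3 clause 37 1000BASE-X auto-negotiation protocol: it
 * exchanges config code words through MAC_TX_AUTO_NEG, matches the
 * partner's ability and acknowledge bits, and settles into LINK_OK.
 */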
3486 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3487 struct tg3_fiber_aneginfo *ap)
3490 unsigned long delta;
3494 if (ap->state == ANEG_STATE_UNKNOWN) {
3498 ap->ability_match_cfg = 0;
3499 ap->ability_match_count = 0;
3500 ap->ability_match = 0;
3506 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3507 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3509 if (rx_cfg_reg != ap->ability_match_cfg) {
3510 ap->ability_match_cfg = rx_cfg_reg;
3511 ap->ability_match = 0;
3512 ap->ability_match_count = 0;
3514 if (++ap->ability_match_count > 1) {
3515 ap->ability_match = 1;
3516 ap->ability_match_cfg = rx_cfg_reg;
3519 if (rx_cfg_reg & ANEG_CFG_ACK)
3527 ap->ability_match_cfg = 0;
3528 ap->ability_match_count = 0;
3529 ap->ability_match = 0;
3535 ap->rxconfig = rx_cfg_reg;
3538 switch (ap->state) {
3539 case ANEG_STATE_UNKNOWN:
3540 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3541 ap->state = ANEG_STATE_AN_ENABLE;
3544 case ANEG_STATE_AN_ENABLE:
3545 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3546 if (ap->flags & MR_AN_ENABLE) {
3549 ap->ability_match_cfg = 0;
3550 ap->ability_match_count = 0;
3551 ap->ability_match = 0;
3555 ap->state = ANEG_STATE_RESTART_INIT;
3557 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3561 case ANEG_STATE_RESTART_INIT:
3562 ap->link_time = ap->cur_time;
3563 ap->flags &= ~(MR_NP_LOADED);
3565 tw32(MAC_TX_AUTO_NEG, 0);
3566 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3567 tw32_f(MAC_MODE, tp->mac_mode);
3570 ret = ANEG_TIMER_ENAB;
3571 ap->state = ANEG_STATE_RESTART;
3574 case ANEG_STATE_RESTART:
3575 delta = ap->cur_time - ap->link_time;
3576 if (delta > ANEG_STATE_SETTLE_TIME)
3577 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3579 ret = ANEG_TIMER_ENAB;
3582 case ANEG_STATE_DISABLE_LINK_OK:
3586 case ANEG_STATE_ABILITY_DETECT_INIT:
3587 ap->flags &= ~(MR_TOGGLE_TX);
3588 ap->txconfig = ANEG_CFG_FD;
3589 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3590 if (flowctrl & ADVERTISE_1000XPAUSE)
3591 ap->txconfig |= ANEG_CFG_PS1;
3592 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3593 ap->txconfig |= ANEG_CFG_PS2;
3594 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3595 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3596 tw32_f(MAC_MODE, tp->mac_mode);
3599 ap->state = ANEG_STATE_ABILITY_DETECT;
3602 case ANEG_STATE_ABILITY_DETECT:
3603 if (ap->ability_match != 0 && ap->rxconfig != 0)
3604 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3607 case ANEG_STATE_ACK_DETECT_INIT:
3608 ap->txconfig |= ANEG_CFG_ACK;
3609 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3610 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3611 tw32_f(MAC_MODE, tp->mac_mode);
3614 ap->state = ANEG_STATE_ACK_DETECT;
3617 case ANEG_STATE_ACK_DETECT:
3618 if (ap->ack_match != 0) {
3619 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3620 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3621 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3623 ap->state = ANEG_STATE_AN_ENABLE;
3625 } else if (ap->ability_match != 0 &&
3626 ap->rxconfig == 0) {
3627 ap->state = ANEG_STATE_AN_ENABLE;
3631 case ANEG_STATE_COMPLETE_ACK_INIT:
3632 if (ap->rxconfig & ANEG_CFG_INVAL) {
3636 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3637 MR_LP_ADV_HALF_DUPLEX |
3638 MR_LP_ADV_SYM_PAUSE |
3639 MR_LP_ADV_ASYM_PAUSE |
3640 MR_LP_ADV_REMOTE_FAULT1 |
3641 MR_LP_ADV_REMOTE_FAULT2 |
3642 MR_LP_ADV_NEXT_PAGE |
3645 if (ap->rxconfig & ANEG_CFG_FD)
3646 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3647 if (ap->rxconfig & ANEG_CFG_HD)
3648 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3649 if (ap->rxconfig & ANEG_CFG_PS1)
3650 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3651 if (ap->rxconfig & ANEG_CFG_PS2)
3652 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3653 if (ap->rxconfig & ANEG_CFG_RF1)
3654 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3655 if (ap->rxconfig & ANEG_CFG_RF2)
3656 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3657 if (ap->rxconfig & ANEG_CFG_NP)
3658 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3660 ap->link_time = ap->cur_time;
3662 ap->flags ^= (MR_TOGGLE_TX);
3663 if (ap->rxconfig & 0x0008)
3664 ap->flags |= MR_TOGGLE_RX;
3665 if (ap->rxconfig & ANEG_CFG_NP)
3666 ap->flags |= MR_NP_RX;
3667 ap->flags |= MR_PAGE_RX;
3669 ap->state = ANEG_STATE_COMPLETE_ACK;
3670 ret = ANEG_TIMER_ENAB;
3673 case ANEG_STATE_COMPLETE_ACK:
3674 if (ap->ability_match != 0 &&
3675 ap->rxconfig == 0) {
3676 ap->state = ANEG_STATE_AN_ENABLE;
3679 delta = ap->cur_time - ap->link_time;
3680 if (delta > ANEG_STATE_SETTLE_TIME) {
3681 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3682 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3684 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3685 !(ap->flags & MR_NP_RX)) {
3686 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3694 case ANEG_STATE_IDLE_DETECT_INIT:
3695 ap->link_time = ap->cur_time;
3696 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3697 tw32_f(MAC_MODE, tp->mac_mode);
3700 ap->state = ANEG_STATE_IDLE_DETECT;
3701 ret = ANEG_TIMER_ENAB;
3704 case ANEG_STATE_IDLE_DETECT:
3705 if (ap->ability_match != 0 &&
3706 ap->rxconfig == 0) {
3707 ap->state = ANEG_STATE_AN_ENABLE;
3710 delta = ap->cur_time - ap->link_time;
3711 if (delta > ANEG_STATE_SETTLE_TIME) {
3712 /* XXX another gem from the Broadcom driver :( */
3713 ap->state = ANEG_STATE_LINK_OK;
3717 case ANEG_STATE_LINK_OK:
3718 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3722 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3723 /* ??? unimplemented */
3726 case ANEG_STATE_NEXT_PAGE_WAIT:
3727 /* ??? unimplemented */
3738 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3741 struct tg3_fiber_aneginfo aninfo;
3742 int status = ANEG_FAILED;
3746 tw32_f(MAC_TX_AUTO_NEG, 0);
3748 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3749 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3752 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3755 memset(&aninfo, 0, sizeof(aninfo));
3756 aninfo.flags |= MR_AN_ENABLE;
3757 aninfo.state = ANEG_STATE_UNKNOWN;
3758 aninfo.cur_time = 0;
3760 while (++tick < 195000) {
3761 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3762 if (status == ANEG_DONE || status == ANEG_FAILED)
3768 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3769 tw32_f(MAC_MODE, tp->mac_mode);
3772 *txflags = aninfo.txconfig;
3773 *rxflags = aninfo.flags;
3775 if (status == ANEG_DONE &&
3776 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3777 MR_LP_ADV_FULL_DUPLEX)))
3783 static void tg3_init_bcm8002(struct tg3 *tp)
3785 u32 mac_status = tr32(MAC_STATUS);
3788 /* Reset when initting first time or we have a link. */
3789 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3790 !(mac_status & MAC_STATUS_PCS_SYNCED))
3793 /* Set PLL lock range. */
3794 tg3_writephy(tp, 0x16, 0x8007);
3797 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3799 /* Wait for reset to complete. */
3800 /* XXX schedule_timeout() ... */
3801 for (i = 0; i < 500; i++)
3804 /* Config mode; select PMA/Ch 1 regs. */
3805 tg3_writephy(tp, 0x10, 0x8411);
3807 /* Enable auto-lock and comdet, select txclk for tx. */
3808 tg3_writephy(tp, 0x11, 0x0a10);
3810 tg3_writephy(tp, 0x18, 0x00a0);
3811 tg3_writephy(tp, 0x16, 0x41ff);
3813 /* Assert and deassert POR. */
3814 tg3_writephy(tp, 0x13, 0x0400);
3816 tg3_writephy(tp, 0x13, 0x0000);
3818 tg3_writephy(tp, 0x11, 0x0a50);
3820 tg3_writephy(tp, 0x11, 0x0a10);
3822 /* Wait for signal to stabilize */
3823 /* XXX schedule_timeout() ... */
3824 for (i = 0; i < 15000; i++)
	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
3830 tg3_writephy(tp, 0x10, 0x8011);
3833 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3836 u32 sg_dig_ctrl, sg_dig_status;
3837 u32 serdes_cfg, expected_sg_dig_ctrl;
3838 int workaround, port_a;
3839 int current_link_up;
3842 expected_sg_dig_ctrl = 0;
3845 current_link_up = 0;
3847 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3848 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3850 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3853 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3854 /* preserve bits 20-23 for voltage regulator */
3855 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3858 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3860 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3861 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3863 u32 val = serdes_cfg;
3869 tw32_f(MAC_SERDES_CFG, val);
3872 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3874 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3875 tg3_setup_flow_control(tp, 0, 0);
3876 current_link_up = 1;
3881 /* Want auto-negotiation. */
3882 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3884 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3885 if (flowctrl & ADVERTISE_1000XPAUSE)
3886 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3887 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3888 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3890 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3891 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3892 tp->serdes_counter &&
3893 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3894 MAC_STATUS_RCVD_CFG)) ==
3895 MAC_STATUS_PCS_SYNCED)) {
3896 tp->serdes_counter--;
3897 current_link_up = 1;
3902 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3903 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3905 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3907 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3908 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3909 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3910 MAC_STATUS_SIGNAL_DET)) {
3911 sg_dig_status = tr32(SG_DIG_STATUS);
3912 mac_status = tr32(MAC_STATUS);
3914 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3915 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3916 u32 local_adv = 0, remote_adv = 0;
3918 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3919 local_adv |= ADVERTISE_1000XPAUSE;
3920 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3921 local_adv |= ADVERTISE_1000XPSE_ASYM;
3923 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3924 remote_adv |= LPA_1000XPAUSE;
3925 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3926 remote_adv |= LPA_1000XPAUSE_ASYM;
3928 tg3_setup_flow_control(tp, local_adv, remote_adv);
3929 current_link_up = 1;
3930 tp->serdes_counter = 0;
3931 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3932 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3933 if (tp->serdes_counter)
3934 tp->serdes_counter--;
3937 u32 val = serdes_cfg;
3944 tw32_f(MAC_SERDES_CFG, val);
3947 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				/* Parallel detection: declare link up only
				 * if we have PCS_SYNC and are not receiving
				 * config code words.
				 */
3953 mac_status = tr32(MAC_STATUS);
3954 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3955 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3956 tg3_setup_flow_control(tp, 0, 0);
3957 current_link_up = 1;
3959 TG3_PHYFLG_PARALLEL_DETECT;
3960 tp->serdes_counter =
3961 SERDES_PARALLEL_DET_TIMEOUT;
3963 goto restart_autoneg;
3967 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3968 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3972 return current_link_up;
3975 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3977 int current_link_up = 0;
3979 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3982 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3983 u32 txflags, rxflags;
3986 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3987 u32 local_adv = 0, remote_adv = 0;
3989 if (txflags & ANEG_CFG_PS1)
3990 local_adv |= ADVERTISE_1000XPAUSE;
3991 if (txflags & ANEG_CFG_PS2)
3992 local_adv |= ADVERTISE_1000XPSE_ASYM;
3994 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3995 remote_adv |= LPA_1000XPAUSE;
3996 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3997 remote_adv |= LPA_1000XPAUSE_ASYM;
3999 tg3_setup_flow_control(tp, local_adv, remote_adv);
4001 current_link_up = 1;
4003 for (i = 0; i < 30; i++) {
4006 (MAC_STATUS_SYNC_CHANGED |
4007 MAC_STATUS_CFG_CHANGED));
4009 if ((tr32(MAC_STATUS) &
4010 (MAC_STATUS_SYNC_CHANGED |
4011 MAC_STATUS_CFG_CHANGED)) == 0)
4015 mac_status = tr32(MAC_STATUS);
4016 if (current_link_up == 0 &&
4017 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4018 !(mac_status & MAC_STATUS_RCVD_CFG))
4019 current_link_up = 1;
4021 tg3_setup_flow_control(tp, 0, 0);
4023 /* Forcing 1000FD link up. */
4024 current_link_up = 1;
4026 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4029 tw32_f(MAC_MODE, tp->mac_mode);
4034 return current_link_up;
4037 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4040 u16 orig_active_speed;
4041 u8 orig_active_duplex;
4043 int current_link_up;
4046 orig_pause_cfg = tp->link_config.active_flowctrl;
4047 orig_active_speed = tp->link_config.active_speed;
4048 orig_active_duplex = tp->link_config.active_duplex;
4050 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
4051 netif_carrier_ok(tp->dev) &&
4052 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
4053 mac_status = tr32(MAC_STATUS);
4054 mac_status &= (MAC_STATUS_PCS_SYNCED |
4055 MAC_STATUS_SIGNAL_DET |
4056 MAC_STATUS_CFG_CHANGED |
4057 MAC_STATUS_RCVD_CFG);
4058 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4059 MAC_STATUS_SIGNAL_DET)) {
4060 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4061 MAC_STATUS_CFG_CHANGED));
4066 tw32_f(MAC_TX_AUTO_NEG, 0);
4068 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4069 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4070 tw32_f(MAC_MODE, tp->mac_mode);
4073 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4074 tg3_init_bcm8002(tp);
4076 /* Enable link change event even when serdes polling. */
4077 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4080 current_link_up = 0;
4081 mac_status = tr32(MAC_STATUS);
4083 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
4084 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4086 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4088 tp->napi[0].hw_status->status =
4089 (SD_STATUS_UPDATED |
4090 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4092 for (i = 0; i < 100; i++) {
4093 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4094 MAC_STATUS_CFG_CHANGED));
4096 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4097 MAC_STATUS_CFG_CHANGED |
4098 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4102 mac_status = tr32(MAC_STATUS);
4103 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4104 current_link_up = 0;
4105 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4106 tp->serdes_counter == 0) {
4107 tw32_f(MAC_MODE, (tp->mac_mode |
4108 MAC_MODE_SEND_CONFIGS));
4110 tw32_f(MAC_MODE, tp->mac_mode);
4114 if (current_link_up == 1) {
4115 tp->link_config.active_speed = SPEED_1000;
4116 tp->link_config.active_duplex = DUPLEX_FULL;
4117 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4118 LED_CTRL_LNKLED_OVERRIDE |
4119 LED_CTRL_1000MBPS_ON));
4121 tp->link_config.active_speed = SPEED_INVALID;
4122 tp->link_config.active_duplex = DUPLEX_INVALID;
4123 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4124 LED_CTRL_LNKLED_OVERRIDE |
4125 LED_CTRL_TRAFFIC_OVERRIDE));
4128 if (current_link_up != netif_carrier_ok(tp->dev)) {
4129 if (current_link_up)
4130 netif_carrier_on(tp->dev);
4132 netif_carrier_off(tp->dev);
4133 tg3_link_report(tp);
4135 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4136 if (orig_pause_cfg != now_pause_cfg ||
4137 orig_active_speed != tp->link_config.active_speed ||
4138 orig_active_duplex != tp->link_config.active_duplex)
4139 tg3_link_report(tp);
4145 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4147 int current_link_up, err = 0;
4151 u32 local_adv, remote_adv;
4153 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4154 tw32_f(MAC_MODE, tp->mac_mode);
4160 (MAC_STATUS_SYNC_CHANGED |
4161 MAC_STATUS_CFG_CHANGED |
4162 MAC_STATUS_MI_COMPLETION |
4163 MAC_STATUS_LNKSTATE_CHANGED));
4169 current_link_up = 0;
4170 current_speed = SPEED_INVALID;
4171 current_duplex = DUPLEX_INVALID;
4173 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4174 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4176 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4177 bmsr |= BMSR_LSTATUS;
4179 bmsr &= ~BMSR_LSTATUS;
4182 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4184 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4185 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4186 /* do nothing, just check for link up at the end */
4187 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4190 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4191 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4192 ADVERTISE_1000XPAUSE |
4193 ADVERTISE_1000XPSE_ASYM |
4196 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4198 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4199 new_adv |= ADVERTISE_1000XHALF;
4200 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4201 new_adv |= ADVERTISE_1000XFULL;
4203 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4204 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4205 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4206 tg3_writephy(tp, MII_BMCR, bmcr);
4208 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4209 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4210 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4217 bmcr &= ~BMCR_SPEED1000;
4218 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4220 if (tp->link_config.duplex == DUPLEX_FULL)
4221 new_bmcr |= BMCR_FULLDPLX;
4223 if (new_bmcr != bmcr) {
4224 /* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
4227 new_bmcr |= BMCR_SPEED1000;
4229 /* Force a linkdown */
4230 if (netif_carrier_ok(tp->dev)) {
4233 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4234 adv &= ~(ADVERTISE_1000XFULL |
4235 ADVERTISE_1000XHALF |
4237 tg3_writephy(tp, MII_ADVERTISE, adv);
4238 tg3_writephy(tp, MII_BMCR, bmcr |
4242 netif_carrier_off(tp->dev);
4244 tg3_writephy(tp, MII_BMCR, new_bmcr);
4246 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4247 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4248 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4250 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4251 bmsr |= BMSR_LSTATUS;
4253 bmsr &= ~BMSR_LSTATUS;
4255 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4259 if (bmsr & BMSR_LSTATUS) {
4260 current_speed = SPEED_1000;
4261 current_link_up = 1;
4262 if (bmcr & BMCR_FULLDPLX)
4263 current_duplex = DUPLEX_FULL;
4265 current_duplex = DUPLEX_HALF;
4270 if (bmcr & BMCR_ANENABLE) {
4273 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4274 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4275 common = local_adv & remote_adv;
4276 if (common & (ADVERTISE_1000XHALF |
4277 ADVERTISE_1000XFULL)) {
4278 if (common & ADVERTISE_1000XFULL)
4279 current_duplex = DUPLEX_FULL;
4281 current_duplex = DUPLEX_HALF;
4282 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4283 /* Link is up via parallel detect */
4285 current_link_up = 0;
4290 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4291 tg3_setup_flow_control(tp, local_adv, remote_adv);
4293 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4294 if (tp->link_config.active_duplex == DUPLEX_HALF)
4295 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4297 tw32_f(MAC_MODE, tp->mac_mode);
4300 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4302 tp->link_config.active_speed = current_speed;
4303 tp->link_config.active_duplex = current_duplex;
4305 if (current_link_up != netif_carrier_ok(tp->dev)) {
4306 if (current_link_up)
4307 netif_carrier_on(tp->dev);
4309 netif_carrier_off(tp->dev);
4310 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4312 tg3_link_report(tp);
4317 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4319 if (tp->serdes_counter) {
4320 /* Give autoneg time to complete. */
4321 tp->serdes_counter--;
4325 if (!netif_carrier_ok(tp->dev) &&
4326 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4329 tg3_readphy(tp, MII_BMCR, &bmcr);
4330 if (bmcr & BMCR_ANENABLE) {
4333 /* Select shadow register 0x1f */
4334 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4335 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4337 /* Select expansion interrupt status register */
4338 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4339 MII_TG3_DSP_EXP1_INT_STAT);
4340 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4341 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4343 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4344 /* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */
4349 bmcr &= ~BMCR_ANENABLE;
4350 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4351 tg3_writephy(tp, MII_BMCR, bmcr);
4352 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4355 } else if (netif_carrier_ok(tp->dev) &&
4356 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4357 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4360 /* Select expansion interrupt status register */
4361 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4362 MII_TG3_DSP_EXP1_INT_STAT);
4363 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4367 /* Config code words received, turn on autoneg. */
4368 tg3_readphy(tp, MII_BMCR, &bmcr);
4369 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4371 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4377 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4381 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4382 err = tg3_setup_fiber_phy(tp, force_reset);
4383 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4384 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4386 err = tg3_setup_copper_phy(tp, force_reset);
4388 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4391 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4392 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4394 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4399 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4400 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4401 tw32(GRC_MISC_CFG, val);
4404 if (tp->link_config.active_speed == SPEED_1000 &&
4405 tp->link_config.active_duplex == DUPLEX_HALF)
4406 tw32(MAC_TX_LENGTHS,
4407 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4408 (6 << TX_LENGTHS_IPG_SHIFT) |
4409 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4411 tw32(MAC_TX_LENGTHS,
4412 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4413 (6 << TX_LENGTHS_IPG_SHIFT) |
4414 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4416 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4417 if (netif_carrier_ok(tp->dev)) {
4418 tw32(HOSTCC_STAT_COAL_TICKS,
4419 tp->coal.stats_block_coalesce_usecs);
4421 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4425 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4426 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4427 if (!netif_carrier_ok(tp->dev))
4428 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4431 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4432 tw32(PCIE_PWR_MGMT_THRESH, val);
4438 static inline int tg3_irq_sync(struct tg3 *tp)
4440 return tp->irq_sync;
4443 /* This is called whenever we suspect that the system chipset is re-
4444 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4445 * is bogus tx completions. We try to recover by setting the
4446 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4449 static void tg3_tx_recover(struct tg3 *tp)
4451 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4452 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4454 netdev_warn(tp->dev,
4455 "The system may be re-ordering memory-mapped I/O "
4456 "cycles to the network device, attempting to recover. "
4457 "Please report the problem to the driver maintainer "
4458 "and include system chipset information.\n");
4460 spin_lock(&tp->lock);
4461 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4462 spin_unlock(&tp->lock);
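/* Free descriptors = tx_pending minus in-flight; the subtraction below
 * wraps correctly because TG3_TX_RING_SIZE is a power of two.
 * Illustration: with a 512-entry ring, tx_prod = 5 and tx_cons = 510
 * give (5 - 510) & 511 = 7 in flight, leaving tx_pending - 7 slots.
 */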
4465 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4467 /* Tell compiler to fetch tx indices from memory. */
4469 return tnapi->tx_pending -
4470 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4473 /* Tigon3 never reports partial packet sends. So we do not
4474 * need special logic to handle SKBs that have not had all
4475 * of their frags sent yet, like SunGEM does.
4477 static void tg3_tx(struct tg3_napi *tnapi)
4479 struct tg3 *tp = tnapi->tp;
4480 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4481 u32 sw_idx = tnapi->tx_cons;
4482 struct netdev_queue *txq;
4483 int index = tnapi - tp->napi;
4485 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4488 txq = netdev_get_tx_queue(tp->dev, index);
4490 while (sw_idx != hw_idx) {
4491 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;
4507 sw_idx = NEXT_TX(sw_idx);
4509 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4510 ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
4514 pci_unmap_page(tp->pdev,
4515 dma_unmap_addr(ri, mapping),
4516 skb_shinfo(skb)->frags[i].size,
4518 sw_idx = NEXT_TX(sw_idx);
		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}
4529 tnapi->tx_cons = sw_idx;
4531 /* Need to make the tx_cons update visible to tg3_start_xmit()
4532 * before checking for netif_queue_stopped(). Without the
4533 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
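	/* The lockless check below is confirmed again under the tx queue
	 * lock, so a racing tg3_start_xmit() cannot stop the queue between
	 * the two tests and leave it stopped.
	 */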
4538 if (unlikely(netif_tx_queue_stopped(txq) &&
4539 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4540 __netif_tx_lock(txq, smp_processor_id());
4541 if (netif_tx_queue_stopped(txq) &&
4542 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4543 netif_tx_wake_queue(txq);
4544 __netif_tx_unlock(txq);
4548 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4553 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4554 map_sz, PCI_DMA_FROMDEVICE);
4555 dev_kfree_skb_any(ri->skb);
4559 /* Returns size of skb allocated or < 0 on error.
4561 * We only need to fill in the address because the other members
4562 * of the RX descriptor are invariant, see tg3_init_rings.
4564 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4565 * posting buffers we only dirty the first cache line of the RX
4566 * descriptor (containing the address). Whereas for the RX status
4567 * buffers the cpu only reads the last cacheline of the RX descriptor
4568 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4570 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4571 u32 opaque_key, u32 dest_idx_unmasked)
4573 struct tg3_rx_buffer_desc *desc;
4574 struct ring_info *map;
4575 struct sk_buff *skb;
4577 int skb_size, dest_idx;
4579 switch (opaque_key) {
4580 case RXD_OPAQUE_RING_STD:
4581 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4582 desc = &tpr->rx_std[dest_idx];
4583 map = &tpr->rx_std_buffers[dest_idx];
4584 skb_size = tp->rx_pkt_map_sz;
4587 case RXD_OPAQUE_RING_JUMBO:
4588 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4589 desc = &tpr->rx_jmb[dest_idx].std;
4590 map = &tpr->rx_jmb_buffers[dest_idx];
4591 skb_size = TG3_RX_JMB_MAP_SZ;
4598 /* Do not overwrite any of the map or rp information
4599 * until we are sure we can commit to a new buffer.
4601 * Callers depend upon this behavior and assume that
4602 * we leave everything unchanged if we fail.
4604 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4608 skb_reserve(skb, tp->rx_offset);
4610 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4611 PCI_DMA_FROMDEVICE);
4612 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4618 dma_unmap_addr_set(map, mapping, mapping);
4620 desc->addr_hi = ((u64)mapping >> 32);
4621 desc->addr_lo = ((u64)mapping & 0xffffffff);
4626 /* We only need to copy over the address because the other
4627 * members of the RX descriptor are invariant. See notes above
4628 * tg3_alloc_rx_skb for full details.
4630 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4631 struct tg3_rx_prodring_set *dpr,
4632 u32 opaque_key, int src_idx,
4633 u32 dest_idx_unmasked)
4635 struct tg3 *tp = tnapi->tp;
4636 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4637 struct ring_info *src_map, *dest_map;
4638 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4641 switch (opaque_key) {
4642 case RXD_OPAQUE_RING_STD:
4643 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4644 dest_desc = &dpr->rx_std[dest_idx];
4645 dest_map = &dpr->rx_std_buffers[dest_idx];
4646 src_desc = &spr->rx_std[src_idx];
4647 src_map = &spr->rx_std_buffers[src_idx];
4650 case RXD_OPAQUE_RING_JUMBO:
4651 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4652 dest_desc = &dpr->rx_jmb[dest_idx].std;
4653 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4654 src_desc = &spr->rx_jmb[src_idx].std;
4655 src_map = &spr->rx_jmb_buffers[src_idx];
4662 dest_map->skb = src_map->skb;
4663 dma_unmap_addr_set(dest_map, mapping,
4664 dma_unmap_addr(src_map, mapping));
4665 dest_desc->addr_hi = src_desc->addr_hi;
4666 dest_desc->addr_lo = src_desc->addr_lo;
4668 /* Ensure that the update to the skb happens after the physical
4669 * addresses have been transferred to the new BD location.
4673 src_map->skb = NULL;
4676 /* The RX ring scheme is composed of multiple rings which post fresh
4677 * buffers to the chip, and one special ring the chip uses to report
4678 * status back to the host.
4680 * The special ring reports the status of received packets to the
4681 * host. The chip does not write into the original descriptor the
4682 * RX buffer was obtained from. The chip simply takes the original
4683 * descriptor as provided by the host, updates the status and length
4684 * field, then writes this into the next status ring entry.
4686 * Each ring the host uses to post buffers to the chip is described
4687 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4688 * it is first placed into the on-chip ram. When the packet's length
4689 * is known, it walks down the TG3_BDINFO entries to select the ring.
4690 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4691 * whose MAXLEN covers the new packet's length is chosen.
4693 * The "separate ring for rx status" scheme may sound queer, but it makes
4694 * sense from a cache coherency perspective. If only the host writes
4695 * to the buffer post rings, and only the chip writes to the rx status
4696 * rings, then cache lines never move beyond shared-modified state.
4697 * If both the host and chip were to write into the same ring, cache line
4698 * eviction could occur since both entities want it in an exclusive state.
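 *
 * A rough sketch of the resulting flow (illustrative, not a complete
 * description of the hardware interface):
 *
 *   host:  tg3_alloc_rx_skb() writes a buffer address into the
 *          producer ring and advances the producer mailbox
 *   chip:  DMAs the received frame into that buffer, then writes
 *          the length, error flags and the original opaque cookie
 *          into the next return (status) ring entry and advances
 *          its producer index in the status block
 *   host:  tg3_rx() walks the return ring up to that index and
 *          reposts or recycles the buffers it has consumed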
4700 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4702 struct tg3 *tp = tnapi->tp;
4703 u32 work_mask, rx_std_posted = 0;
4704 u32 std_prod_idx, jmb_prod_idx;
4705 u32 sw_idx = tnapi->rx_rcb_ptr;
4708 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4710 hw_idx = *(tnapi->rx_rcb_prod_idx);
4712 * We need to order the read of hw_idx and the read of
4713 * the opaque cookie.
4718 std_prod_idx = tpr->rx_std_prod_idx;
4719 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4720 while (sw_idx != hw_idx && budget > 0) {
4721 struct ring_info *ri;
4722 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4724 struct sk_buff *skb;
4725 dma_addr_t dma_addr;
4726 u32 opaque_key, desc_idx, *post_ptr;
4727 bool hw_vlan __maybe_unused = false;
4728 u16 vtag __maybe_unused = 0;
4730 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4731 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4732 if (opaque_key == RXD_OPAQUE_RING_STD) {
4733 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4734 dma_addr = dma_unmap_addr(ri, mapping);
4736 post_ptr = &std_prod_idx;
4738 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4739 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4740 dma_addr = dma_unmap_addr(ri, mapping);
4742 post_ptr = &jmb_prod_idx;
4744 goto next_pkt_nopost;
4746 work_mask |= opaque_key;
4748 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4749 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4751 tg3_recycle_rx(tnapi, tpr, opaque_key,
4752 desc_idx, *post_ptr);
4754 /* Other statistics are tracked by the card. */
4759 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4762 if (len > TG3_RX_COPY_THRESH(tp)) {
4765 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4770 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4771 PCI_DMA_FROMDEVICE);
4773 /* Ensure that the update to the skb happens
4774 * after the usage of the old DMA mapping.
4782 struct sk_buff *copy_skb;
4784 tg3_recycle_rx(tnapi, tpr, opaque_key,
4785 desc_idx, *post_ptr);
4787 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
4789 if (copy_skb == NULL)
4790 goto drop_it_no_recycle;
4792 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
4793 skb_put(copy_skb, len);
4794 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4795 skb_copy_from_linear_data(skb, copy_skb->data, len);
4796 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4798 /* We'll reuse the original ring buffer. */
4802 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4803 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4804 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4805 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4806 skb->ip_summed = CHECKSUM_UNNECESSARY;
4808 skb_checksum_none_assert(skb);
4810 skb->protocol = eth_type_trans(skb, tp->dev);
4812 if (len > (tp->dev->mtu + ETH_HLEN) &&
4813 skb->protocol != htons(ETH_P_8021Q)) {
4815 goto drop_it_no_recycle;
4818 if (desc->type_flags & RXD_FLAG_VLAN &&
4819 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
4820 vtag = desc->err_vlan & RXD_VLAN_MASK;
4821 #if TG3_VLAN_TAG_USED
4827 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4828 __skb_push(skb, VLAN_HLEN);
4830 memmove(ve, skb->data + VLAN_HLEN,
4832 ve->h_vlan_proto = htons(ETH_P_8021Q);
4833 ve->h_vlan_TCI = htons(vtag);
4837 #if TG3_VLAN_TAG_USED
4839 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4842 napi_gro_receive(&tnapi->napi, skb);
4850 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4851 tpr->rx_std_prod_idx = std_prod_idx &
4852 tp->rx_std_ring_mask;
4853 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4854 tpr->rx_std_prod_idx);
4855 work_mask &= ~RXD_OPAQUE_RING_STD;
4860 sw_idx &= tp->rx_ret_ring_mask;
4862 /* Refresh hw_idx to see if there is new work */
4863 if (sw_idx == hw_idx) {
4864 hw_idx = *(tnapi->rx_rcb_prod_idx);
4869 /* ACK the status ring. */
4870 tnapi->rx_rcb_ptr = sw_idx;
4871 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4873 /* Refill RX ring(s). */
4874 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4875 if (work_mask & RXD_OPAQUE_RING_STD) {
4876 tpr->rx_std_prod_idx = std_prod_idx &
4877 tp->rx_std_ring_mask;
4878 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4879 tpr->rx_std_prod_idx);
4881 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4882 tpr->rx_jmb_prod_idx = jmb_prod_idx &
4883 tp->rx_jmb_ring_mask;
4884 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4885 tpr->rx_jmb_prod_idx);
4888 } else if (work_mask) {
4889 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4890 * updated before the producer indices can be updated.
4894 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
4895 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
4897 if (tnapi != &tp->napi[1])
4898 napi_schedule(&tp->napi[1].napi);
4904 static void tg3_poll_link(struct tg3 *tp)
4906 /* handle link change and other phy events */
4907 if (!(tp->tg3_flags &
4908 (TG3_FLAG_USE_LINKCHG_REG |
4909 TG3_FLAG_POLL_SERDES))) {
4910 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4912 if (sblk->status & SD_STATUS_LINK_CHG) {
4913 sblk->status = SD_STATUS_UPDATED |
4914 (sblk->status & ~SD_STATUS_LINK_CHG);
4915 spin_lock(&tp->lock);
4916 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4918 (MAC_STATUS_SYNC_CHANGED |
4919 MAC_STATUS_CFG_CHANGED |
4920 MAC_STATUS_MI_COMPLETION |
4921 MAC_STATUS_LNKSTATE_CHANGED));
4924 tg3_setup_phy(tp, 0);
4925 spin_unlock(&tp->lock);
4930 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4931 struct tg3_rx_prodring_set *dpr,
4932 struct tg3_rx_prodring_set *spr)
4934 u32 si, di, cpycnt, src_prod_idx;
4938 src_prod_idx = spr->rx_std_prod_idx;
4940 /* Make sure updates to the rx_std_buffers[] entries and the
4941 * standard producer index are seen in the correct order.
4945 if (spr->rx_std_cons_idx == src_prod_idx)
4948 if (spr->rx_std_cons_idx < src_prod_idx)
4949 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4951 cpycnt = tp->rx_std_ring_mask + 1 -
4952 spr->rx_std_cons_idx;
4954 cpycnt = min(cpycnt,
4955 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
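/* Example of the wrap handling above (illustrative indices, assuming
 * a 512-entry standard ring): with rx_std_cons_idx = 500 and
 * src_prod_idx = 10 the producer has wrapped, so this pass copies
 * only the 512 - 500 = 12 entries up to the end of the ring; the
 * min() then further clamps cpycnt to the free space in front of
 * the destination producer index.
 */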
4957 si = spr->rx_std_cons_idx;
4958 di = dpr->rx_std_prod_idx;
4960 for (i = di; i < di + cpycnt; i++) {
4961 if (dpr->rx_std_buffers[i].skb) {
4971 /* Ensure that updates to the rx_std_buffers ring and the
4972 * shadowed hardware producer ring from tg3_recycle_rx() are
4973 * ordered correctly WRT the skb check above.
4977 memcpy(&dpr->rx_std_buffers[di],
4978 &spr->rx_std_buffers[si],
4979 cpycnt * sizeof(struct ring_info));
4981 for (i = 0; i < cpycnt; i++, di++, si++) {
4982 struct tg3_rx_buffer_desc *sbd, *dbd;
4983 sbd = &spr->rx_std[si];
4984 dbd = &dpr->rx_std[di];
4985 dbd->addr_hi = sbd->addr_hi;
4986 dbd->addr_lo = sbd->addr_lo;
4989 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
4990 tp->rx_std_ring_mask;
4991 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
4992 tp->rx_std_ring_mask;
4996 src_prod_idx = spr->rx_jmb_prod_idx;
4998 /* Make sure updates to the rx_jmb_buffers[] entries and
4999 * the jumbo producer index are seen in the correct order.
5003 if (spr->rx_jmb_cons_idx == src_prod_idx)
5006 if (spr->rx_jmb_cons_idx < src_prod_idx)
5007 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5009 cpycnt = tp->rx_jmb_ring_mask + 1 -
5010 spr->rx_jmb_cons_idx;
5012 cpycnt = min(cpycnt,
5013 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5015 si = spr->rx_jmb_cons_idx;
5016 di = dpr->rx_jmb_prod_idx;
5018 for (i = di; i < di + cpycnt; i++) {
5019 if (dpr->rx_jmb_buffers[i].skb) {
5029 /* Ensure that updates to the rx_jmb_buffers ring and the
5030 * shadowed hardware producer ring from tg3_recycle_rx() are
5031 * ordered correctly WRT the skb check above.
5035 memcpy(&dpr->rx_jmb_buffers[di],
5036 &spr->rx_jmb_buffers[si],
5037 cpycnt * sizeof(struct ring_info));
5039 for (i = 0; i < cpycnt; i++, di++, si++) {
5040 struct tg3_rx_buffer_desc *sbd, *dbd;
5041 sbd = &spr->rx_jmb[si].std;
5042 dbd = &dpr->rx_jmb[di].std;
5043 dbd->addr_hi = sbd->addr_hi;
5044 dbd->addr_lo = sbd->addr_lo;
5047 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5048 tp->rx_jmb_ring_mask;
5049 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5050 tp->rx_jmb_ring_mask;
5056 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5058 struct tg3 *tp = tnapi->tp;
5060 /* run TX completion thread */
5061 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5063 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5067 /* run RX thread, within the bounds set by NAPI.
5068 * All RX "locking" is done by ensuring outside
5069 * code synchronizes with tg3->napi.poll()
5071 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5072 work_done += tg3_rx(tnapi, budget - work_done);
5074 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
5075 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5077 u32 std_prod_idx = dpr->rx_std_prod_idx;
5078 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5080 for (i = 1; i < tp->irq_cnt; i++)
5081 err |= tg3_rx_prodring_xfer(tp, dpr,
5082 &tp->napi[i].prodring);
5086 if (std_prod_idx != dpr->rx_std_prod_idx)
5087 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5088 dpr->rx_std_prod_idx);
5090 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5091 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5092 dpr->rx_jmb_prod_idx);
5097 tw32_f(HOSTCC_MODE, tp->coal_now);
5103 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5105 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5106 struct tg3 *tp = tnapi->tp;
5108 struct tg3_hw_status *sblk = tnapi->hw_status;
5111 work_done = tg3_poll_work(tnapi, work_done, budget);
5113 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5116 if (unlikely(work_done >= budget))
5119 /* tp->last_tag is used in tg3_int_reenable() below
5120 * to tell the hw how much work has been processed,
5121 * so we must read it before checking for more work.
5123 tnapi->last_tag = sblk->status_tag;
5124 tnapi->last_irq_tag = tnapi->last_tag;
5127 /* check for RX/TX work to do */
5128 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5129 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5130 napi_complete(napi);
5131 /* Reenable interrupts. */
5132 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5141 /* work_done is guaranteed to be less than budget. */
5142 napi_complete(napi);
5143 schedule_work(&tp->reset_task);
5147 static int tg3_poll(struct napi_struct *napi, int budget)
5149 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5150 struct tg3 *tp = tnapi->tp;
5152 struct tg3_hw_status *sblk = tnapi->hw_status;
5157 work_done = tg3_poll_work(tnapi, work_done, budget);
5159 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5162 if (unlikely(work_done >= budget))
5165 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5166 /* tp->last_tag is used in tg3_int_reenable() below
5167 * to tell the hw how much work has been processed,
5168 * so we must read it before checking for more work.
5170 tnapi->last_tag = sblk->status_tag;
5171 tnapi->last_irq_tag = tnapi->last_tag;
5174 sblk->status &= ~SD_STATUS_UPDATED;
5176 if (likely(!tg3_has_work(tnapi))) {
5177 napi_complete(napi);
5178 tg3_int_reenable(tnapi);
5186 /* work_done is guaranteed to be less than budget. */
5187 napi_complete(napi);
5188 schedule_work(&tp->reset_task);
5192 static void tg3_napi_disable(struct tg3 *tp)
5196 for (i = tp->irq_cnt - 1; i >= 0; i--)
5197 napi_disable(&tp->napi[i].napi);
5200 static void tg3_napi_enable(struct tg3 *tp)
5204 for (i = 0; i < tp->irq_cnt; i++)
5205 napi_enable(&tp->napi[i].napi);
5208 static void tg3_napi_init(struct tg3 *tp)
5212 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5213 for (i = 1; i < tp->irq_cnt; i++)
5214 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5217 static void tg3_napi_fini(struct tg3 *tp)
5221 for (i = 0; i < tp->irq_cnt; i++)
5222 netif_napi_del(&tp->napi[i].napi);
5225 static inline void tg3_netif_stop(struct tg3 *tp)
5227 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5228 tg3_napi_disable(tp);
5229 netif_tx_disable(tp->dev);
5232 static inline void tg3_netif_start(struct tg3 *tp)
5234 /* NOTE: unconditional netif_tx_wake_all_queues is only
5235 * appropriate so long as all callers are assured to
5236 * have free tx slots (such as after tg3_init_hw)
5238 netif_tx_wake_all_queues(tp->dev);
5240 tg3_napi_enable(tp);
5241 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5242 tg3_enable_ints(tp);
5245 static void tg3_irq_quiesce(struct tg3 *tp)
5249 BUG_ON(tp->irq_sync);
5254 for (i = 0; i < tp->irq_cnt; i++)
5255 synchronize_irq(tp->napi[i].irq_vec);
5258 /* Fully shut down all tg3 driver activity elsewhere in the system.
5259 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5260 * with this shutdown as well. Most of the time this is not necessary,
5261 * except when shutting down the device.
5263 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5265 spin_lock_bh(&tp->lock);
5267 tg3_irq_quiesce(tp);
5270 static inline void tg3_full_unlock(struct tg3 *tp)
5272 spin_unlock_bh(&tp->lock);
5275 /* One-shot MSI handler - Chip automatically disables interrupt
5276 * after sending MSI so driver doesn't have to do it.
5278 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5280 struct tg3_napi *tnapi = dev_id;
5281 struct tg3 *tp = tnapi->tp;
5283 prefetch(tnapi->hw_status);
5285 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5287 if (likely(!tg3_irq_sync(tp)))
5288 napi_schedule(&tnapi->napi);
5293 /* MSI ISR - No need to check for interrupt sharing and no need to
5294 * flush status block and interrupt mailbox. PCI ordering rules
5295 * guarantee that MSI will arrive after the status block.
5297 static irqreturn_t tg3_msi(int irq, void *dev_id)
5299 struct tg3_napi *tnapi = dev_id;
5300 struct tg3 *tp = tnapi->tp;
5302 prefetch(tnapi->hw_status);
5304 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5306 * Writing any value to intr-mbox-0 clears PCI INTA# and
5307 * chip-internal interrupt pending events.
5308 * Writing non-zero to intr-mbox-0 additionally tells the
5309 * NIC to stop sending us irqs, engaging "in-intr-handler"
5312 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5313 if (likely(!tg3_irq_sync(tp)))
5314 napi_schedule(&tnapi->napi);
5316 return IRQ_RETVAL(1);
5319 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5321 struct tg3_napi *tnapi = dev_id;
5322 struct tg3 *tp = tnapi->tp;
5323 struct tg3_hw_status *sblk = tnapi->hw_status;
5324 unsigned int handled = 1;
5326 /* In INTx mode, it is possible for the interrupt to arrive at
5327 * the CPU before the status block posted prior to the interrupt
5328 * becomes visible. Reading the PCI State register will confirm
5329 * whether the interrupt is ours and will flush the status block.
5331 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5332 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5333 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5340 * Writing any value to intr-mbox-0 clears PCI INTA# and
5341 * chip-internal interrupt pending events.
5342 * Writing non-zero to intr-mbox-0 additionally tells the
5343 * NIC to stop sending us irqs, engaging "in-intr-handler"
5346 * Flush the mailbox to de-assert the IRQ immediately to prevent
5347 * spurious interrupts. The flush impacts performance but
5348 * excessive spurious interrupts can be worse in some cases.
5350 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5351 if (tg3_irq_sync(tp))
5353 sblk->status &= ~SD_STATUS_UPDATED;
5354 if (likely(tg3_has_work(tnapi))) {
5355 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5356 napi_schedule(&tnapi->napi);
5358 /* No work, shared interrupt perhaps? re-enable
5359 * interrupts, and flush that PCI write
5361 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5365 return IRQ_RETVAL(handled);
5368 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5370 struct tg3_napi *tnapi = dev_id;
5371 struct tg3 *tp = tnapi->tp;
5372 struct tg3_hw_status *sblk = tnapi->hw_status;
5373 unsigned int handled = 1;
5375 /* In INTx mode, it is possible for the interrupt to arrive at
5376 * the CPU before the status block posted prior to the interrupt
5377 * becomes visible. Reading the PCI State register will confirm
5378 * whether the interrupt is ours and will flush the status block.
5380 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5381 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5382 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5389 * writing any value to intr-mbox-0 clears PCI INTA# and
5390 * chip-internal interrupt pending events.
5391 * writing non-zero to intr-mbox-0 additionally tells the
5392 * NIC to stop sending us irqs, engaging "in-intr-handler"
5395 * Flush the mailbox to de-assert the IRQ immediately to prevent
5396 * spurious interrupts. The flush impacts performance but
5397 * excessive spurious interrupts can be worse in some cases.
5399 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5402 * In a shared interrupt configuration, sometimes other devices'
5403 * interrupts will scream. We record the current status tag here
5404 * so that the above check can report that the screaming interrupts
5405 * are unhandled. Eventually they will be silenced.
5407 tnapi->last_irq_tag = sblk->status_tag;
5409 if (tg3_irq_sync(tp))
5412 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5414 napi_schedule(&tnapi->napi);
5417 return IRQ_RETVAL(handled);
5420 /* ISR for interrupt test */
5421 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5423 struct tg3_napi *tnapi = dev_id;
5424 struct tg3 *tp = tnapi->tp;
5425 struct tg3_hw_status *sblk = tnapi->hw_status;
5427 if ((sblk->status & SD_STATUS_UPDATED) ||
5428 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5429 tg3_disable_ints(tp);
5430 return IRQ_RETVAL(1);
5432 return IRQ_RETVAL(0);
5435 static int tg3_init_hw(struct tg3 *, int);
5436 static int tg3_halt(struct tg3 *, int, int);
5438 /* Restart hardware after configuration changes, self-test, etc.
5439 * Invoked with tp->lock held.
5441 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5442 __releases(tp->lock)
5443 __acquires(tp->lock)
5447 err = tg3_init_hw(tp, reset_phy);
5450 "Failed to re-initialize device, aborting\n");
5451 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5452 tg3_full_unlock(tp);
5453 del_timer_sync(&tp->timer);
5455 tg3_napi_enable(tp);
5457 tg3_full_lock(tp, 0);
5462 #ifdef CONFIG_NET_POLL_CONTROLLER
5463 static void tg3_poll_controller(struct net_device *dev)
5466 struct tg3 *tp = netdev_priv(dev);
5468 for (i = 0; i < tp->irq_cnt; i++)
5469 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5473 static void tg3_reset_task(struct work_struct *work)
5475 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5477 unsigned int restart_timer;
5479 tg3_full_lock(tp, 0);
5481 if (!netif_running(tp->dev)) {
5482 tg3_full_unlock(tp);
5486 tg3_full_unlock(tp);
5492 tg3_full_lock(tp, 1);
5494 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5495 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5497 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5498 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5499 tp->write32_rx_mbox = tg3_write_flush_reg32;
5500 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5501 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5504 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5505 err = tg3_init_hw(tp, 1);
5509 tg3_netif_start(tp);
5512 mod_timer(&tp->timer, jiffies + 1);
5515 tg3_full_unlock(tp);
5521 static void tg3_dump_short_state(struct tg3 *tp)
5523 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5524 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5525 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5526 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5529 static void tg3_tx_timeout(struct net_device *dev)
5531 struct tg3 *tp = netdev_priv(dev);
5533 if (netif_msg_tx_err(tp)) {
5534 netdev_err(dev, "transmit timed out, resetting\n");
5535 tg3_dump_short_state(tp);
5538 schedule_work(&tp->reset_task);
5541 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5542 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5544 u32 base = (u32) mapping & 0xffffffff;
5546 return (base > 0xffffdcc0) && (base + len + 8 < base);
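/* Worked example: a mapping at 0xfffff000 with len = 0x2000 gives
 * base = 0xfffff000 (above 0xffffdcc0, i.e. within a few KB of the
 * 4GB boundary) and base + len + 8 = 0x1008 in 32-bit arithmetic,
 * which wraps below base, so the buffer is reported as crossing a
 * 4GB boundary.  The "+ 8" appears to act as a small guard margin
 * around the boundary.
 */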
5549 /* Test for DMA addresses > 40-bit */
5550 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5553 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5554 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5555 return ((u64) mapping + len) > DMA_BIT_MASK(40);
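/* Example: DMA_BIT_MASK(40) is 0xffffffffff.  A mapping starting at
 * 0xfffffff000 with len = 0x2000 ends at 0x10000001000, which is
 * above the 40-bit limit, so the workaround path is taken on parts
 * flagged with TG3_FLAG_40BIT_DMA_BUG.
 */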
5562 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5564 /* Work around 4GB and 40-bit hardware DMA bugs. */
5565 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5566 struct sk_buff *skb, u32 last_plus_one,
5567 u32 *start, u32 base_flags, u32 mss)
5569 struct tg3 *tp = tnapi->tp;
5570 struct sk_buff *new_skb;
5571 dma_addr_t new_addr = 0;
5575 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5576 new_skb = skb_copy(skb, GFP_ATOMIC);
5578 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5580 new_skb = skb_copy_expand(skb,
5581 skb_headroom(skb) + more_headroom,
5582 skb_tailroom(skb), GFP_ATOMIC);
5588 /* New SKB is guaranteed to be linear. */
5590 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5592 /* Make sure the mapping succeeded */
5593 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5595 dev_kfree_skb(new_skb);
5598 /* Make sure new skb does not cross any 4G boundaries.
5599 * Drop the packet if it does.
5601 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5602 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5603 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5606 dev_kfree_skb(new_skb);
5609 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5610 base_flags, 1 | (mss << 1));
5611 *start = NEXT_TX(entry);
5615 /* Now clean up the sw ring entries. */
5617 while (entry != last_plus_one) {
5621 len = skb_headlen(skb);
5623 len = skb_shinfo(skb)->frags[i-1].size;
5625 pci_unmap_single(tp->pdev,
5626 dma_unmap_addr(&tnapi->tx_buffers[entry],
5628 len, PCI_DMA_TODEVICE);
5630 tnapi->tx_buffers[entry].skb = new_skb;
5631 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5634 tnapi->tx_buffers[entry].skb = NULL;
5636 entry = NEXT_TX(entry);
5645 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5646 dma_addr_t mapping, int len, u32 flags,
5649 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5650 int is_end = (mss_and_is_end & 0x1);
5651 u32 mss = (mss_and_is_end >> 1);
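/* mss_and_is_end is packed by the callers as (mss << 1) | is_end:
 * bit 0 marks the final descriptor of the packet and the remaining
 * bits carry the TSO MSS (zero for non-TSO frames).
 */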
5655 flags |= TXD_FLAG_END;
5656 if (flags & TXD_FLAG_VLAN) {
5657 vlan_tag = flags >> 16;
5660 vlan_tag |= (mss << TXD_MSS_SHIFT);
5662 txd->addr_hi = ((u64) mapping >> 32);
5663 txd->addr_lo = ((u64) mapping & 0xffffffff);
5664 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5665 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5668 /* hard_start_xmit for devices that don't have any bugs and
5669 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5671 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5672 struct net_device *dev)
5674 struct tg3 *tp = netdev_priv(dev);
5675 u32 len, entry, base_flags, mss;
5677 struct tg3_napi *tnapi;
5678 struct netdev_queue *txq;
5679 unsigned int i, last;
5681 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5682 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5683 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5686 /* We are running in BH disabled context with netif_tx_lock
5687 * and TX reclaim runs via tp->napi.poll inside of a software
5688 * interrupt. Furthermore, IRQ processing runs lockless so we have
5689 * no IRQ context deadlocks to worry about either. Rejoice!
5691 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5692 if (!netif_tx_queue_stopped(txq)) {
5693 netif_tx_stop_queue(txq);
5695 /* This is a hard error, log it. */
5697 "BUG! Tx Ring full when queue awake!\n");
5699 return NETDEV_TX_BUSY;
5702 entry = tnapi->tx_prod;
5704 mss = skb_shinfo(skb)->gso_size;
5706 int tcp_opt_len, ip_tcp_len;
5709 if (skb_header_cloned(skb) &&
5710 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5715 if (skb_is_gso_v6(skb)) {
5716 hdrlen = skb_headlen(skb) - ETH_HLEN;
5718 struct iphdr *iph = ip_hdr(skb);
5720 tcp_opt_len = tcp_optlen(skb);
5721 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5724 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5725 hdrlen = ip_tcp_len + tcp_opt_len;
5728 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5729 mss |= (hdrlen & 0xc) << 12;
5731 base_flags |= 0x00000010;
5732 base_flags |= (hdrlen & 0x3e0) << 5;
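/* With HW_TSO_3 the TSO header length does not fit in a single
 * descriptor field, so it is apparently scattered across both words:
 * bits 2-3 of hdrlen land in the upper bits of the mss field, while
 * bit 4 and bits 5-9 are folded into base_flags (header lengths are
 * multiples of 4, so bits 0-1 are always zero).
 */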
5736 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5737 TXD_FLAG_CPU_POST_DMA);
5739 tcp_hdr(skb)->check = 0;
5741 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5742 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5745 #if TG3_VLAN_TAG_USED
5746 if (vlan_tx_tag_present(skb))
5747 base_flags |= (TXD_FLAG_VLAN |
5748 (vlan_tx_tag_get(skb) << 16));
5751 len = skb_headlen(skb);
5753 /* Queue skb data, a.k.a. the main skb fragment. */
5754 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5755 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5760 tnapi->tx_buffers[entry].skb = skb;
5761 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5763 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5764 !mss && skb->len > ETH_DATA_LEN)
5765 base_flags |= TXD_FLAG_JMB_PKT;
5767 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5768 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5770 entry = NEXT_TX(entry);
5772 /* Now loop through additional data fragments, and queue them. */
5773 if (skb_shinfo(skb)->nr_frags > 0) {
5774 last = skb_shinfo(skb)->nr_frags - 1;
5775 for (i = 0; i <= last; i++) {
5776 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5779 mapping = pci_map_page(tp->pdev,
5782 len, PCI_DMA_TODEVICE);
5783 if (pci_dma_mapping_error(tp->pdev, mapping))
5786 tnapi->tx_buffers[entry].skb = NULL;
5787 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5790 tg3_set_txd(tnapi, entry, mapping, len,
5791 base_flags, (i == last) | (mss << 1));
5793 entry = NEXT_TX(entry);
5797 /* Packets are ready, update Tx producer idx locally and on card. */
5798 tw32_tx_mbox(tnapi->prodmbox, entry);
5800 tnapi->tx_prod = entry;
5801 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5802 netif_tx_stop_queue(txq);
5804 /* netif_tx_stop_queue() must be done before checking
5805 * the tx index in tg3_tx_avail() below, because in
5806 * tg3_tx(), we update tx index before checking for
5807 * netif_tx_queue_stopped().
5810 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5811 netif_tx_wake_queue(txq);
5817 return NETDEV_TX_OK;
5821 entry = tnapi->tx_prod;
5822 tnapi->tx_buffers[entry].skb = NULL;
5823 pci_unmap_single(tp->pdev,
5824 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5827 for (i = 0; i <= last; i++) {
5828 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5829 entry = NEXT_TX(entry);
5831 pci_unmap_page(tp->pdev,
5832 dma_unmap_addr(&tnapi->tx_buffers[entry],
5834 frag->size, PCI_DMA_TODEVICE);
5838 return NETDEV_TX_OK;
5841 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5842 struct net_device *);
5844 /* Use GSO to work around a rare TSO bug that may be triggered when the
5845 * TSO header is greater than 80 bytes.
5847 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5849 struct sk_buff *segs, *nskb;
5850 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5852 /* Estimate the number of fragments in the worst case */
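/* The factor of 3 above is a conservative heuristic rather than a
 * documented limit: each segment produced by skb_gso_segment() is
 * assumed to need at most roughly three tx descriptors (the linear
 * header area plus a couple of page fragments).
 */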
5853 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5854 netif_stop_queue(tp->dev);
5856 /* netif_tx_stop_queue() must be done before checking
5857 * the tx index in tg3_tx_avail() below, because in
5858 * tg3_tx(), we update tx index before checking for
5859 * netif_tx_queue_stopped().
5862 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5863 return NETDEV_TX_BUSY;
5865 netif_wake_queue(tp->dev);
5868 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5870 goto tg3_tso_bug_end;
5876 tg3_start_xmit_dma_bug(nskb, tp->dev);
5882 return NETDEV_TX_OK;
5885 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5886 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5888 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5889 struct net_device *dev)
5891 struct tg3 *tp = netdev_priv(dev);
5892 u32 len, entry, base_flags, mss;
5893 int would_hit_hwbug;
5895 struct tg3_napi *tnapi;
5896 struct netdev_queue *txq;
5897 unsigned int i, last;
5899 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5900 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5901 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5904 /* We are running in BH disabled context with netif_tx_lock
5905 * and TX reclaim runs via tp->napi.poll inside of a software
5906 * interrupt. Furthermore, IRQ processing runs lockless so we have
5907 * no IRQ context deadlocks to worry about either. Rejoice!
5909 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5910 if (!netif_tx_queue_stopped(txq)) {
5911 netif_tx_stop_queue(txq);
5913 /* This is a hard error, log it. */
5915 "BUG! Tx Ring full when queue awake!\n");
5917 return NETDEV_TX_BUSY;
5920 entry = tnapi->tx_prod;
5922 if (skb->ip_summed == CHECKSUM_PARTIAL)
5923 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5925 mss = skb_shinfo(skb)->gso_size;
5928 u32 tcp_opt_len, hdr_len;
5930 if (skb_header_cloned(skb) &&
5931 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5937 tcp_opt_len = tcp_optlen(skb);
5939 if (skb_is_gso_v6(skb)) {
5940 hdr_len = skb_headlen(skb) - ETH_HLEN;
5944 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5945 hdr_len = ip_tcp_len + tcp_opt_len;
5948 iph->tot_len = htons(mss + hdr_len);
5951 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5952 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5953 return tg3_tso_bug(tp, skb);
5955 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5956 TXD_FLAG_CPU_POST_DMA);
5958 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5959 tcp_hdr(skb)->check = 0;
5960 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5962 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5967 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5968 mss |= (hdr_len & 0xc) << 12;
5970 base_flags |= 0x00000010;
5971 base_flags |= (hdr_len & 0x3e0) << 5;
5972 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5973 mss |= hdr_len << 9;
5974 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5975 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5976 if (tcp_opt_len || iph->ihl > 5) {
5979 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5980 mss |= (tsflags << 11);
5983 if (tcp_opt_len || iph->ihl > 5) {
5986 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5987 base_flags |= tsflags << 12;
5991 #if TG3_VLAN_TAG_USED
5992 if (vlan_tx_tag_present(skb))
5993 base_flags |= (TXD_FLAG_VLAN |
5994 (vlan_tx_tag_get(skb) << 16));
5997 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5998 !mss && skb->len > ETH_DATA_LEN)
5999 base_flags |= TXD_FLAG_JMB_PKT;
6001 len = skb_headlen(skb);
6003 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6004 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6009 tnapi->tx_buffers[entry].skb = skb;
6010 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6012 would_hit_hwbug = 0;
6014 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
6015 would_hit_hwbug = 1;
6017 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6018 tg3_4g_overflow_test(mapping, len))
6019 would_hit_hwbug = 1;
6021 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6022 tg3_40bit_overflow_test(tp, mapping, len))
6023 would_hit_hwbug = 1;
6025 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
6026 would_hit_hwbug = 1;
6028 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6029 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6031 entry = NEXT_TX(entry);
6033 /* Now loop through additional data fragments, and queue them. */
6034 if (skb_shinfo(skb)->nr_frags > 0) {
6035 last = skb_shinfo(skb)->nr_frags - 1;
6036 for (i = 0; i <= last; i++) {
6037 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6040 mapping = pci_map_page(tp->pdev,
6043 len, PCI_DMA_TODEVICE);
6045 tnapi->tx_buffers[entry].skb = NULL;
6046 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6048 if (pci_dma_mapping_error(tp->pdev, mapping))
6051 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
6053 would_hit_hwbug = 1;
6055 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6056 tg3_4g_overflow_test(mapping, len))
6057 would_hit_hwbug = 1;
6059 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6060 tg3_40bit_overflow_test(tp, mapping, len))
6061 would_hit_hwbug = 1;
6063 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6064 tg3_set_txd(tnapi, entry, mapping, len,
6065 base_flags, (i == last)|(mss << 1));
6067 tg3_set_txd(tnapi, entry, mapping, len,
6068 base_flags, (i == last));
6070 entry = NEXT_TX(entry);
6074 if (would_hit_hwbug) {
6075 u32 last_plus_one = entry;
6078 start = entry - 1 - skb_shinfo(skb)->nr_frags;
6079 start &= (TG3_TX_RING_SIZE - 1);
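/* At this point 'entry' is one slot past the last descriptor queued
 * for this skb, so backing up by (1 + nr_frags) puts 'start' on the
 * descriptor holding the skb head; the workaround below rewrites the
 * range [start, last_plus_one).
 */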
6081 /* If the workaround fails due to memory/mapping
6082 * failure, silently drop this packet.
6084 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
6085 &start, base_flags, mss))
6091 /* Packets are ready, update Tx producer idx locally and on card. */
6092 tw32_tx_mbox(tnapi->prodmbox, entry);
6094 tnapi->tx_prod = entry;
6095 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6096 netif_tx_stop_queue(txq);
6098 /* netif_tx_stop_queue() must be done before checking
6099 * the tx index in tg3_tx_avail() below, because in
6100 * tg3_tx(), we update tx index before checking for
6101 * netif_tx_queue_stopped().
6104 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6105 netif_tx_wake_queue(txq);
6111 return NETDEV_TX_OK;
6115 entry = tnapi->tx_prod;
6116 tnapi->tx_buffers[entry].skb = NULL;
6117 pci_unmap_single(tp->pdev,
6118 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
6121 for (i = 0; i <= last; i++) {
6122 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6123 entry = NEXT_TX(entry);
6125 pci_unmap_page(tp->pdev,
6126 dma_unmap_addr(&tnapi->tx_buffers[entry],
6128 frag->size, PCI_DMA_TODEVICE);
6132 return NETDEV_TX_OK;
6135 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6140 if (new_mtu > ETH_DATA_LEN) {
6141 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6142 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6143 ethtool_op_set_tso(dev, 0);
6145 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6148 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6149 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6150 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6154 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6156 struct tg3 *tp = netdev_priv(dev);
6159 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6162 if (!netif_running(dev)) {
6163 /* We'll just catch it later when the
6166 tg3_set_mtu(dev, tp, new_mtu);
6174 tg3_full_lock(tp, 1);
6176 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6178 tg3_set_mtu(dev, tp, new_mtu);
6180 err = tg3_restart_hw(tp, 0);
6183 tg3_netif_start(tp);
6185 tg3_full_unlock(tp);
6193 static void tg3_rx_prodring_free(struct tg3 *tp,
6194 struct tg3_rx_prodring_set *tpr)
6198 if (tpr != &tp->napi[0].prodring) {
6199 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6200 i = (i + 1) & tp->rx_std_ring_mask)
6201 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6204 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6205 for (i = tpr->rx_jmb_cons_idx;
6206 i != tpr->rx_jmb_prod_idx;
6207 i = (i + 1) & tp->rx_jmb_ring_mask) {
6208 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6216 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6217 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6220 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6221 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6222 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6223 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6228 /* Initialize rx rings for packet processing.
6230 * The chip has been shut down and the driver detached from
6231 * the networking, so no interrupts or new tx packets will
6232 * end up in the driver. tp->{tx,}lock are held and thus
6235 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6236 struct tg3_rx_prodring_set *tpr)
6238 u32 i, rx_pkt_dma_sz;
6240 tpr->rx_std_cons_idx = 0;
6241 tpr->rx_std_prod_idx = 0;
6242 tpr->rx_jmb_cons_idx = 0;
6243 tpr->rx_jmb_prod_idx = 0;
6245 if (tpr != &tp->napi[0].prodring) {
6246 memset(&tpr->rx_std_buffers[0], 0,
6247 TG3_RX_STD_BUFF_RING_SIZE(tp));
6248 if (tpr->rx_jmb_buffers)
6249 memset(&tpr->rx_jmb_buffers[0], 0,
6250 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6254 /* Zero out all descriptors. */
6255 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6257 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6258 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6259 tp->dev->mtu > ETH_DATA_LEN)
6260 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6261 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6263 /* Initialize invariants of the rings; we only set this
6264 * stuff once. This works because the card does not
6265 * write into the rx buffer posting rings.
6267 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6268 struct tg3_rx_buffer_desc *rxd;
6270 rxd = &tpr->rx_std[i];
6271 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6272 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6273 rxd->opaque = (RXD_OPAQUE_RING_STD |
6274 (i << RXD_OPAQUE_INDEX_SHIFT));
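/* The opaque cookie set here encodes both the ring type and the slot
 * index; tg3_rx() later splits it back apart with
 * RXD_OPAQUE_RING_MASK and RXD_OPAQUE_INDEX_MASK to locate the
 * ring_info that owns the completed buffer.
 */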
6277 /* Now allocate fresh SKBs for each rx ring. */
6278 for (i = 0; i < tp->rx_pending; i++) {
6279 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6280 netdev_warn(tp->dev,
6281 "Using a smaller RX standard ring. Only "
6282 "%d out of %d buffers were allocated "
6283 "successfully\n", i, tp->rx_pending);
6291 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
6292 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6295 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6297 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6300 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6301 struct tg3_rx_buffer_desc *rxd;
6303 rxd = &tpr->rx_jmb[i].std;
6304 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6305 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6307 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6308 (i << RXD_OPAQUE_INDEX_SHIFT));
6311 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6312 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6313 netdev_warn(tp->dev,
6314 "Using a smaller RX jumbo ring. Only %d "
6315 "out of %d buffers were allocated "
6316 "successfully\n", i, tp->rx_jumbo_pending);
6319 tp->rx_jumbo_pending = i;
6328 tg3_rx_prodring_free(tp, tpr);
6332 static void tg3_rx_prodring_fini(struct tg3 *tp,
6333 struct tg3_rx_prodring_set *tpr)
6335 kfree(tpr->rx_std_buffers);
6336 tpr->rx_std_buffers = NULL;
6337 kfree(tpr->rx_jmb_buffers);
6338 tpr->rx_jmb_buffers = NULL;
6340 pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
6341 tpr->rx_std, tpr->rx_std_mapping);
6345 pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
6346 tpr->rx_jmb, tpr->rx_jmb_mapping);
6351 static int tg3_rx_prodring_init(struct tg3 *tp,
6352 struct tg3_rx_prodring_set *tpr)
6354 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6356 if (!tpr->rx_std_buffers)
6359 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
6360 &tpr->rx_std_mapping);
6364 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6365 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6366 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6368 if (!tpr->rx_jmb_buffers)
6371 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6372 TG3_RX_JMB_RING_BYTES(tp),
6373 &tpr->rx_jmb_mapping);
6381 tg3_rx_prodring_fini(tp, tpr);
6385 /* Free up pending packets in all rx/tx rings.
6387 * The chip has been shut down and the driver detached from
6388 * the networking, so no interrupts or new tx packets will
6389 * end up in the driver. tp->{tx,}lock is not held and we are not
6390 * in an interrupt context and thus may sleep.
6392 static void tg3_free_rings(struct tg3 *tp)
6396 for (j = 0; j < tp->irq_cnt; j++) {
6397 struct tg3_napi *tnapi = &tp->napi[j];
6399 tg3_rx_prodring_free(tp, &tnapi->prodring);
6401 if (!tnapi->tx_buffers)
6404 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6405 struct ring_info *txp;
6406 struct sk_buff *skb;
6409 txp = &tnapi->tx_buffers[i];
6417 pci_unmap_single(tp->pdev,
6418 dma_unmap_addr(txp, mapping),
6425 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6426 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6427 pci_unmap_page(tp->pdev,
6428 dma_unmap_addr(txp, mapping),
6429 skb_shinfo(skb)->frags[k].size,
6434 dev_kfree_skb_any(skb);
6439 /* Initialize tx/rx rings for packet processing.
6441 * The chip has been shut down and the driver detached from
6442 * the networking, so no interrupts or new tx packets will
6443 * end up in the driver. tp->{tx,}lock are held and thus
6446 static int tg3_init_rings(struct tg3 *tp)
6450 /* Free up all the SKBs. */
6453 for (i = 0; i < tp->irq_cnt; i++) {
6454 struct tg3_napi *tnapi = &tp->napi[i];
6456 tnapi->last_tag = 0;
6457 tnapi->last_irq_tag = 0;
6458 tnapi->hw_status->status = 0;
6459 tnapi->hw_status->status_tag = 0;
6460 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6465 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6467 tnapi->rx_rcb_ptr = 0;
6469 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6471 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6481 * Must not be invoked with interrupt sources disabled and
6482 * the hardware shut down.
6484 static void tg3_free_consistent(struct tg3 *tp)
6488 for (i = 0; i < tp->irq_cnt; i++) {
6489 struct tg3_napi *tnapi = &tp->napi[i];
6491 if (tnapi->tx_ring) {
6492 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6493 tnapi->tx_ring, tnapi->tx_desc_mapping);
6494 tnapi->tx_ring = NULL;
6497 kfree(tnapi->tx_buffers);
6498 tnapi->tx_buffers = NULL;
6500 if (tnapi->rx_rcb) {
6501 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6503 tnapi->rx_rcb_mapping);
6504 tnapi->rx_rcb = NULL;
6507 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6509 if (tnapi->hw_status) {
6510 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6512 tnapi->status_mapping);
6513 tnapi->hw_status = NULL;
6518 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6519 tp->hw_stats, tp->stats_mapping);
6520 tp->hw_stats = NULL;
6525 * Must not be invoked with interrupt sources disabled and
6526 * the hardware shut down. Can sleep.
6528 static int tg3_alloc_consistent(struct tg3 *tp)
6532 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6533 sizeof(struct tg3_hw_stats),
6534 &tp->stats_mapping);
6538 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6540 for (i = 0; i < tp->irq_cnt; i++) {
6541 struct tg3_napi *tnapi = &tp->napi[i];
6542 struct tg3_hw_status *sblk;
6544 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6546 &tnapi->status_mapping);
6547 if (!tnapi->hw_status)
6550 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6551 sblk = tnapi->hw_status;
6553 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6556 /* If multivector TSS is enabled, vector 0 does not handle
6557 * tx interrupts. Don't allocate any resources for it.
6559 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6560 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6561 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6564 if (!tnapi->tx_buffers)
6567 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6569 &tnapi->tx_desc_mapping);
6570 if (!tnapi->tx_ring)
6575 * When RSS is enabled, the status block format changes
6576 * slightly. The "rx_jumbo_consumer", "reserved",
6577 * and "rx_mini_consumer" members get mapped to the
6578 * other three rx return ring producer indexes.
6582 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6585 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6588 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6591 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6596 * If multivector RSS is enabled, vector 0 does not handle
6597 * rx or tx interrupts. Don't allocate any resources for it.
6599 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6602 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6603 TG3_RX_RCB_RING_BYTES(tp),
6604 &tnapi->rx_rcb_mapping);
6608 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6614 tg3_free_consistent(tp);
6618 #define MAX_WAIT_CNT 1000
6620 /* To stop a block, clear the enable bit and poll till it
6621 * clears. tp->lock is held.
6623 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6628 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6635 /* We can't enable/disable these bits of the
6636 * 5705/5750, just say success.
6649 for (i = 0; i < MAX_WAIT_CNT; i++) {
6652 if ((val & enable_bit) == 0)
6656 if (i == MAX_WAIT_CNT && !silent) {
6657 dev_err(&tp->pdev->dev,
6658 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6666 /* tp->lock is held. */
6667 static int tg3_abort_hw(struct tg3 *tp, int silent)
6671 tg3_disable_ints(tp);
6673 tp->rx_mode &= ~RX_MODE_ENABLE;
6674 tw32_f(MAC_RX_MODE, tp->rx_mode);
6677 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6678 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6679 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6680 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6681 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6682 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6684 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6685 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6686 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6687 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6688 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6689 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6690 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6692 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6693 tw32_f(MAC_MODE, tp->mac_mode);
6696 tp->tx_mode &= ~TX_MODE_ENABLE;
6697 tw32_f(MAC_TX_MODE, tp->tx_mode);
6699 for (i = 0; i < MAX_WAIT_CNT; i++) {
6701 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6704 if (i >= MAX_WAIT_CNT) {
6705 dev_err(&tp->pdev->dev,
6706 "%s timed out, TX_MODE_ENABLE will not clear "
6707 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6711 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6712 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6713 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6715 tw32(FTQ_RESET, 0xffffffff);
6716 tw32(FTQ_RESET, 0x00000000);
6718 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6719 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6721 for (i = 0; i < tp->irq_cnt; i++) {
6722 struct tg3_napi *tnapi = &tp->napi[i];
6723 if (tnapi->hw_status)
6724 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6727 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6732 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6737 /* NCSI does not support APE events */
6738 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6741 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6742 if (apedata != APE_SEG_SIG_MAGIC)
6745 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6746 if (!(apedata & APE_FW_STATUS_READY))
6749 /* Wait for up to 1 millisecond for APE to service previous event. */
6750 for (i = 0; i < 10; i++) {
6751 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6754 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6756 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6757 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6758 event | APE_EVENT_STATUS_EVENT_PENDING);
6760 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6762 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6768 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6769 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6772 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6777 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6781 case RESET_KIND_INIT:
6782 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6783 APE_HOST_SEG_SIG_MAGIC);
6784 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6785 APE_HOST_SEG_LEN_MAGIC);
6786 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6787 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6788 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6789 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6790 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6791 APE_HOST_BEHAV_NO_PHYLOCK);
6792 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6793 TG3_APE_HOST_DRVR_STATE_START);
6795 event = APE_EVENT_STATUS_STATE_START;
6797 case RESET_KIND_SHUTDOWN:
6798 /* With the interface we are currently using,
6799 * APE does not track driver state. Wiping
6800 * out the HOST SEGMENT SIGNATURE forces
6801 * the APE to assume OS absent status.
6803 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6805 if (device_may_wakeup(&tp->pdev->dev) &&
6806 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6807 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6808 TG3_APE_HOST_WOL_SPEED_AUTO);
6809 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6811 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6813 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6815 event = APE_EVENT_STATUS_STATE_UNLOAD;
6817 case RESET_KIND_SUSPEND:
6818 event = APE_EVENT_STATUS_STATE_SUSPEND;
6824 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6826 tg3_ape_send_event(tp, event);
6829 /* tp->lock is held. */
6830 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6832 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6833 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6835 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6837 case RESET_KIND_INIT:
6838 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6842 case RESET_KIND_SHUTDOWN:
6843 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6847 case RESET_KIND_SUSPEND:
6848 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6857 if (kind == RESET_KIND_INIT ||
6858 kind == RESET_KIND_SUSPEND)
6859 tg3_ape_driver_state_change(tp, kind);
6862 /* tp->lock is held. */
6863 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6865 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6867 case RESET_KIND_INIT:
6868 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6869 DRV_STATE_START_DONE);
6872 case RESET_KIND_SHUTDOWN:
6873 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6874 DRV_STATE_UNLOAD_DONE);
6882 if (kind == RESET_KIND_SHUTDOWN)
6883 tg3_ape_driver_state_change(tp, kind);
6886 /* tp->lock is held. */
6887 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6889 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6891 case RESET_KIND_INIT:
6892 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6896 case RESET_KIND_SHUTDOWN:
6897 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6901 case RESET_KIND_SUSPEND:
6902 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6912 static int tg3_poll_fw(struct tg3 *tp)
6917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6918 /* Wait up to 20ms for init done. */
6919 for (i = 0; i < 200; i++) {
6920 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6927 /* Wait for firmware initialization to complete. */
6928 for (i = 0; i < 100000; i++) {
6929 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6930 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6935 /* Chip might not be fitted with firmware. Some Sun onboard
6936 * parts are configured like that. So don't signal the timeout
6937 * of the above loop as an error, but do report the lack of
6938 * running firmware once.
6941 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6942 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6944 netdev_info(tp->dev, "No firmware running\n");
6947 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6948 /* The 57765 A0 needs a little more
6949 * time to do some important work.
6950 */
6951 mdelay(10);
6952 }
6954 return 0;
6955 }
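/* Polling budgets used above: the 5906's internal VCPU gets 200 x 100us
 * (the 20 ms quoted in the comment) to assert INIT_DONE, while the other
 * chips get up to 100000 iterations for the bootcode to write the
 * complemented MAGIC1 value back into the firmware mailbox.
 */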
6957 /* Save PCI command register before chip reset */
6958 static void tg3_save_pci_state(struct tg3 *tp)
6959 {
6960 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6961 }
6963 /* Restore PCI state after chip reset */
6964 static void tg3_restore_pci_state(struct tg3 *tp)
6965 {
6966 u32 val;
6968 /* Re-enable indirect register accesses. */
6969 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6970 tp->misc_host_ctrl);
6972 /* Set MAX PCI retry to zero. */
6973 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6974 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6975 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6976 val |= PCISTATE_RETRY_SAME_DMA;
6977 /* Allow reads and writes to the APE register and memory space. */
6978 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6979 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6980 PCISTATE_ALLOW_APE_SHMEM_WR |
6981 PCISTATE_ALLOW_APE_PSPACE_WR;
6982 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6984 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6986 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6987 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6988 pcie_set_readrq(tp->pdev, 4096);
6989 else {
6990 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6991 tp->pci_cacheline_sz);
6992 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6993 tp->pci_lat_timer);
6994 }
6995 }
6997 /* Make sure PCI-X relaxed ordering bit is clear. */
6998 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6999 u16 pcix_cmd;
7001 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7002 &pcix_cmd);
7003 pcix_cmd &= ~PCI_X_CMD_ERO;
7004 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7005 pcix_cmd);
7006 }
7008 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
7010 /* Chip reset on 5780 will reset MSI enable bit,
7011 * so need to restore it.
7013 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7014 u16 ctrl;
7016 pci_read_config_word(tp->pdev,
7017 tp->msi_cap + PCI_MSI_FLAGS,
7019 pci_write_config_word(tp->pdev,
7020 tp->msi_cap + PCI_MSI_FLAGS,
7021 ctrl | PCI_MSI_FLAGS_ENABLE);
7022 val = tr32(MSGINT_MODE);
7023 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7024 }
7025 }
7026 }
7028 static void tg3_stop_fw(struct tg3 *);
7030 /* tp->lock is held. */
7031 static int tg3_chip_reset(struct tg3 *tp)
7032 {
7033 u32 val;
7034 void (*write_op)(struct tg3 *, u32, u32);
7035 int i, err;
7037 tg3_nvram_lock(tp);
7039 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7041 /* No matching tg3_nvram_unlock() after this because
7042 * chip reset below will undo the nvram lock.
7043 */
7044 tp->nvram_lock_cnt = 0;
7046 /* GRC_MISC_CFG core clock reset will clear the memory
7047 * enable bit in PCI register 4 and the MSI enable bit
7048 * on some chips, so we save relevant registers here.
7049 */
7050 tg3_save_pci_state(tp);
7052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7053 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7054 tw32(GRC_FASTBOOT_PC, 0);
7056 /*
7057 * We must avoid the readl() that normally takes place.
7058 * It locks machines, causes machine checks, and other
7059 * fun things. So, temporarily disable the 5701
7060 * hardware workaround, while we do the reset.
7061 */
7062 write_op = tp->write32;
7063 if (write_op == tg3_write_flush_reg32)
7064 tp->write32 = tg3_write32;
7066 /* Prevent the irq handler from reading or writing PCI registers
7067 * during chip reset when the memory enable bit in the PCI command
7068 * register may be cleared. The chip does not generate interrupt
7069 * at this time, but the irq handler may still be called due to irq
7070 * sharing or irqpoll.
7071 */
7072 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
7073 for (i = 0; i < tp->irq_cnt; i++) {
7074 struct tg3_napi *tnapi = &tp->napi[i];
7075 if (tnapi->hw_status) {
7076 tnapi->hw_status->status = 0;
7077 tnapi->hw_status->status_tag = 0;
7078 }
7079 tnapi->last_tag = 0;
7080 tnapi->last_irq_tag = 0;
7081 }
7082 smp_mb();
7084 for (i = 0; i < tp->irq_cnt; i++)
7085 synchronize_irq(tp->napi[i].irq_vec);
7087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7088 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7089 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7090 }
7092 /* do the reset */
7093 val = GRC_MISC_CFG_CORECLK_RESET;
7095 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7096 /* Force PCIe 1.0a mode */
7097 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7098 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
7099 tr32(TG3_PCIE_PHY_TSTCTL) ==
7100 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7101 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7103 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7104 tw32(GRC_MISC_CFG, (1 << 29));
7105 val |= (1 << 29);
7106 }
7107 }
7109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7110 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7111 tw32(GRC_VCPU_EXT_CTRL,
7112 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7113 }
7115 /* Manage gphy power for all CPMU absent PCIe devices. */
7116 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7117 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7118 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7120 tw32(GRC_MISC_CFG, val);
7122 /* restore 5701 hardware bug workaround write method */
7123 tp->write32 = write_op;
7125 /* Unfortunately, we have to delay before the PCI read back.
7126 * Some 575X chips even will not respond to a PCI cfg access
7127 * when the reset command is given to the chip.
7129 * How do these hardware designers expect things to work
7130 * properly if the PCI write is posted for a long period
7131 * of time? It is always necessary to have some method by
7132 * which a register read back can occur to push the write
7133 * out which does the reset.
7135 * For most tg3 variants the trick below will work.
7136 */
7138 udelay(120);
7140 /* Flush PCI posted writes. The normal MMIO registers
7141 * are inaccessible at this time so this is the only
7142 * way to do this reliably (actually, this is no longer
7143 * the case, see above). I tried to use indirect
7144 * register read/write but this upset some 5701 variants.
7145 */
7146 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7148 udelay(120);
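/* The config-space read above doubles as a flush: a PCI read cannot pass
 * the posted write that issued the core-clock reset, so once the read
 * completes the reset is known to have reached the chip.
 */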
7150 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
7151 u16 val16;
7153 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7154 u32 cfg_val;
7157 /* Wait for link training to complete. */
7158 for (i = 0; i < 5000; i++)
7159 udelay(100);
7161 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7162 pci_write_config_dword(tp->pdev, 0xc4,
7163 cfg_val | (1 << 15));
7164 }
7166 /* Clear the "no snoop" and "relaxed ordering" bits. */
7167 pci_read_config_word(tp->pdev,
7168 tp->pcie_cap + PCI_EXP_DEVCTL,
7169 &val16);
7170 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7171 PCI_EXP_DEVCTL_NOSNOOP_EN);
7172 /*
7173 * Older PCIe devices only support the 128 byte
7174 * MPS setting. Enforce the restriction.
7175 */
7176 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7177 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7178 pci_write_config_word(tp->pdev,
7179 tp->pcie_cap + PCI_EXP_DEVCTL,
7180 val16);
7182 pcie_set_readrq(tp->pdev, 4096);
7184 /* Clear error status */
7185 pci_write_config_word(tp->pdev,
7186 tp->pcie_cap + PCI_EXP_DEVSTA,
7187 PCI_EXP_DEVSTA_CED |
7188 PCI_EXP_DEVSTA_NFED |
7189 PCI_EXP_DEVSTA_FED |
7190 PCI_EXP_DEVSTA_URD);
7191 }
7193 tg3_restore_pci_state(tp);
7195 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
7197 val = 0;
7198 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7199 val = tr32(MEMARB_MODE);
7200 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7202 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7203 tg3_stop_fw(tp);
7204 tw32(0x5000, 0x400);
7205 }
7207 tw32(GRC_MODE, tp->grc_mode);
7209 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7211 val = tr32(0xc4);
7212 tw32(0xc4, val | (1 << 15));
7213 }
7215 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7217 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7218 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7219 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7220 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7221 }
7223 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7224 tp->mac_mode = MAC_MODE_APE_TX_EN |
7225 MAC_MODE_APE_RX_EN |
7226 MAC_MODE_TDE_ENABLE;
7228 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7229 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7230 val = tp->mac_mode;
7231 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7232 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7233 val = tp->mac_mode;
7234 } else
7235 val = 0;
7237 tw32_f(MAC_MODE, val);
7238 udelay(40);
7240 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7242 err = tg3_poll_fw(tp);
7243 if (err)
7244 return err;
7246 tg3_mdio_start(tp);
7248 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7249 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7250 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7251 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
7252 val = tr32(0x7c00);
7254 tw32(0x7c00, val | (1 << 25));
7255 }
7257 /* Reprobe ASF enable state. */
7258 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7259 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7260 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7261 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7262 u32 nic_cfg;
7264 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7265 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7266 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7267 tp->last_event_jiffies = jiffies;
7268 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7269 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7270 }
7271 }
7273 return 0;
7274 }
7276 /* tp->lock is held. */
7277 static void tg3_stop_fw(struct tg3 *tp)
7278 {
7279 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7280 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7281 /* Wait for RX cpu to ACK the previous event. */
7282 tg3_wait_for_event_ack(tp);
7284 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7286 tg3_generate_fw_event(tp);
7288 /* Wait for RX cpu to ACK this event. */
7289 tg3_wait_for_event_ack(tp);
7290 }
7291 }
7293 /* tp->lock is held. */
7294 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7295 {
7296 int err;
7298 tg3_stop_fw(tp);
7300 tg3_write_sig_pre_reset(tp, kind);
7302 tg3_abort_hw(tp, silent);
7303 err = tg3_chip_reset(tp);
7305 __tg3_set_mac_addr(tp, 0);
7307 tg3_write_sig_legacy(tp, kind);
7308 tg3_write_sig_post_reset(tp, kind);
7310 if (err)
7311 return err;
7313 return 0;
7314 }
7316 #define RX_CPU_SCRATCH_BASE 0x30000
7317 #define RX_CPU_SCRATCH_SIZE 0x04000
7318 #define TX_CPU_SCRATCH_BASE 0x34000
7319 #define TX_CPU_SCRATCH_SIZE 0x04000
7321 /* tp->lock is held. */
7322 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7323 {
7324 int i;
7326 BUG_ON(offset == TX_CPU_BASE &&
7327 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7330 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7332 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7333 return 0;
7334 }
7335 if (offset == RX_CPU_BASE) {
7336 for (i = 0; i < 10000; i++) {
7337 tw32(offset + CPU_STATE, 0xffffffff);
7338 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7339 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7340 break;
7341 }
7343 tw32(offset + CPU_STATE, 0xffffffff);
7344 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7345 udelay(10);
7346 } else {
7347 for (i = 0; i < 10000; i++) {
7348 tw32(offset + CPU_STATE, 0xffffffff);
7349 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7350 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7351 break;
7352 }
7353 }
7355 if (i >= 10000) {
7356 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7357 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7358 return -ENODEV;
7359 }
7361 /* Clear firmware's nvram arbitration. */
7362 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7363 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7364 return 0;
7365 }
7367 struct fw_info {
7368 unsigned int fw_base;
7369 unsigned int fw_len;
7370 const __be32 *fw_data;
7371 };
7373 /* tp->lock is held. */
7374 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7375 int cpu_scratch_size, struct fw_info *info)
7376 {
7377 int err, lock_err, i;
7378 void (*write_op)(struct tg3 *, u32, u32);
7380 if (cpu_base == TX_CPU_BASE &&
7381 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7382 netdev_err(tp->dev,
7383 "%s: Trying to load TX cpu firmware which is 5705\n",
7384 __func__);
7385 return -EINVAL;
7386 }
7388 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7389 write_op = tg3_write_mem;
7391 write_op = tg3_write_indirect_reg32;
7393 /* It is possible that bootcode is still loading at this point.
7394 * Get the nvram lock first before halting the cpu.
7396 lock_err = tg3_nvram_lock(tp);
7397 err = tg3_halt_cpu(tp, cpu_base);
7398 if (!lock_err)
7399 tg3_nvram_unlock(tp);
7400 if (err)
7401 goto out;
7403 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7404 write_op(tp, cpu_scratch_base + i, 0);
7405 tw32(cpu_base + CPU_STATE, 0xffffffff);
7406 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7407 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7408 write_op(tp, (cpu_scratch_base +
7409 (info->fw_base & 0xffff) +
7410 (i * sizeof(u32))),
7411 be32_to_cpu(info->fw_data[i]));
7413 err = 0;
7415 out:
7416 return err;
7417 }
7419 /* tp->lock is held. */
7420 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7421 {
7422 struct fw_info info;
7423 const __be32 *fw_data;
7424 int err, i;
7426 fw_data = (void *)tp->fw->data;
7428 /* Firmware blob starts with version numbers, followed by
7429 start address and length. We are setting complete length.
7430 length = end_address_of_bss - start_address_of_text.
7431 Remainder is the blob to be loaded contiguously
7432 from start address. */
7434 info.fw_base = be32_to_cpu(fw_data[1]);
7435 info.fw_len = tp->fw->size - 12;
7436 info.fw_data = &fw_data[3];
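/* A concrete (hypothetical) example of the blob layout: in a 4096-byte
 * firmware file, fw_data[0] carries the version, fw_data[1] the SRAM load
 * address and fw_data[2] the full text+bss length; the 12-byte header is
 * skipped, so 4096 - 12 payload bytes are copied starting at fw_data[3].
 */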
7438 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7439 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7440 &info);
7441 if (err)
7442 return err;
7444 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7445 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7446 &info);
7447 if (err)
7448 return err;
7450 /* Now startup only the RX cpu. */
7451 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7452 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7454 for (i = 0; i < 5; i++) {
7455 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7456 break;
7457 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7458 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7459 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7460 udelay(1000);
7461 }
7462 if (i >= 5) {
7463 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7464 "should be %08x\n", __func__,
7465 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7466 return -ENODEV;
7467 }
7468 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7469 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7471 return 0;
7472 }
7474 /* 5705 needs a special version of the TSO firmware. */
7476 /* tp->lock is held. */
7477 static int tg3_load_tso_firmware(struct tg3 *tp)
7478 {
7479 struct fw_info info;
7480 const __be32 *fw_data;
7481 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7482 int err, i;
7484 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7485 return 0;
7487 fw_data = (void *)tp->fw->data;
7489 /* Firmware blob starts with version numbers, followed by
7490 start address and length. We are setting complete length.
7491 length = end_address_of_bss - start_address_of_text.
7492 Remainder is the blob to be loaded contiguously
7493 from start address. */
7495 info.fw_base = be32_to_cpu(fw_data[1]);
7496 cpu_scratch_size = tp->fw_len;
7497 info.fw_len = tp->fw->size - 12;
7498 info.fw_data = &fw_data[3];
7500 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7501 cpu_base = RX_CPU_BASE;
7502 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7503 } else {
7504 cpu_base = TX_CPU_BASE;
7505 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7506 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7507 }
7509 err = tg3_load_firmware_cpu(tp, cpu_base,
7510 cpu_scratch_base, cpu_scratch_size,
7511 &info);
7512 if (err)
7513 return err;
7515 /* Now startup the cpu. */
7516 tw32(cpu_base + CPU_STATE, 0xffffffff);
7517 tw32_f(cpu_base + CPU_PC, info.fw_base);
7519 for (i = 0; i < 5; i++) {
7520 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7521 break;
7522 tw32(cpu_base + CPU_STATE, 0xffffffff);
7523 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7524 tw32_f(cpu_base + CPU_PC, info.fw_base);
7525 udelay(1000);
7526 }
7527 if (i >= 5) {
7528 netdev_err(tp->dev,
7529 "%s fails to set CPU PC, is %08x should be %08x\n",
7530 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7531 return -ENODEV;
7532 }
7533 tw32(cpu_base + CPU_STATE, 0xffffffff);
7534 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7536 return 0;
7537 }
7539 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7540 {
7541 struct tg3 *tp = netdev_priv(dev);
7542 struct sockaddr *addr = p;
7543 int err = 0, skip_mac_1 = 0;
7545 if (!is_valid_ether_addr(addr->sa_data))
7546 return -EINVAL;
7548 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7550 if (!netif_running(dev))
7551 return 0;
7553 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7554 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7556 addr0_high = tr32(MAC_ADDR_0_HIGH);
7557 addr0_low = tr32(MAC_ADDR_0_LOW);
7558 addr1_high = tr32(MAC_ADDR_1_HIGH);
7559 addr1_low = tr32(MAC_ADDR_1_LOW);
7561 /* Skip MAC addr 1 if ASF is using it. */
7562 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7563 !(addr1_high == 0 && addr1_low == 0))
7564 skip_mac_1 = 1;
7565 }
7566 spin_lock_bh(&tp->lock);
7567 __tg3_set_mac_addr(tp, skip_mac_1);
7568 spin_unlock_bh(&tp->lock);
7570 return err;
7571 }
7573 /* tp->lock is held. */
7574 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7575 dma_addr_t mapping, u32 maxlen_flags,
7576 u32 nic_addr)
7577 {
7578 tg3_write_mem(tp,
7579 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7580 ((u64) mapping >> 32));
7581 tg3_write_mem(tp,
7582 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7583 ((u64) mapping & 0xffffffff));
7584 tg3_write_mem(tp,
7585 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7586 maxlen_flags);
7588 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7589 tg3_write_mem(tp,
7590 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7591 nic_addr);
7592 }
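/* Each ring control block ("bdinfo") in NIC SRAM is therefore four 32-bit
 * words: host DMA address high and low, a (maxlen << 16) | flags word, and,
 * on pre-5705 parts only, the ring's own address inside NIC SRAM.
 */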
7594 static void __tg3_set_rx_mode(struct net_device *);
7595 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7596 {
7597 int i;
7599 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7600 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7601 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7602 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7604 tw32(HOSTCC_TXCOL_TICKS, 0);
7605 tw32(HOSTCC_TXMAX_FRAMES, 0);
7606 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7609 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7610 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7611 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7612 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7614 tw32(HOSTCC_RXCOL_TICKS, 0);
7615 tw32(HOSTCC_RXMAX_FRAMES, 0);
7616 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7619 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7620 u32 val = ec->stats_block_coalesce_usecs;
7622 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7623 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7625 if (!netif_carrier_ok(tp->dev))
7626 val = 0;
7628 tw32(HOSTCC_STAT_COAL_TICKS, val);
7629 }
7631 for (i = 0; i < tp->irq_cnt - 1; i++) {
7632 u32 reg;
7634 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7635 tw32(reg, ec->rx_coalesce_usecs);
7636 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7637 tw32(reg, ec->rx_max_coalesced_frames);
7638 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7639 tw32(reg, ec->rx_max_coalesced_frames_irq);
7641 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7642 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7643 tw32(reg, ec->tx_coalesce_usecs);
7644 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7645 tw32(reg, ec->tx_max_coalesced_frames);
7646 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7647 tw32(reg, ec->tx_max_coalesced_frames_irq);
7648 }
7649 }
7651 for (; i < tp->irq_max - 1; i++) {
7652 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7653 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7654 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7656 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7657 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7658 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7659 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7660 }
7661 }
7662 }
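/* The per-vector coalescing registers sit at a fixed 0x18-byte stride, so
 * HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18 addresses vector i + 1; vectors beyond
 * irq_cnt are explicitly zeroed above rather than left with stale values.
 */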
7664 /* tp->lock is held. */
7665 static void tg3_rings_reset(struct tg3 *tp)
7666 {
7667 int i;
7668 u32 stblk, txrcb, rxrcb, limit;
7669 struct tg3_napi *tnapi = &tp->napi[0];
7671 /* Disable all transmit rings but the first. */
7672 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7673 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7674 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7676 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7677 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7678 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7680 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7682 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7683 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7684 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7685 BDINFO_FLAGS_DISABLED);
7688 /* Disable all receive return rings but the first. */
7689 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7690 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7691 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7692 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7693 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7694 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7695 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7696 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7698 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7700 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7701 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7702 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7703 BDINFO_FLAGS_DISABLED);
7705 /* Disable interrupts */
7706 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7708 /* Zero mailbox registers. */
7709 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7710 for (i = 1; i < tp->irq_max; i++) {
7711 tp->napi[i].tx_prod = 0;
7712 tp->napi[i].tx_cons = 0;
7713 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7714 tw32_mailbox(tp->napi[i].prodmbox, 0);
7715 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7716 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7717 }
7718 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7719 tw32_mailbox(tp->napi[0].prodmbox, 0);
7720 } else {
7721 tp->napi[0].tx_prod = 0;
7722 tp->napi[0].tx_cons = 0;
7723 tw32_mailbox(tp->napi[0].prodmbox, 0);
7724 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7725 }
7727 /* Make sure the NIC-based send BD rings are disabled. */
7728 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7729 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7730 for (i = 0; i < 16; i++)
7731 tw32_tx_mbox(mbox + i * 8, 0);
7732 }
7734 txrcb = NIC_SRAM_SEND_RCB;
7735 rxrcb = NIC_SRAM_RCV_RET_RCB;
7737 /* Clear status block in ram. */
7738 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7740 /* Set status block DMA address */
7741 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7742 ((u64) tnapi->status_mapping >> 32));
7743 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7744 ((u64) tnapi->status_mapping & 0xffffffff));
7746 if (tnapi->tx_ring) {
7747 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7748 (TG3_TX_RING_SIZE <<
7749 BDINFO_FLAGS_MAXLEN_SHIFT),
7750 NIC_SRAM_TX_BUFFER_DESC);
7751 txrcb += TG3_BDINFO_SIZE;
7752 }
7754 if (tnapi->rx_rcb) {
7755 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7756 (tp->rx_ret_ring_mask + 1) <<
7757 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7758 rxrcb += TG3_BDINFO_SIZE;
7759 }
7761 stblk = HOSTCC_STATBLCK_RING1;
7763 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7764 u64 mapping = (u64)tnapi->status_mapping;
7765 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7766 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7768 /* Clear status block in ram. */
7769 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7771 if (tnapi->tx_ring) {
7772 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7773 (TG3_TX_RING_SIZE <<
7774 BDINFO_FLAGS_MAXLEN_SHIFT),
7775 NIC_SRAM_TX_BUFFER_DESC);
7776 txrcb += TG3_BDINFO_SIZE;
7777 }
7778 if (tnapi->rx_rcb) {
7779 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7780 ((tp->rx_ret_ring_mask + 1) <<
7781 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7784 rxrcb += TG3_BDINFO_SIZE;
7785 }
7786 stblk += 8;
7787 }
7788 /* tp->lock is held. */
7789 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7790 {
7791 u32 val, rdmac_mode;
7792 int i, err, limit;
7793 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7795 tg3_disable_ints(tp);
7797 tg3_stop_fw(tp);
7799 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7801 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7802 tg3_abort_hw(tp, 1);
7804 if (reset_phy)
7805 tg3_phy_reset(tp);
7807 err = tg3_chip_reset(tp);
7808 if (err)
7809 return err;
7811 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7813 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7814 val = tr32(TG3_CPMU_CTRL);
7815 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7816 tw32(TG3_CPMU_CTRL, val);
7818 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7819 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7820 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7821 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7823 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7824 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7825 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7826 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7828 val = tr32(TG3_CPMU_HST_ACC);
7829 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7830 val |= CPMU_HST_ACC_MACCLK_6_25;
7831 tw32(TG3_CPMU_HST_ACC, val);
7832 }
7834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7835 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7836 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7837 PCIE_PWR_MGMT_L1_THRESH_4MS;
7838 tw32(PCIE_PWR_MGMT_THRESH, val);
7840 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7841 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7843 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7845 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7846 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7847 }
7849 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7850 u32 grc_mode = tr32(GRC_MODE);
7852 /* Access the lower 1K of PL PCIE block registers. */
7853 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7854 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7856 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7857 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7858 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7860 tw32(GRC_MODE, grc_mode);
7861 }
7863 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7864 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7865 u32 grc_mode = tr32(GRC_MODE);
7867 /* Access the lower 1K of PL PCIE block registers. */
7868 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7869 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7871 val = tr32(TG3_PCIE_TLDLPL_PORT +
7872 TG3_PCIE_PL_LO_PHYCTL5);
7873 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7874 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7876 tw32(GRC_MODE, grc_mode);
7877 }
7879 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7880 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7881 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7882 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7883 }
7885 /* Enable MAC control of LPI */
7886 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7887 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7888 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7889 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7891 tw32_f(TG3_CPMU_EEE_CTRL,
7892 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7894 tw32_f(TG3_CPMU_EEE_MODE,
7895 TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7896 TG3_CPMU_EEEMD_LPI_IN_TX |
7897 TG3_CPMU_EEEMD_LPI_IN_RX |
7898 TG3_CPMU_EEEMD_EEE_ENABLE);
7899 }
7901 /* This works around an issue with Athlon chipsets on
7902 * B3 tigon3 silicon. This bit has no effect on any
7903 * other revision. But do not set this on PCI Express
7904 * chips and don't even touch the clocks if the CPMU is present.
7905 */
7906 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7907 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7908 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7909 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7912 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7913 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7914 val = tr32(TG3PCI_PCISTATE);
7915 val |= PCISTATE_RETRY_SAME_DMA;
7916 tw32(TG3PCI_PCISTATE, val);
7917 }
7919 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7920 /* Allow reads and writes to the
7921 * APE register and memory space.
7922 */
7923 val = tr32(TG3PCI_PCISTATE);
7924 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7925 PCISTATE_ALLOW_APE_SHMEM_WR |
7926 PCISTATE_ALLOW_APE_PSPACE_WR;
7927 tw32(TG3PCI_PCISTATE, val);
7928 }
7930 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7931 /* Enable some hw fixes. */
7932 val = tr32(TG3PCI_MSI_DATA);
7933 val |= (1 << 26) | (1 << 28) | (1 << 29);
7934 tw32(TG3PCI_MSI_DATA, val);
7935 }
7937 /* Descriptor ring init may make accesses to the
7938 * NIC SRAM area to setup the TX descriptors, so we
7939 * can only do this after the hardware has been
7940 * successfully reset.
7941 */
7942 err = tg3_init_rings(tp);
7943 if (err)
7944 return err;
7946 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7947 val = tr32(TG3PCI_DMA_RW_CTRL) &
7948 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7949 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7950 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7951 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7952 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7953 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7954 /* This value is determined during the probe time DMA
7955 * engine test, tg3_test_dma.
7956 */
7957 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7958 }
7960 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7961 GRC_MODE_4X_NIC_SEND_RINGS |
7962 GRC_MODE_NO_TX_PHDR_CSUM |
7963 GRC_MODE_NO_RX_PHDR_CSUM);
7964 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7966 /* Pseudo-header checksum is done by hardware logic and not
7967 * the offload processors, so make the chip do the pseudo-
7968 * header checksums on receive. For transmit it is more
7969 * convenient to do the pseudo-header checksum in software
7970 * as Linux does that on transmit for us in all cases.
7971 */
7972 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7974 tw32(GRC_MODE,
7975 tp->grc_mode |
7976 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7978 /* Setup the timer prescalar register. Clock is always 66 MHz. */
7979 val = tr32(GRC_MISC_CFG);
7980 val &= ~0xff;
7981 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7982 tw32(GRC_MISC_CFG, val);
7984 /* Initialize MBUF/DESC pool. */
7985 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7986 /* Do nothing. */
7987 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7988 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7990 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7991 else
7992 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7993 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7994 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7995 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7996 int fw_len;
7998 fw_len = tp->fw_len;
7999 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8000 tw32(BUFMGR_MB_POOL_ADDR,
8001 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8002 tw32(BUFMGR_MB_POOL_SIZE,
8003 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8004 }
8006 if (tp->dev->mtu <= ETH_DATA_LEN) {
8007 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8008 tp->bufmgr_config.mbuf_read_dma_low_water);
8009 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8010 tp->bufmgr_config.mbuf_mac_rx_low_water);
8011 tw32(BUFMGR_MB_HIGH_WATER,
8012 tp->bufmgr_config.mbuf_high_water);
8013 } else {
8014 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8015 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8016 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8017 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8018 tw32(BUFMGR_MB_HIGH_WATER,
8019 tp->bufmgr_config.mbuf_high_water_jumbo);
8020 }
8021 tw32(BUFMGR_DMA_LOW_WATER,
8022 tp->bufmgr_config.dma_low_water);
8023 tw32(BUFMGR_DMA_HIGH_WATER,
8024 tp->bufmgr_config.dma_high_water);
8026 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8027 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8028 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8029 tw32(BUFMGR_MODE, val);
8030 for (i = 0; i < 2000; i++) {
8031 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8032 break;
8033 udelay(10);
8034 }
8035 if (i >= 2000) {
8036 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8037 return -ENODEV;
8038 }
8040 /* Setup replenish threshold. */
8041 val = tp->rx_pending / 8;
8042 if (val == 0)
8043 val = 1;
8044 else if (val > tp->rx_std_max_post)
8045 val = tp->rx_std_max_post;
8046 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8047 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8048 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8050 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
8051 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
8052 }
8054 tw32(RCVBDI_STD_THRESH, val);
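/* Worked example, assuming rx_pending == 200: the threshold becomes
 * 200 / 8 = 25, so the chip raises a replenish event after 25 standard
 * BDs are consumed, subject to the rx_std_max_post clamp (or half the
 * internal ring on the 5906).
 */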
8056 /* Initialize TG3_BDINFO's at:
8057 * RCVDBDI_STD_BD: standard eth size rx ring
8058 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8059 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8062 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8063 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8064 * ring attribute flags
8065 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8067 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8068 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8070 * The size of each ring is fixed in the firmware, but the location is
8071 * configurable.
8072 */
8073 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8074 ((u64) tpr->rx_std_mapping >> 32));
8075 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8076 ((u64) tpr->rx_std_mapping & 0xffffffff));
8077 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8078 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
8079 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8080 NIC_SRAM_RX_BUFFER_DESC);
8082 /* Disable the mini ring */
8083 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8084 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8085 BDINFO_FLAGS_DISABLED);
8087 /* Program the jumbo buffer descriptor ring control
8088 * blocks on those devices that have them.
8090 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8091 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8092 /* Setup replenish threshold. */
8093 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
8095 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
8096 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8097 ((u64) tpr->rx_jmb_mapping >> 32));
8098 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8099 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8100 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8101 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
8102 BDINFO_FLAGS_USE_EXT_RECV);
8103 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
8104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8105 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8106 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8107 } else {
8108 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8109 BDINFO_FLAGS_DISABLED);
8110 }
8111 }
8112 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
8113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8114 val = RX_STD_MAX_SIZE_5705;
8115 else
8116 val = RX_STD_MAX_SIZE_5717;
8117 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8118 val |= (TG3_RX_STD_DMA_SZ << 2);
8119 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8120 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8121 else
8122 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
8124 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8126 tpr->rx_std_prod_idx = tp->rx_pending;
8127 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8129 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
8130 tp->rx_jumbo_pending : 0;
8131 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8133 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
8134 tw32(STD_REPLENISH_LWM, 32);
8135 tw32(JMB_REPLENISH_LWM, 16);
8136 }
8138 tg3_rings_reset(tp);
8140 /* Initialize MAC address and backoff seed. */
8141 __tg3_set_mac_addr(tp, 0);
8143 /* MTU + ethernet header + FCS + optional VLAN tag */
8144 tw32(MAC_RX_MTU_SIZE,
8145 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8147 /* The slot time is changed by tg3_setup_phy if we
8148 * run at gigabit with half duplex.
8149 */
8150 tw32(MAC_TX_LENGTHS,
8151 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8152 (6 << TX_LENGTHS_IPG_SHIFT) |
8153 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
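/* Reading the MAC_TX_LENGTHS write above: the register packs three fields
 * through the *_SHIFT macros: 2 for the IPG-for-CRS, 6 for the
 * inter-packet gap, and 32 for the slot time, the full-duplex defaults;
 * per the comment, tg3_setup_phy() rewrites the slot time for half-duplex
 * gigabit.
 */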
8155 /* Receive rules. */
8156 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8157 tw32(RCVLPC_CONFIG, 0x0181);
8159 /* Calculate RDMAC_MODE setting early, we need it to determine
8160 * the RCVLPC_STATE_ENABLE mask.
8161 */
8162 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8163 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8164 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8165 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8166 RDMAC_MODE_LNGREAD_ENAB);
8168 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8170 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8175 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8176 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8177 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8179 /* If statement applies to 5705 and 5750 PCI devices only */
8180 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8181 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8182 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
8183 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8184 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8185 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8186 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8187 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8188 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8192 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8193 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8195 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8196 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8198 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8200 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8201 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8204 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8205 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8206 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8207 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8208 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8209 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8210 val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
8211 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
8213 tw32(TG3_RDMA_RSRVCTRL_REG,
8214 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8218 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8219 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8220 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8221 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8222 }
8224 /* Receive/send statistics. */
8225 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8226 val = tr32(RCVLPC_STATS_ENABLE);
8227 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8228 tw32(RCVLPC_STATS_ENABLE, val);
8229 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8230 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8231 val = tr32(RCVLPC_STATS_ENABLE);
8232 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8233 tw32(RCVLPC_STATS_ENABLE, val);
8234 } else {
8235 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8236 }
8237 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8238 tw32(SNDDATAI_STATSENAB, 0xffffff);
8239 tw32(SNDDATAI_STATSCTRL,
8240 (SNDDATAI_SCTRL_ENABLE |
8241 SNDDATAI_SCTRL_FASTUPD));
8243 /* Setup host coalescing engine. */
8244 tw32(HOSTCC_MODE, 0);
8245 for (i = 0; i < 2000; i++) {
8246 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8247 break;
8248 udelay(10);
8249 }
8251 __tg3_set_coalesce(tp, &tp->coal);
8253 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8254 /* Status/statistics block address. See tg3_timer,
8255 * the tg3_periodic_fetch_stats call there, and
8256 * tg3_get_stats to see how this works for 5705/5750 chips.
8258 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8259 ((u64) tp->stats_mapping >> 32));
8260 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8261 ((u64) tp->stats_mapping & 0xffffffff));
8262 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8264 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8266 /* Clear statistics and status block memory areas */
8267 for (i = NIC_SRAM_STATS_BLK;
8268 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8269 i += sizeof(u32)) {
8270 tg3_write_mem(tp, i, 0);
8271 udelay(40);
8272 }
8273 }
8275 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8277 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8278 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8279 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8280 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8282 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8283 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8284 /* reset to prevent losing 1st rx packet intermittently */
8285 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8286 udelay(10);
8287 }
8289 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8290 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8291 else
8292 tp->mac_mode = 0;
8293 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8294 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8295 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8296 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8297 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8298 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8299 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8300 udelay(40);
8302 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8303 * If TG3_FLG2_IS_NIC is zero, we should read the
8304 * register to preserve the GPIO settings for LOMs. The GPIOs,
8305 * whether used as inputs or outputs, are set by boot code after
8306 * reset.
8307 */
8308 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8309 u32 gpio_mask;
8311 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8312 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8313 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8316 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8317 GRC_LCLCTRL_GPIO_OUTPUT3;
8319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8320 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8322 tp->grc_local_ctrl &= ~gpio_mask;
8323 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8324 }
8325 /* GPIO1 must be driven high for eeprom write protect */
8326 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8327 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8328 GRC_LCLCTRL_GPIO_OUTPUT1);
8330 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8331 udelay(100);
8333 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8334 val = tr32(MSGINT_MODE);
8335 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8336 tw32(MSGINT_MODE, val);
8337 }
8339 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8340 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8341 udelay(40);
8342 }
8344 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8345 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8346 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8347 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8348 WDMAC_MODE_LNGREAD_ENAB);
8350 /* If statement applies to 5705 and 5750 PCI devices only */
8351 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8352 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8353 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8354 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8355 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8356 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8357 /* nothing */
8358 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8359 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8360 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8361 val |= WDMAC_MODE_RX_ACCEL;
8362 }
8363 }
8365 /* Enable host coalescing bug fix */
8366 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8367 val |= WDMAC_MODE_STATUS_TAG_FIX;
8369 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8370 val |= WDMAC_MODE_BURST_ALL_DATA;
8372 tw32_f(WDMAC_MODE, val);
8373 udelay(40);
8375 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8376 u16 pcix_cmd;
8378 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8379 &pcix_cmd);
8380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8381 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8382 pcix_cmd |= PCI_X_CMD_READ_2K;
8383 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8384 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8385 pcix_cmd |= PCI_X_CMD_READ_2K;
8387 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8388 pcix_cmd);
8389 }
8391 tw32_f(RDMAC_MODE, rdmac_mode);
8392 udelay(40);
8394 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8395 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8396 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8398 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8399 tw32(SNDDATAC_MODE,
8400 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8401 else
8402 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8404 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8405 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8406 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8407 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8409 val |= RCVDBDI_MODE_LRG_RING_SZ;
8410 tw32(RCVDBDI_MODE, val);
8411 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8412 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8413 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8414 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8415 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8416 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8417 tw32(SNDBDI_MODE, val);
8418 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8420 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8421 err = tg3_load_5701_a0_firmware_fix(tp);
8422 if (err)
8423 return err;
8424 }
8426 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8427 err = tg3_load_tso_firmware(tp);
8428 if (err)
8429 return err;
8430 }
8432 tp->tx_mode = TX_MODE_ENABLE;
8433 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8434 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8435 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8436 tw32_f(MAC_TX_MODE, tp->tx_mode);
8437 udelay(100);
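/* RSS setup below: the 128-entry indirection table is packed four entries
 * per 32-bit register write. With, say, irq_cnt == 5 (four RX rings) the
 * table reads 0,1,2,3,0,1,2,3,... so hashed flows spread evenly across
 * rings 0-3; the hash key written afterwards is a fixed arbitrary secret.
 */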
8439 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8440 u32 reg = MAC_RSS_INDIR_TBL_0;
8441 u8 *ent = (u8 *)&val;
8443 /* Setup the indirection table */
8444 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8445 int idx = i % sizeof(val);
8447 ent[idx] = i % (tp->irq_cnt - 1);
8448 if (idx == sizeof(val) - 1) {
8449 tw32(reg, val);
8450 reg += 4;
8451 }
8452 }
8454 /* Setup the "secret" hash key. */
8455 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8456 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8457 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8458 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8459 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8460 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8461 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8462 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8463 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8464 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8465 }
8467 tp->rx_mode = RX_MODE_ENABLE;
8468 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8469 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8471 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8472 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8473 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8474 RX_MODE_RSS_IPV6_HASH_EN |
8475 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8476 RX_MODE_RSS_IPV4_HASH_EN |
8477 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8479 tw32_f(MAC_RX_MODE, tp->rx_mode);
8480 udelay(10);
8482 tw32(MAC_LED_CTRL, tp->led_ctrl);
8484 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8485 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8486 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8487 udelay(10);
8488 }
8489 tw32_f(MAC_RX_MODE, tp->rx_mode);
8490 udelay(10);
8492 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8493 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8494 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8495 /* Set drive transmission level to 1.2V */
8496 /* only if the signal pre-emphasis bit is not set */
8497 val = tr32(MAC_SERDES_CFG);
8498 val &= 0xfffff000;
8499 val |= 0x880;
8500 tw32(MAC_SERDES_CFG, val);
8502 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8503 tw32(MAC_SERDES_CFG, 0x616000);
8504 }
8506 /* Prevent chip from dropping frames when flow control
8507 * is enabled.
8508 */
8509 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8510 val = 1;
8511 else
8512 val = 2;
8513 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8516 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8517 /* Use hardware link auto-negotiation */
8518 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8519 }
8521 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8522 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8523 u32 tmp;
8525 tmp = tr32(SERDES_RX_CTRL);
8526 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8527 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8528 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8529 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8530 }
8532 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8533 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8534 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8535 tp->link_config.speed = tp->link_config.orig_speed;
8536 tp->link_config.duplex = tp->link_config.orig_duplex;
8537 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8538 }
8540 err = tg3_setup_phy(tp, 0);
8541 if (err)
8542 return err;
8544 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8545 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8546 u32 tmp;
8548 /* Clear CRC stats. */
8549 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8550 tg3_writephy(tp, MII_TG3_TEST1,
8551 tmp | MII_TG3_TEST1_CRC_EN);
8552 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8553 }
8554 }
8555 }
8557 __tg3_set_rx_mode(tp->dev);
8559 /* Initialize receive rules. */
8560 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8561 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8562 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8563 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8565 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8566 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8567 limit = 8;
8568 else
8569 limit = 16;
8570 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8571 limit -= 4;
8572 switch (limit) {
8573 case 16:
8574 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8575 case 15:
8576 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8577 case 14:
8578 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8579 case 13:
8580 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8581 case 12:
8582 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8583 case 11:
8584 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8585 case 10:
8586 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8587 case 9:
8588 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8589 case 8:
8590 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8591 case 7:
8592 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8593 case 6:
8594 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8595 case 5:
8596 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8597 case 4:
8598 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8599 case 3:
8600 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8601 case 2:
8602 case 1:
8604 default:
8605 break;
8606 }
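/* The switch above deliberately falls through: entering at the computed
 * limit clears every rule register from limit - 1 down to 4 in one pass,
 * while rules 0 and 1 were programmed explicitly earlier and rules 2-3
 * are intentionally left untouched.
 */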
8608 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8609 /* Write our heartbeat update interval to APE. */
8610 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8611 APE_HOST_HEARTBEAT_INT_DISABLE);
8613 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8615 return 0;
8616 }
8618 /* Called at device open time to get the chip ready for
8619 * packet processing. Invoked with tp->lock held.
8620 */
8621 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8622 {
8623 tg3_switch_clocks(tp);
8625 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8627 return tg3_reset_hw(tp, reset_phy);
8628 }
8630 #define TG3_STAT_ADD32(PSTAT, REG) \
8631 do { u32 __val = tr32(REG); \
8632 (PSTAT)->low += __val; \
8633 if ((PSTAT)->low < __val) \
8634 (PSTAT)->high += 1; \
8635 } while (0)
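/* TG3_STAT_ADD32() folds a 32-bit hardware counter into a 64-bit software
 * counter kept as high/low halves: after low += __val, low < __val can
 * only hold if the addition wrapped past 2^32, so the high word is then
 * carried by one.
 */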
8637 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8638 {
8639 struct tg3_hw_stats *sp = tp->hw_stats;
8641 if (!netif_carrier_ok(tp->dev))
8642 return;
8644 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8645 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8646 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8647 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8648 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8649 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8650 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8651 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8652 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8653 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8654 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8655 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8656 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8658 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8659 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8660 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8661 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8662 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8663 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8664 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8665 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8666 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8667 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8668 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8669 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8670 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8671 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8673 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8674 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8675 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8676 }
8678 static void tg3_timer(unsigned long __opaque)
8679 {
8680 struct tg3 *tp = (struct tg3 *) __opaque;
8682 if (tp->irq_sync)
8683 goto restart_timer;
8685 spin_lock(&tp->lock);
8687 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8688 /* All of this garbage is because when using non-tagged
8689 * IRQ status the mailbox/status_block protocol the chip
8690 * uses with the cpu is race prone.
8691 */
8692 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8693 tw32(GRC_LOCAL_CTRL,
8694 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8696 tw32(HOSTCC_MODE, tp->coalesce_mode |
8697 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8698 }
8700 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8701 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8702 spin_unlock(&tp->lock);
8703 schedule_work(&tp->reset_task);
8704 return;
8705 }
8706 }
8708 /* This part only runs once per second. */
8709 if (!--tp->timer_counter) {
8710 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8711 tg3_periodic_fetch_stats(tp);
8713 if (tp->setlpicnt && !--tp->setlpicnt) {
8714 u32 val = tr32(TG3_CPMU_EEE_MODE);
8715 tw32(TG3_CPMU_EEE_MODE,
8716 val | TG3_CPMU_EEEMD_LPI_ENABLE);
8717 }
8719 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8720 u32 mac_stat;
8721 int phy_event;
8723 mac_stat = tr32(MAC_STATUS);
8725 phy_event = 0;
8726 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8727 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8728 phy_event = 1;
8729 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8730 phy_event = 1;
8732 if (phy_event)
8733 tg3_setup_phy(tp, 0);
8734 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8735 u32 mac_stat = tr32(MAC_STATUS);
8736 int need_setup = 0;
8738 if (netif_carrier_ok(tp->dev) &&
8739 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8740 need_setup = 1;
8741 }
8742 if (!netif_carrier_ok(tp->dev) &&
8743 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8744 MAC_STATUS_SIGNAL_DET))) {
8745 need_setup = 1;
8746 }
8747 if (need_setup) {
8748 if (!tp->serdes_counter) {
8749 tw32_f(MAC_MODE,
8750 (tp->mac_mode &
8751 ~MAC_MODE_PORT_MODE_MASK));
8752 udelay(40);
8753 tw32_f(MAC_MODE, tp->mac_mode);
8754 udelay(40);
8755 }
8756 tg3_setup_phy(tp, 0);
8757 }
8758 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8759 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8760 tg3_serdes_parallel_detect(tp);
8761 }
8763 tp->timer_counter = tp->timer_multiplier;
8764 }
8766 /* Heartbeat is only sent once every 2 seconds.
8768 * The heartbeat is to tell the ASF firmware that the host
8769 * driver is still alive. In the event that the OS crashes,
8770 * ASF needs to reset the hardware to free up the FIFO space
8771 * that may be filled with rx packets destined for the host.
8772 * If the FIFO is full, ASF will no longer function properly.
8774 * Unintended resets have been reported on real time kernels
8775 * where the timer doesn't run on time. Netpoll will also have
8776 * same problem.
8778 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8779 * to check the ring condition when the heartbeat is expiring
8780 * before doing the reset. This will prevent most unintended
8781 * resets.
8782 */
8783 if (!--tp->asf_counter) {
8784 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8785 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8786 tg3_wait_for_event_ack(tp);
8788 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8789 FWCMD_NICDRV_ALIVE3);
8790 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8791 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8792 TG3_FW_UPDATE_TIMEOUT_SEC);
8794 tg3_generate_fw_event(tp);
8795 }
8796 tp->asf_counter = tp->asf_multiplier;
8797 }
8799 spin_unlock(&tp->lock);
8801 restart_timer:
8802 tp->timer.expires = jiffies + tp->timer_offset;
8803 add_timer(&tp->timer);
8804 }
8806 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8807 {
8808 irq_handler_t fn;
8809 unsigned long flags;
8810 char *name;
8811 struct tg3_napi *tnapi = &tp->napi[irq_num];
8813 if (tp->irq_cnt == 1)
8814 name = tp->dev->name;
8815 else {
8816 name = &tnapi->irq_lbl[0];
8817 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8818 name[IFNAMSIZ-1] = 0;
8819 }
8821 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8822 fn = tg3_msi;
8823 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8824 fn = tg3_msi_1shot;
8825 flags = IRQF_SAMPLE_RANDOM;
8826 } else {
8827 fn = tg3_interrupt;
8828 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8829 fn = tg3_interrupt_tagged;
8830 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8831 }
8833 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8834 }
8836 static int tg3_test_interrupt(struct tg3 *tp)
8837 {
8838 struct tg3_napi *tnapi = &tp->napi[0];
8839 struct net_device *dev = tp->dev;
8840 int err, i, intr_ok = 0;
8841 u32 val;
8843 if (!netif_running(dev))
8844 return -ENODEV;
8846 tg3_disable_ints(tp);
8848 free_irq(tnapi->irq_vec, tnapi);
8850 /*
8851 * Turn off MSI one shot mode. Otherwise this test has no
8852 * observable way to know whether the interrupt was delivered.
8853 */
8854 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8855 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8856 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8857 tw32(MSGINT_MODE, val);
8858 }
8860 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8861 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8862 if (err)
8863 return err;
8865 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8866 tg3_enable_ints(tp);
8868 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8869 HOSTCC_MODE_NOW);
8871 for (i = 0; i < 5; i++) {
8872 u32 int_mbox, misc_host_ctrl;
8874 int_mbox = tr32_mailbox(tnapi->int_mbox);
8875 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8877 if ((int_mbox != 0) ||
8878 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8879 intr_ok = 1;
8880 break;
8881 }
8883 msleep(10);
8884 }
8886 tg3_disable_ints(tp);
8888 free_irq(tnapi->irq_vec, tnapi);
8890 err = tg3_request_irq(tp, 0);
8892 if (err)
8893 return err;
8895 if (intr_ok) {
8896 /* Reenable MSI one shot mode. */
8897 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8898 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8899 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8900 tw32(MSGINT_MODE, val);
8901 }
8902 return 0;
8903 }
8905 return -EIO;
8906 }
8908 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
8909 * successfully restored
8910 */
8911 static int tg3_test_msi(struct tg3 *tp)
8912 {
8913 int err;
8914 u16 pci_cmd;
8916 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8917 return 0;
8919 /* Turn off SERR reporting in case MSI terminates with Master
8920 * Abort.
8921 */
8922 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8923 pci_write_config_word(tp->pdev, PCI_COMMAND,
8924 pci_cmd & ~PCI_COMMAND_SERR);
8926 err = tg3_test_interrupt(tp);
8928 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8933 /* other failures */
8937 /* MSI test failed, go back to INTx mode */
8938 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8939 "to INTx mode. Please report this failure to the PCI "
8940 "maintainer and include system chipset information\n");
8942 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8944 pci_disable_msi(tp->pdev);
8946 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8947 tp->napi[0].irq_vec = tp->pdev->irq;
8949 err = tg3_request_irq(tp, 0);
8953 /* Need to reset the chip because the MSI cycle may have terminated
8954 * with Master Abort.
8956 tg3_full_lock(tp, 1);
8958 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8959 err = tg3_init_hw(tp, 1);
8961 tg3_full_unlock(tp);
8964 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8969 static int tg3_request_firmware(struct tg3 *tp)
8971 const __be32 *fw_data;
8973 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8974 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8979 fw_data = (void *)tp->fw->data;
8981 /* Firmware blob starts with version numbers, followed by
8982 * start address and _full_ length including BSS sections
8983 * (which must be longer than the actual data, of course). */
8986 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8987 if (tp->fw_len < (tp->fw->size - 12)) {
8988 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8989 tp->fw_len, tp->fw_needed);
8990 release_firmware(tp->fw);
8995 /* We no longer need firmware; we have it. */
8996 tp->fw_needed = NULL;
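/* Illustrative sketch (not part of the driver): the header layout implied
 * by the length check above. The blob begins with three big-endian 32-bit
 * words -- a version word, the on-chip load address, and the full image
 * length including BSS -- and the firmware text/data (tp->fw->size - 12
 * bytes) follows immediately after. The struct name is hypothetical.
 */
struct tg3ex_fw_hdr {
	__be32 version;		/* firmware version word */
	__be32 base_addr;	/* on-chip start/load address */
	__be32 len;		/* full length, including BSS */
	/* payload bytes follow */
};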
9000 static bool tg3_enable_msix(struct tg3 *tp)
9002 int i, rc, cpus = num_online_cpus();
9003 struct msix_entry msix_ent[tp->irq_max];
9006 /* Just fall back to the simpler MSI mode. */
9010 * We want as many rx rings enabled as there are cpus.
9011 * The first MSIX vector only deals with link interrupts, etc,
9012 * so we add one to the number of vectors we are requesting.
9014 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9016 for (i = 0; i < tp->irq_max; i++) {
9017 msix_ent[i].entry = i;
9018 msix_ent[i].vector = 0;
9021 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9024 } else if (rc != 0) {
9025 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9027 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9032 for (i = 0; i < tp->irq_max; i++)
9033 tp->napi[i].irq_vec = msix_ent[i].vector;
9035 netif_set_real_num_tx_queues(tp->dev, 1);
9036 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9037 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9038 pci_disable_msix(tp->pdev);
9041 if (tp->irq_cnt > 1)
9042 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
9047 static void tg3_ints_init(struct tg3 *tp)
9049 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
9050 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
9051 /* All MSI supporting chips should support tagged
9052 * status. Assert that this is the case.
9054 netdev_warn(tp->dev,
9055 "MSI without TAGGED_STATUS? Not using MSI\n");
9059 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
9060 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
9061 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
9062 pci_enable_msi(tp->pdev) == 0)
9063 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
9065 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9066 u32 msi_mode = tr32(MSGINT_MODE);
9067 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
9068 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9069 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9072 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
9074 tp->napi[0].irq_vec = tp->pdev->irq;
9075 netif_set_real_num_tx_queues(tp->dev, 1);
9076 netif_set_real_num_rx_queues(tp->dev, 1);
9080 static void tg3_ints_fini(struct tg3 *tp)
9082 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
9083 pci_disable_msix(tp->pdev);
9084 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
9085 pci_disable_msi(tp->pdev);
9086 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
9087 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
9090 static int tg3_open(struct net_device *dev)
9092 struct tg3 *tp = netdev_priv(dev);
9095 if (tp->fw_needed) {
9096 err = tg3_request_firmware(tp);
9097 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9101 netdev_warn(tp->dev, "TSO capability disabled\n");
9102 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
9103 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9104 netdev_notice(tp->dev, "TSO capability restored\n");
9105 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9109 netif_carrier_off(tp->dev);
9111 err = tg3_set_power_state(tp, PCI_D0);
9115 tg3_full_lock(tp, 0);
9117 tg3_disable_ints(tp);
9118 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9120 tg3_full_unlock(tp);
9123 * Set up interrupts first so we know how
9124 * many NAPI resources to allocate
9128 /* The placement of this call is tied
9129 * to the setup and use of Host TX descriptors.
9131 err = tg3_alloc_consistent(tp);
9137 tg3_napi_enable(tp);
9139 for (i = 0; i < tp->irq_cnt; i++) {
9140 struct tg3_napi *tnapi = &tp->napi[i];
9141 err = tg3_request_irq(tp, i);
9143 for (i--; i >= 0; i--)
9144 free_irq(tnapi->irq_vec, tnapi);
9152 tg3_full_lock(tp, 0);
9154 err = tg3_init_hw(tp, 1);
9156 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9159 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
9160 tp->timer_offset = HZ;
9162 tp->timer_offset = HZ / 10;
9164 BUG_ON(tp->timer_offset > HZ);
9165 tp->timer_counter = tp->timer_multiplier =
9166 (HZ / tp->timer_offset);
9167 tp->asf_counter = tp->asf_multiplier =
9168 ((HZ / tp->timer_offset) * 2);
9170 init_timer(&tp->timer);
9171 tp->timer.expires = jiffies + tp->timer_offset;
9172 tp->timer.data = (unsigned long) tp;
9173 tp->timer.function = tg3_timer;
9176 tg3_full_unlock(tp);
9181 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
9182 err = tg3_test_msi(tp);
9185 tg3_full_lock(tp, 0);
9186 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9188 tg3_full_unlock(tp);
9193 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
9194 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9195 u32 val = tr32(PCIE_TRANSACTION_CFG);
9197 tw32(PCIE_TRANSACTION_CFG,
9198 val | PCIE_TRANS_CFG_1SHOT_MSI);
9204 tg3_full_lock(tp, 0);
9206 add_timer(&tp->timer);
9207 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9208 tg3_enable_ints(tp);
9210 tg3_full_unlock(tp);
9212 netif_tx_start_all_queues(dev);
9217 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9218 struct tg3_napi *tnapi = &tp->napi[i];
9219 free_irq(tnapi->irq_vec, tnapi);
9223 tg3_napi_disable(tp);
9225 tg3_free_consistent(tp);
9232 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9233 struct rtnl_link_stats64 *);
9234 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9236 static int tg3_close(struct net_device *dev)
9239 struct tg3 *tp = netdev_priv(dev);
9241 tg3_napi_disable(tp);
9242 cancel_work_sync(&tp->reset_task);
9244 netif_tx_stop_all_queues(dev);
9246 del_timer_sync(&tp->timer);
9250 tg3_full_lock(tp, 1);
9252 tg3_disable_ints(tp);
9254 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9256 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9258 tg3_full_unlock(tp);
9260 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9261 struct tg3_napi *tnapi = &tp->napi[i];
9262 free_irq(tnapi->irq_vec, tnapi);
9267 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9269 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9270 sizeof(tp->estats_prev));
9274 tg3_free_consistent(tp);
9276 tg3_set_power_state(tp, PCI_D3hot);
9278 netif_carrier_off(tp->dev);
9283 static inline u64 get_stat64(tg3_stat64_t *val)
9285 return ((u64)val->high << 32) | ((u64)val->low);
9288 static u64 calc_crc_errors(struct tg3 *tp)
9290 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9292 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9293 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9297 spin_lock_bh(&tp->lock);
9298 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9299 tg3_writephy(tp, MII_TG3_TEST1,
9300 val | MII_TG3_TEST1_CRC_EN);
9301 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9304 spin_unlock_bh(&tp->lock);
9306 tp->phy_crc_errors += val;
9308 return tp->phy_crc_errors;
9311 return get_stat64(&hw_stats->rx_fcs_errors);
9314 #define ESTAT_ADD(member) \
9315 estats->member = old_estats->member + \
9316 get_stat64(&hw_stats->member)
9318 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9320 struct tg3_ethtool_stats *estats = &tp->estats;
9321 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9322 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9327 ESTAT_ADD(rx_octets);
9328 ESTAT_ADD(rx_fragments);
9329 ESTAT_ADD(rx_ucast_packets);
9330 ESTAT_ADD(rx_mcast_packets);
9331 ESTAT_ADD(rx_bcast_packets);
9332 ESTAT_ADD(rx_fcs_errors);
9333 ESTAT_ADD(rx_align_errors);
9334 ESTAT_ADD(rx_xon_pause_rcvd);
9335 ESTAT_ADD(rx_xoff_pause_rcvd);
9336 ESTAT_ADD(rx_mac_ctrl_rcvd);
9337 ESTAT_ADD(rx_xoff_entered);
9338 ESTAT_ADD(rx_frame_too_long_errors);
9339 ESTAT_ADD(rx_jabbers);
9340 ESTAT_ADD(rx_undersize_packets);
9341 ESTAT_ADD(rx_in_length_errors);
9342 ESTAT_ADD(rx_out_length_errors);
9343 ESTAT_ADD(rx_64_or_less_octet_packets);
9344 ESTAT_ADD(rx_65_to_127_octet_packets);
9345 ESTAT_ADD(rx_128_to_255_octet_packets);
9346 ESTAT_ADD(rx_256_to_511_octet_packets);
9347 ESTAT_ADD(rx_512_to_1023_octet_packets);
9348 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9349 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9350 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9351 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9352 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9354 ESTAT_ADD(tx_octets);
9355 ESTAT_ADD(tx_collisions);
9356 ESTAT_ADD(tx_xon_sent);
9357 ESTAT_ADD(tx_xoff_sent);
9358 ESTAT_ADD(tx_flow_control);
9359 ESTAT_ADD(tx_mac_errors);
9360 ESTAT_ADD(tx_single_collisions);
9361 ESTAT_ADD(tx_mult_collisions);
9362 ESTAT_ADD(tx_deferred);
9363 ESTAT_ADD(tx_excessive_collisions);
9364 ESTAT_ADD(tx_late_collisions);
9365 ESTAT_ADD(tx_collide_2times);
9366 ESTAT_ADD(tx_collide_3times);
9367 ESTAT_ADD(tx_collide_4times);
9368 ESTAT_ADD(tx_collide_5times);
9369 ESTAT_ADD(tx_collide_6times);
9370 ESTAT_ADD(tx_collide_7times);
9371 ESTAT_ADD(tx_collide_8times);
9372 ESTAT_ADD(tx_collide_9times);
9373 ESTAT_ADD(tx_collide_10times);
9374 ESTAT_ADD(tx_collide_11times);
9375 ESTAT_ADD(tx_collide_12times);
9376 ESTAT_ADD(tx_collide_13times);
9377 ESTAT_ADD(tx_collide_14times);
9378 ESTAT_ADD(tx_collide_15times);
9379 ESTAT_ADD(tx_ucast_packets);
9380 ESTAT_ADD(tx_mcast_packets);
9381 ESTAT_ADD(tx_bcast_packets);
9382 ESTAT_ADD(tx_carrier_sense_errors);
9383 ESTAT_ADD(tx_discards);
9384 ESTAT_ADD(tx_errors);
9386 ESTAT_ADD(dma_writeq_full);
9387 ESTAT_ADD(dma_write_prioq_full);
9388 ESTAT_ADD(rxbds_empty);
9389 ESTAT_ADD(rx_discards);
9390 ESTAT_ADD(rx_errors);
9391 ESTAT_ADD(rx_threshold_hit);
9393 ESTAT_ADD(dma_readq_full);
9394 ESTAT_ADD(dma_read_prioq_full);
9395 ESTAT_ADD(tx_comp_queue_full);
9397 ESTAT_ADD(ring_set_send_prod_index);
9398 ESTAT_ADD(ring_status_update);
9399 ESTAT_ADD(nic_irqs);
9400 ESTAT_ADD(nic_avoided_irqs);
9401 ESTAT_ADD(nic_tx_threshold_hit);
9406 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9407 struct rtnl_link_stats64 *stats)
9409 struct tg3 *tp = netdev_priv(dev);
9410 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9411 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9416 stats->rx_packets = old_stats->rx_packets +
9417 get_stat64(&hw_stats->rx_ucast_packets) +
9418 get_stat64(&hw_stats->rx_mcast_packets) +
9419 get_stat64(&hw_stats->rx_bcast_packets);
9421 stats->tx_packets = old_stats->tx_packets +
9422 get_stat64(&hw_stats->tx_ucast_packets) +
9423 get_stat64(&hw_stats->tx_mcast_packets) +
9424 get_stat64(&hw_stats->tx_bcast_packets);
9426 stats->rx_bytes = old_stats->rx_bytes +
9427 get_stat64(&hw_stats->rx_octets);
9428 stats->tx_bytes = old_stats->tx_bytes +
9429 get_stat64(&hw_stats->tx_octets);
9431 stats->rx_errors = old_stats->rx_errors +
9432 get_stat64(&hw_stats->rx_errors);
9433 stats->tx_errors = old_stats->tx_errors +
9434 get_stat64(&hw_stats->tx_errors) +
9435 get_stat64(&hw_stats->tx_mac_errors) +
9436 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9437 get_stat64(&hw_stats->tx_discards);
9439 stats->multicast = old_stats->multicast +
9440 get_stat64(&hw_stats->rx_mcast_packets);
9441 stats->collisions = old_stats->collisions +
9442 get_stat64(&hw_stats->tx_collisions);
9444 stats->rx_length_errors = old_stats->rx_length_errors +
9445 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9446 get_stat64(&hw_stats->rx_undersize_packets);
9448 stats->rx_over_errors = old_stats->rx_over_errors +
9449 get_stat64(&hw_stats->rxbds_empty);
9450 stats->rx_frame_errors = old_stats->rx_frame_errors +
9451 get_stat64(&hw_stats->rx_align_errors);
9452 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9453 get_stat64(&hw_stats->tx_discards);
9454 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9455 get_stat64(&hw_stats->tx_carrier_sense_errors);
9457 stats->rx_crc_errors = old_stats->rx_crc_errors +
9458 calc_crc_errors(tp);
9460 stats->rx_missed_errors = old_stats->rx_missed_errors +
9461 get_stat64(&hw_stats->rx_discards);
9463 stats->rx_dropped = tp->rx_dropped;
9468 static inline u32 calc_crc(unsigned char *buf, int len)
9476 for (j = 0; j < len; j++) {
9479 for (k = 0; k < 8; k++) {
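/* Illustrative sketch (not driver code): the body of calc_crc() is elided
 * in this listing, but the loops above compute a standard bit-reversed
 * CRC-32 (polynomial 0xedb88320), one byte per outer pass and one bit per
 * inner pass, with the usual final inversion. The multicast filter below
 * hashes each address through this CRC. The stand-alone name is hypothetical.
 */
static u32 tg3ex_crc32_le(const unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			u32 lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320;
		}
	}
	return ~reg;
}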
9492 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9494 /* accept or reject all multicast frames */
9495 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9496 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9497 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9498 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9501 static void __tg3_set_rx_mode(struct net_device *dev)
9503 struct tg3 *tp = netdev_priv(dev);
9506 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9507 RX_MODE_KEEP_VLAN_TAG);
9509 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9510 * flag clear.
9512 #if TG3_VLAN_TAG_USED
9514 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9515 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9517 /* By definition, VLAN is always disabled in this
9518 * case.
9520 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9521 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9524 if (dev->flags & IFF_PROMISC) {
9525 /* Promiscuous mode. */
9526 rx_mode |= RX_MODE_PROMISC;
9527 } else if (dev->flags & IFF_ALLMULTI) {
9528 /* Accept all multicast. */
9529 tg3_set_multi(tp, 1);
9530 } else if (netdev_mc_empty(dev)) {
9531 /* Reject all multicast. */
9532 tg3_set_multi(tp, 0);
9534 /* Accept one or more multicast(s). */
9535 struct netdev_hw_addr *ha;
9536 u32 mc_filter[4] = { 0, };
9541 netdev_for_each_mc_addr(ha, dev) {
9542 crc = calc_crc(ha->addr, ETH_ALEN);
9543 bit = ~crc & 0x7f;
9544 regidx = (bit & 0x60) >> 5;
9545 bit &= 0x1f;
9546 mc_filter[regidx] |= (1 << bit);
9549 tw32(MAC_HASH_REG_0, mc_filter[0]);
9550 tw32(MAC_HASH_REG_1, mc_filter[1]);
9551 tw32(MAC_HASH_REG_2, mc_filter[2]);
9552 tw32(MAC_HASH_REG_3, mc_filter[3]);
9555 if (rx_mode != tp->rx_mode) {
9556 tp->rx_mode = rx_mode;
9557 tw32_f(MAC_RX_MODE, rx_mode);
9562 static void tg3_set_rx_mode(struct net_device *dev)
9564 struct tg3 *tp = netdev_priv(dev);
9566 if (!netif_running(dev))
9569 tg3_full_lock(tp, 0);
9570 __tg3_set_rx_mode(dev);
9571 tg3_full_unlock(tp);
9574 #define TG3_REGDUMP_LEN (32 * 1024)
9576 static int tg3_get_regs_len(struct net_device *dev)
9578 return TG3_REGDUMP_LEN;
9581 static void tg3_get_regs(struct net_device *dev,
9582 struct ethtool_regs *regs, void *_p)
9585 struct tg3 *tp = netdev_priv(dev);
9591 memset(p, 0, TG3_REGDUMP_LEN);
9593 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9596 tg3_full_lock(tp, 0);
9598 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9599 #define GET_REG32_LOOP(base, len) \
9600 do { p = (u32 *)(orig_p + (base)); \
9601 for (i = 0; i < len; i += 4) \
9602 __GET_REG32((base) + i); \
9604 #define GET_REG32_1(reg) \
9605 do { p = (u32 *)(orig_p + (reg)); \
9606 __GET_REG32((reg)); \
9609 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9610 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9611 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9612 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9613 GET_REG32_1(SNDDATAC_MODE);
9614 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9615 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9616 GET_REG32_1(SNDBDC_MODE);
9617 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9618 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9619 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9620 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9621 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9622 GET_REG32_1(RCVDCC_MODE);
9623 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9624 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9625 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9626 GET_REG32_1(MBFREE_MODE);
9627 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9628 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9629 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9630 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9631 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9632 GET_REG32_1(RX_CPU_MODE);
9633 GET_REG32_1(RX_CPU_STATE);
9634 GET_REG32_1(RX_CPU_PGMCTR);
9635 GET_REG32_1(RX_CPU_HWBKPT);
9636 GET_REG32_1(TX_CPU_MODE);
9637 GET_REG32_1(TX_CPU_STATE);
9638 GET_REG32_1(TX_CPU_PGMCTR);
9639 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9640 GET_REG32_LOOP(FTQ_RESET, 0x120);
9641 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9642 GET_REG32_1(DMAC_MODE);
9643 GET_REG32_LOOP(GRC_MODE, 0x4c);
9644 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9645 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9648 #undef GET_REG32_LOOP
9651 tg3_full_unlock(tp);
9654 static int tg3_get_eeprom_len(struct net_device *dev)
9656 struct tg3 *tp = netdev_priv(dev);
9658 return tp->nvram_size;
9661 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9663 struct tg3 *tp = netdev_priv(dev);
9666 u32 i, offset, len, b_offset, b_count;
9669 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9672 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9675 offset = eeprom->offset;
9679 eeprom->magic = TG3_EEPROM_MAGIC;
9682 /* adjustments to start on required 4 byte boundary */
9683 b_offset = offset & 3;
9684 b_count = 4 - b_offset;
9685 if (b_count > len) {
9686 /* i.e. offset=1 len=2 */
9689 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9692 memcpy(data, ((char *)&val) + b_offset, b_count);
9695 eeprom->len += b_count;
9698 /* read bytes up to the last 4 byte boundary */
9699 pd = &data[eeprom->len];
9700 for (i = 0; i < (len - (len & 3)); i += 4) {
9701 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9706 memcpy(pd + i, &val, 4);
9711 /* read last bytes not ending on 4 byte boundary */
9712 pd = &data[eeprom->len];
9714 b_offset = offset + len - b_count;
9715 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9718 memcpy(pd, &val, b_count);
9719 eeprom->len += b_count;
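/* Illustrative sketch of the three-phase pattern used above for an
 * arbitrarily aligned read from storage that only supports aligned 32-bit
 * accesses: a leading partial word, whole aligned words, then a trailing
 * partial word. read_word() and the tg3ex_ name are hypothetical; error
 * handling is simplified.
 */
static int tg3ex_read_unaligned(u32 offset, u32 len, u8 *data,
				int (*read_word)(u32 aligned_off, __be32 *val))
{
	__be32 val;
	u32 head = offset & 3, done = 0;

	if (head) {				/* leading partial word */
		u32 n = min_t(u32, 4 - head, len);

		if (read_word(offset - head, &val))
			return -EIO;
		memcpy(data, (u8 *)&val + head, n);
		done = n;
	}
	while (len - done >= 4) {		/* aligned middle words */
		if (read_word(offset + done, &val))
			return -EIO;
		memcpy(data + done, &val, 4);
		done += 4;
	}
	if (len != done) {			/* trailing partial word */
		if (read_word(offset + done, &val))
			return -EIO;
		memcpy(data + done, &val, len - done);
	}
	return 0;
}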
9724 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9726 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9728 struct tg3 *tp = netdev_priv(dev);
9730 u32 offset, len, b_offset, odd_len;
9734 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9737 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9738 eeprom->magic != TG3_EEPROM_MAGIC)
9741 offset = eeprom->offset;
9744 if ((b_offset = (offset & 3))) {
9745 /* adjustments to start on required 4 byte boundary */
9746 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9757 /* adjustments to end on required 4 byte boundary */
9759 len = (len + 3) & ~3;
9760 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9766 if (b_offset || odd_len) {
9767 buf = kmalloc(len, GFP_KERNEL);
9771 memcpy(buf, &start, 4);
9773 memcpy(buf+len-4, &end, 4);
9774 memcpy(buf + b_offset, data, eeprom->len);
9777 ret = tg3_nvram_write_block(tp, offset, len, buf);
9785 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9787 struct tg3 *tp = netdev_priv(dev);
9789 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9790 struct phy_device *phydev;
9791 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9793 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9794 return phy_ethtool_gset(phydev, cmd);
9797 cmd->supported = (SUPPORTED_Autoneg);
9799 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9800 cmd->supported |= (SUPPORTED_1000baseT_Half |
9801 SUPPORTED_1000baseT_Full);
9803 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9804 cmd->supported |= (SUPPORTED_100baseT_Half |
9805 SUPPORTED_100baseT_Full |
9806 SUPPORTED_10baseT_Half |
9807 SUPPORTED_10baseT_Full |
9809 cmd->port = PORT_TP;
9811 cmd->supported |= SUPPORTED_FIBRE;
9812 cmd->port = PORT_FIBRE;
9815 cmd->advertising = tp->link_config.advertising;
9816 if (netif_running(dev)) {
9817 cmd->speed = tp->link_config.active_speed;
9818 cmd->duplex = tp->link_config.active_duplex;
9820 cmd->speed = SPEED_INVALID;
9821 cmd->duplex = DUPLEX_INVALID;
9823 cmd->phy_address = tp->phy_addr;
9824 cmd->transceiver = XCVR_INTERNAL;
9825 cmd->autoneg = tp->link_config.autoneg;
9831 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9833 struct tg3 *tp = netdev_priv(dev);
9835 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9836 struct phy_device *phydev;
9837 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9839 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9840 return phy_ethtool_sset(phydev, cmd);
9843 if (cmd->autoneg != AUTONEG_ENABLE &&
9844 cmd->autoneg != AUTONEG_DISABLE)
9847 if (cmd->autoneg == AUTONEG_DISABLE &&
9848 cmd->duplex != DUPLEX_FULL &&
9849 cmd->duplex != DUPLEX_HALF)
9852 if (cmd->autoneg == AUTONEG_ENABLE) {
9853 u32 mask = ADVERTISED_Autoneg |
9855 ADVERTISED_Asym_Pause;
9857 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9858 mask |= ADVERTISED_1000baseT_Half |
9859 ADVERTISED_1000baseT_Full;
9861 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9862 mask |= ADVERTISED_100baseT_Half |
9863 ADVERTISED_100baseT_Full |
9864 ADVERTISED_10baseT_Half |
9865 ADVERTISED_10baseT_Full |
9868 mask |= ADVERTISED_FIBRE;
9870 if (cmd->advertising & ~mask)
9873 mask &= (ADVERTISED_1000baseT_Half |
9874 ADVERTISED_1000baseT_Full |
9875 ADVERTISED_100baseT_Half |
9876 ADVERTISED_100baseT_Full |
9877 ADVERTISED_10baseT_Half |
9878 ADVERTISED_10baseT_Full);
9880 cmd->advertising &= mask;
9882 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9883 if (cmd->speed != SPEED_1000)
9886 if (cmd->duplex != DUPLEX_FULL)
9889 if (cmd->speed != SPEED_100 &&
9890 cmd->speed != SPEED_10)
9895 tg3_full_lock(tp, 0);
9897 tp->link_config.autoneg = cmd->autoneg;
9898 if (cmd->autoneg == AUTONEG_ENABLE) {
9899 tp->link_config.advertising = (cmd->advertising |
9900 ADVERTISED_Autoneg);
9901 tp->link_config.speed = SPEED_INVALID;
9902 tp->link_config.duplex = DUPLEX_INVALID;
9904 tp->link_config.advertising = 0;
9905 tp->link_config.speed = cmd->speed;
9906 tp->link_config.duplex = cmd->duplex;
9909 tp->link_config.orig_speed = tp->link_config.speed;
9910 tp->link_config.orig_duplex = tp->link_config.duplex;
9911 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9913 if (netif_running(dev))
9914 tg3_setup_phy(tp, 1);
9916 tg3_full_unlock(tp);
9921 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9923 struct tg3 *tp = netdev_priv(dev);
9925 strcpy(info->driver, DRV_MODULE_NAME);
9926 strcpy(info->version, DRV_MODULE_VERSION);
9927 strcpy(info->fw_version, tp->fw_ver);
9928 strcpy(info->bus_info, pci_name(tp->pdev));
9931 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9933 struct tg3 *tp = netdev_priv(dev);
9935 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9936 device_can_wakeup(&tp->pdev->dev))
9937 wol->supported = WAKE_MAGIC;
9941 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9942 device_can_wakeup(&tp->pdev->dev))
9943 wol->wolopts = WAKE_MAGIC;
9944 memset(&wol->sopass, 0, sizeof(wol->sopass));
9947 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9949 struct tg3 *tp = netdev_priv(dev);
9950 struct device *dp = &tp->pdev->dev;
9952 if (wol->wolopts & ~WAKE_MAGIC)
9954 if ((wol->wolopts & WAKE_MAGIC) &&
9955 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9958 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
9960 spin_lock_bh(&tp->lock);
9961 if (device_may_wakeup(dp))
9962 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9964 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9965 spin_unlock_bh(&tp->lock);
9971 static u32 tg3_get_msglevel(struct net_device *dev)
9973 struct tg3 *tp = netdev_priv(dev);
9974 return tp->msg_enable;
9977 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9979 struct tg3 *tp = netdev_priv(dev);
9980 tp->msg_enable = value;
9983 static int tg3_set_tso(struct net_device *dev, u32 value)
9985 struct tg3 *tp = netdev_priv(dev);
9987 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9992 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9993 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9994 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9996 dev->features |= NETIF_F_TSO6;
9997 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9999 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
10000 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
10001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
10002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
10003 dev->features |= NETIF_F_TSO_ECN;
10005 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
10007 return ethtool_op_set_tso(dev, value);
10010 static int tg3_nway_reset(struct net_device *dev)
10012 struct tg3 *tp = netdev_priv(dev);
10015 if (!netif_running(dev))
10018 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10021 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10022 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10024 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10028 spin_lock_bh(&tp->lock);
10030 tg3_readphy(tp, MII_BMCR, &bmcr);
10031 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10032 ((bmcr & BMCR_ANENABLE) ||
10033 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10034 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10038 spin_unlock_bh(&tp->lock);
10044 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10046 struct tg3 *tp = netdev_priv(dev);
10048 ering->rx_max_pending = tp->rx_std_ring_mask;
10049 ering->rx_mini_max_pending = 0;
10050 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10051 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10053 ering->rx_jumbo_max_pending = 0;
10055 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10057 ering->rx_pending = tp->rx_pending;
10058 ering->rx_mini_pending = 0;
10059 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10060 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10062 ering->rx_jumbo_pending = 0;
10064 ering->tx_pending = tp->napi[0].tx_pending;
10067 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10069 struct tg3 *tp = netdev_priv(dev);
10070 int i, irq_sync = 0, err = 0;
10072 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10073 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10074 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10075 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10076 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
10077 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10080 if (netif_running(dev)) {
10082 tg3_netif_stop(tp);
10086 tg3_full_lock(tp, irq_sync);
10088 tp->rx_pending = ering->rx_pending;
10090 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
10091 tp->rx_pending > 63)
10092 tp->rx_pending = 63;
10093 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10095 for (i = 0; i < tp->irq_max; i++)
10096 tp->napi[i].tx_pending = ering->tx_pending;
10098 if (netif_running(dev)) {
10099 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10100 err = tg3_restart_hw(tp, 1);
10102 tg3_netif_start(tp);
10105 tg3_full_unlock(tp);
10107 if (irq_sync && !err)
10113 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10115 struct tg3 *tp = netdev_priv(dev);
10117 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
10119 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10120 epause->rx_pause = 1;
10122 epause->rx_pause = 0;
10124 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10125 epause->tx_pause = 1;
10127 epause->tx_pause = 0;
10130 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10132 struct tg3 *tp = netdev_priv(dev);
10135 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10137 struct phy_device *phydev;
10139 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10141 if (!(phydev->supported & SUPPORTED_Pause) ||
10142 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10143 (epause->rx_pause != epause->tx_pause)))
10146 tp->link_config.flowctrl = 0;
10147 if (epause->rx_pause) {
10148 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10150 if (epause->tx_pause) {
10151 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10152 newadv = ADVERTISED_Pause;
10154 newadv = ADVERTISED_Pause |
10155 ADVERTISED_Asym_Pause;
10156 } else if (epause->tx_pause) {
10157 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10158 newadv = ADVERTISED_Asym_Pause;
10162 if (epause->autoneg)
10163 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10165 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10167 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10168 u32 oldadv = phydev->advertising &
10169 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10170 if (oldadv != newadv) {
10171 phydev->advertising &=
10172 ~(ADVERTISED_Pause |
10173 ADVERTISED_Asym_Pause);
10174 phydev->advertising |= newadv;
10175 if (phydev->autoneg) {
10177 * Always renegotiate the link to
10178 * inform our link partner of our
10179 * flow control settings, even if the
10180 * flow control is forced. Let
10181 * tg3_adjust_link() do the final
10182 * flow control setup.
10184 return phy_start_aneg(phydev);
10188 if (!epause->autoneg)
10189 tg3_setup_flow_control(tp, 0, 0);
10191 tp->link_config.orig_advertising &=
10192 ~(ADVERTISED_Pause |
10193 ADVERTISED_Asym_Pause);
10194 tp->link_config.orig_advertising |= newadv;
10199 if (netif_running(dev)) {
10200 tg3_netif_stop(tp);
10204 tg3_full_lock(tp, irq_sync);
10206 if (epause->autoneg)
10207 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10209 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10210 if (epause->rx_pause)
10211 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10213 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10214 if (epause->tx_pause)
10215 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10217 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10219 if (netif_running(dev)) {
10220 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10221 err = tg3_restart_hw(tp, 1);
10223 tg3_netif_start(tp);
10226 tg3_full_unlock(tp);
10232 static u32 tg3_get_rx_csum(struct net_device *dev)
10234 struct tg3 *tp = netdev_priv(dev);
10235 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10238 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10240 struct tg3 *tp = netdev_priv(dev);
10242 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10248 spin_lock_bh(&tp->lock);
10250 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10252 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10253 spin_unlock_bh(&tp->lock);
10258 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10260 struct tg3 *tp = netdev_priv(dev);
10262 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10268 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10269 ethtool_op_set_tx_ipv6_csum(dev, data);
10271 ethtool_op_set_tx_csum(dev, data);
10276 static int tg3_get_sset_count(struct net_device *dev, int sset)
10280 return TG3_NUM_TEST;
10282 return TG3_NUM_STATS;
10284 return -EOPNOTSUPP;
10288 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10290 switch (stringset) {
10292 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10295 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10298 WARN_ON(1); /* we need a WARN() */
10303 static int tg3_phys_id(struct net_device *dev, u32 data)
10305 struct tg3 *tp = netdev_priv(dev);
10308 if (!netif_running(tp->dev))
10312 data = UINT_MAX / 2;
10314 for (i = 0; i < (data * 2); i++) {
10316 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10317 LED_CTRL_1000MBPS_ON |
10318 LED_CTRL_100MBPS_ON |
10319 LED_CTRL_10MBPS_ON |
10320 LED_CTRL_TRAFFIC_OVERRIDE |
10321 LED_CTRL_TRAFFIC_BLINK |
10322 LED_CTRL_TRAFFIC_LED);
10325 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10326 LED_CTRL_TRAFFIC_OVERRIDE);
10328 if (msleep_interruptible(500))
10331 tw32(MAC_LED_CTRL, tp->led_ctrl);
10335 static void tg3_get_ethtool_stats(struct net_device *dev,
10336 struct ethtool_stats *estats, u64 *tmp_stats)
10338 struct tg3 *tp = netdev_priv(dev);
10339 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10342 #define NVRAM_TEST_SIZE 0x100
10343 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10344 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10345 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10346 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10347 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10349 static int tg3_test_nvram(struct tg3 *tp)
10353 int i, j, k, err = 0, size;
10355 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10358 if (tg3_nvram_read(tp, 0, &magic) != 0)
10361 if (magic == TG3_EEPROM_MAGIC)
10362 size = NVRAM_TEST_SIZE;
10363 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10364 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10365 TG3_EEPROM_SB_FORMAT_1) {
10366 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10367 case TG3_EEPROM_SB_REVISION_0:
10368 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10370 case TG3_EEPROM_SB_REVISION_2:
10371 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10373 case TG3_EEPROM_SB_REVISION_3:
10374 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10381 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10382 size = NVRAM_SELFBOOT_HW_SIZE;
10386 buf = kmalloc(size, GFP_KERNEL);
10391 for (i = 0, j = 0; i < size; i += 4, j++) {
10392 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10399 /* Selfboot format */
10400 magic = be32_to_cpu(buf[0]);
10401 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10402 TG3_EEPROM_MAGIC_FW) {
10403 u8 *buf8 = (u8 *) buf, csum8 = 0;
10405 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10406 TG3_EEPROM_SB_REVISION_2) {
10407 /* For rev 2, the csum doesn't include the MBA. */
10408 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10410 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10413 for (i = 0; i < size; i++)
10426 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10427 TG3_EEPROM_MAGIC_HW) {
10428 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10429 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10430 u8 *buf8 = (u8 *) buf;
10432 /* Separate the parity bits and the data bytes. */
10433 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10434 if ((i == 0) || (i == 8)) {
10438 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10439 parity[k++] = buf8[i] & msk;
10441 } else if (i == 16) {
10445 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10446 parity[k++] = buf8[i] & msk;
10449 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10450 parity[k++] = buf8[i] & msk;
10453 data[j++] = buf8[i];
10457 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10458 u8 hw8 = hweight8(data[i]);
10460 if ((hw8 & 0x1) && parity[i])
10462 else if (!(hw8 & 0x1) && !parity[i])
10469 /* Bootstrap checksum at offset 0x10 */
10470 csum = calc_crc((unsigned char *) buf, 0x10);
10471 if (csum != be32_to_cpu(buf[0x10/4]))
10474 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10475 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10476 if (csum != be32_to_cpu(buf[0xfc/4]))
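/* Illustrative sketch of the parity rule checked in the hardware-selfboot
 * branch above: each stored data byte has a companion parity bit carried
 * elsewhere in the image, and the byte-plus-parity combination must have
 * odd parity, i.e. a data byte with an even number of one-bits needs its
 * parity bit set, and an odd count needs it clear. Hypothetical helper.
 */
static bool tg3ex_selfboot_parity_ok(u8 data, bool parity_bit)
{
	bool odd_ones = hweight8(data) & 1;

	return odd_ones ? !parity_bit : parity_bit;
}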
10486 #define TG3_SERDES_TIMEOUT_SEC 2
10487 #define TG3_COPPER_TIMEOUT_SEC 6
10489 static int tg3_test_link(struct tg3 *tp)
10493 if (!netif_running(tp->dev))
10496 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10497 max = TG3_SERDES_TIMEOUT_SEC;
10499 max = TG3_COPPER_TIMEOUT_SEC;
10501 for (i = 0; i < max; i++) {
10502 if (netif_carrier_ok(tp->dev))
10505 if (msleep_interruptible(1000))
10512 /* Only test the commonly used registers */
10513 static int tg3_test_registers(struct tg3 *tp)
10515 int i, is_5705, is_5750;
10516 u32 offset, read_mask, write_mask, val, save_val, read_val;
10520 #define TG3_FL_5705 0x1
10521 #define TG3_FL_NOT_5705 0x2
10522 #define TG3_FL_NOT_5788 0x4
10523 #define TG3_FL_NOT_5750 0x8
10527 /* MAC Control Registers */
10528 { MAC_MODE, TG3_FL_NOT_5705,
10529 0x00000000, 0x00ef6f8c },
10530 { MAC_MODE, TG3_FL_5705,
10531 0x00000000, 0x01ef6b8c },
10532 { MAC_STATUS, TG3_FL_NOT_5705,
10533 0x03800107, 0x00000000 },
10534 { MAC_STATUS, TG3_FL_5705,
10535 0x03800100, 0x00000000 },
10536 { MAC_ADDR_0_HIGH, 0x0000,
10537 0x00000000, 0x0000ffff },
10538 { MAC_ADDR_0_LOW, 0x0000,
10539 0x00000000, 0xffffffff },
10540 { MAC_RX_MTU_SIZE, 0x0000,
10541 0x00000000, 0x0000ffff },
10542 { MAC_TX_MODE, 0x0000,
10543 0x00000000, 0x00000070 },
10544 { MAC_TX_LENGTHS, 0x0000,
10545 0x00000000, 0x00003fff },
10546 { MAC_RX_MODE, TG3_FL_NOT_5705,
10547 0x00000000, 0x000007fc },
10548 { MAC_RX_MODE, TG3_FL_5705,
10549 0x00000000, 0x000007dc },
10550 { MAC_HASH_REG_0, 0x0000,
10551 0x00000000, 0xffffffff },
10552 { MAC_HASH_REG_1, 0x0000,
10553 0x00000000, 0xffffffff },
10554 { MAC_HASH_REG_2, 0x0000,
10555 0x00000000, 0xffffffff },
10556 { MAC_HASH_REG_3, 0x0000,
10557 0x00000000, 0xffffffff },
10559 /* Receive Data and Receive BD Initiator Control Registers. */
10560 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10561 0x00000000, 0xffffffff },
10562 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10563 0x00000000, 0xffffffff },
10564 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10565 0x00000000, 0x00000003 },
10566 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10567 0x00000000, 0xffffffff },
10568 { RCVDBDI_STD_BD+0, 0x0000,
10569 0x00000000, 0xffffffff },
10570 { RCVDBDI_STD_BD+4, 0x0000,
10571 0x00000000, 0xffffffff },
10572 { RCVDBDI_STD_BD+8, 0x0000,
10573 0x00000000, 0xffff0002 },
10574 { RCVDBDI_STD_BD+0xc, 0x0000,
10575 0x00000000, 0xffffffff },
10577 /* Receive BD Initiator Control Registers. */
10578 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10579 0x00000000, 0xffffffff },
10580 { RCVBDI_STD_THRESH, TG3_FL_5705,
10581 0x00000000, 0x000003ff },
10582 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10583 0x00000000, 0xffffffff },
10585 /* Host Coalescing Control Registers. */
10586 { HOSTCC_MODE, TG3_FL_NOT_5705,
10587 0x00000000, 0x00000004 },
10588 { HOSTCC_MODE, TG3_FL_5705,
10589 0x00000000, 0x000000f6 },
10590 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10591 0x00000000, 0xffffffff },
10592 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10593 0x00000000, 0x000003ff },
10594 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10595 0x00000000, 0xffffffff },
10596 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10597 0x00000000, 0x000003ff },
10598 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10599 0x00000000, 0xffffffff },
10600 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10601 0x00000000, 0x000000ff },
10602 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10603 0x00000000, 0xffffffff },
10604 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10605 0x00000000, 0x000000ff },
10606 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10607 0x00000000, 0xffffffff },
10608 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10609 0x00000000, 0xffffffff },
10610 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10611 0x00000000, 0xffffffff },
10612 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10613 0x00000000, 0x000000ff },
10614 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10615 0x00000000, 0xffffffff },
10616 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10617 0x00000000, 0x000000ff },
10618 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10619 0x00000000, 0xffffffff },
10620 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10621 0x00000000, 0xffffffff },
10622 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10623 0x00000000, 0xffffffff },
10624 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10625 0x00000000, 0xffffffff },
10626 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10627 0x00000000, 0xffffffff },
10628 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10629 0xffffffff, 0x00000000 },
10630 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10631 0xffffffff, 0x00000000 },
10633 /* Buffer Manager Control Registers. */
10634 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10635 0x00000000, 0x007fff80 },
10636 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10637 0x00000000, 0x007fffff },
10638 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10639 0x00000000, 0x0000003f },
10640 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10641 0x00000000, 0x000001ff },
10642 { BUFMGR_MB_HIGH_WATER, 0x0000,
10643 0x00000000, 0x000001ff },
10644 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10645 0xffffffff, 0x00000000 },
10646 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10647 0xffffffff, 0x00000000 },
10649 /* Mailbox Registers */
10650 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10651 0x00000000, 0x000001ff },
10652 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10653 0x00000000, 0x000001ff },
10654 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10655 0x00000000, 0x000007ff },
10656 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10657 0x00000000, 0x000001ff },
10659 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10662 is_5705 = is_5750 = 0;
10663 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10665 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10669 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10670 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10673 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10676 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10677 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10680 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10683 offset = (u32) reg_tbl[i].offset;
10684 read_mask = reg_tbl[i].read_mask;
10685 write_mask = reg_tbl[i].write_mask;
10687 /* Save the original register content */
10688 save_val = tr32(offset);
10690 /* Determine the read-only value. */
10691 read_val = save_val & read_mask;
10693 /* Write zero to the register, then make sure the read-only bits
10694 * are not changed and the read/write bits are all zeros.
10698 val = tr32(offset);
10700 /* Test the read-only and read/write bits. */
10701 if (((val & read_mask) != read_val) || (val & write_mask))
10704 /* Write ones to all the bits defined by RdMask and WrMask, then
10705 * make sure the read-only bits are not changed and the
10706 * read/write bits are all ones.
10708 tw32(offset, read_mask | write_mask);
10710 val = tr32(offset);
10712 /* Test the read-only bits. */
10713 if ((val & read_mask) != read_val)
10716 /* Test the read/write bits. */
10717 if ((val & write_mask) != write_mask)
10720 tw32(offset, save_val);
10726 if (netif_msg_hw(tp))
10727 netdev_err(tp->dev,
10728 "Register test failed at offset %x\n", offset);
10729 tw32(offset, save_val);
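/* Compact restatement of the per-register check performed by the loop
 * above, for illustration only (hypothetical helper): after writing zeros
 * the read-only bits must be unchanged and the read/write bits clear, and
 * after writing read_mask | write_mask the read-only bits must still be
 * unchanged and the read/write bits all set.
 */
static bool tg3ex_reg_masks_ok(u32 val_after_zeros, u32 val_after_ones,
			       u32 read_only_val, u32 read_mask, u32 write_mask)
{
	if ((val_after_zeros & read_mask) != read_only_val ||
	    (val_after_zeros & write_mask) != 0)
		return false;

	return (val_after_ones & read_mask) == read_only_val &&
	       (val_after_ones & write_mask) == write_mask;
}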
10733 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10735 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10739 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10740 for (j = 0; j < len; j += 4) {
10743 tg3_write_mem(tp, offset + j, test_pattern[i]);
10744 tg3_read_mem(tp, offset + j, &val);
10745 if (val != test_pattern[i])
10752 static int tg3_test_memory(struct tg3 *tp)
10754 static struct mem_entry {
10757 } mem_tbl_570x[] = {
10758 { 0x00000000, 0x00b50},
10759 { 0x00002000, 0x1c000},
10760 { 0xffffffff, 0x00000}
10761 }, mem_tbl_5705[] = {
10762 { 0x00000100, 0x0000c},
10763 { 0x00000200, 0x00008},
10764 { 0x00004000, 0x00800},
10765 { 0x00006000, 0x01000},
10766 { 0x00008000, 0x02000},
10767 { 0x00010000, 0x0e000},
10768 { 0xffffffff, 0x00000}
10769 }, mem_tbl_5755[] = {
10770 { 0x00000200, 0x00008},
10771 { 0x00004000, 0x00800},
10772 { 0x00006000, 0x00800},
10773 { 0x00008000, 0x02000},
10774 { 0x00010000, 0x0c000},
10775 { 0xffffffff, 0x00000}
10776 }, mem_tbl_5906[] = {
10777 { 0x00000200, 0x00008},
10778 { 0x00004000, 0x00400},
10779 { 0x00006000, 0x00400},
10780 { 0x00008000, 0x01000},
10781 { 0x00010000, 0x01000},
10782 { 0xffffffff, 0x00000}
10783 }, mem_tbl_5717[] = {
10784 { 0x00000200, 0x00008},
10785 { 0x00010000, 0x0a000},
10786 { 0x00020000, 0x13c00},
10787 { 0xffffffff, 0x00000}
10788 }, mem_tbl_57765[] = {
10789 { 0x00000200, 0x00008},
10790 { 0x00004000, 0x00800},
10791 { 0x00006000, 0x09800},
10792 { 0x00010000, 0x0a000},
10793 { 0xffffffff, 0x00000}
10795 struct mem_entry *mem_tbl;
10799 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10800 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10801 mem_tbl = mem_tbl_5717;
10802 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10803 mem_tbl = mem_tbl_57765;
10804 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10805 mem_tbl = mem_tbl_5755;
10806 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10807 mem_tbl = mem_tbl_5906;
10808 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10809 mem_tbl = mem_tbl_5705;
10811 mem_tbl = mem_tbl_570x;
10813 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10814 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10822 #define TG3_MAC_LOOPBACK 0
10823 #define TG3_PHY_LOOPBACK 1
10825 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10827 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10828 u32 desc_idx, coal_now;
10829 struct sk_buff *skb, *rx_skb;
10832 int num_pkts, tx_len, rx_len, i, err;
10833 struct tg3_rx_buffer_desc *desc;
10834 struct tg3_napi *tnapi, *rnapi;
10835 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10837 tnapi = &tp->napi[0];
10838 rnapi = &tp->napi[0];
10839 if (tp->irq_cnt > 1) {
10840 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
10841 rnapi = &tp->napi[1];
10842 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10843 tnapi = &tp->napi[1];
10845 coal_now = tnapi->coal_now | rnapi->coal_now;
10847 if (loopback_mode == TG3_MAC_LOOPBACK) {
10848 /* HW errata - mac loopback fails in some cases on 5780.
10849 * Normal traffic and PHY loopback are not affected by
10850 * this erratum.
10852 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10855 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10856 MAC_MODE_PORT_INT_LPBACK;
10857 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10858 mac_mode |= MAC_MODE_LINK_POLARITY;
10859 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10860 mac_mode |= MAC_MODE_PORT_MODE_MII;
10862 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10863 tw32(MAC_MODE, mac_mode);
10864 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10867 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10868 tg3_phy_fet_toggle_apd(tp, false);
10869 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10871 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10873 tg3_phy_toggle_automdix(tp, 0);
10875 tg3_writephy(tp, MII_BMCR, val);
10878 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10879 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10880 tg3_writephy(tp, MII_TG3_FET_PTEST,
10881 MII_TG3_FET_PTEST_FRC_TX_LINK |
10882 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10883 /* The write needs to be flushed for the AC131 */
10884 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10885 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10886 mac_mode |= MAC_MODE_PORT_MODE_MII;
10888 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10890 /* reset to prevent losing 1st rx packet intermittently */
10891 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10892 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10894 tw32_f(MAC_RX_MODE, tp->rx_mode);
10896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10897 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10898 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10899 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10900 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10901 mac_mode |= MAC_MODE_LINK_POLARITY;
10902 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10903 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10905 tw32(MAC_MODE, mac_mode);
10913 skb = netdev_alloc_skb(tp->dev, tx_len);
10917 tx_data = skb_put(skb, tx_len);
10918 memcpy(tx_data, tp->dev->dev_addr, 6);
10919 memset(tx_data + 6, 0x0, 8);
10921 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10923 for (i = 14; i < tx_len; i++)
10924 tx_data[i] = (u8) (i & 0xff);
10926 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10927 if (pci_dma_mapping_error(tp->pdev, map)) {
10928 dev_kfree_skb(skb);
10932 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10937 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10941 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10946 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10947 tr32_mailbox(tnapi->prodmbox);
10951 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10952 for (i = 0; i < 35; i++) {
10953 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10958 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10959 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10960 if ((tx_idx == tnapi->tx_prod) &&
10961 (rx_idx == (rx_start_idx + num_pkts)))
10965 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10966 dev_kfree_skb(skb);
10968 if (tx_idx != tnapi->tx_prod)
10971 if (rx_idx != rx_start_idx + num_pkts)
10974 desc = &rnapi->rx_rcb[rx_start_idx];
10975 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10976 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10977 if (opaque_key != RXD_OPAQUE_RING_STD)
10980 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10981 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10984 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10985 if (rx_len != tx_len)
10988 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10990 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10991 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10993 for (i = 14; i < tx_len; i++) {
10994 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10999 /* tg3_free_rings will unmap and free the rx_skb */
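/* Illustrative sketch of the loopback test frame built earlier in
 * tg3_run_loopback (hypothetical stand-alone helper): destination MAC set
 * to the device's own address, eight zero bytes covering the source
 * address and type field, then a deterministic byte pattern from offset 14
 * onward that the received copy is compared against byte-for-byte.
 */
static void tg3ex_fill_loopback_frame(u8 *buf, int len, const u8 *own_mac)
{
	int i;

	memcpy(buf, own_mac, 6);	/* destination = our own MAC */
	memset(buf + 6, 0, 8);		/* source addr + type left zeroed */
	for (i = 14; i < len; i++)
		buf[i] = (u8)(i & 0xff);	/* payload pattern checked on rx */
}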
11004 #define TG3_MAC_LOOPBACK_FAILED 1
11005 #define TG3_PHY_LOOPBACK_FAILED 2
11006 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
11007 TG3_PHY_LOOPBACK_FAILED)
11009 static int tg3_test_loopback(struct tg3 *tp)
11014 if (!netif_running(tp->dev))
11015 return TG3_LOOPBACK_FAILED;
11017 err = tg3_reset_hw(tp, 1);
11019 return TG3_LOOPBACK_FAILED;
11021 /* Turn off gphy autopowerdown. */
11022 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11023 tg3_phy_toggle_apd(tp, false);
11025 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11029 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11031 /* Wait for up to 40 microseconds to acquire lock. */
11032 for (i = 0; i < 4; i++) {
11033 status = tr32(TG3_CPMU_MUTEX_GNT);
11034 if (status == CPMU_MUTEX_GNT_DRIVER)
11039 if (status != CPMU_MUTEX_GNT_DRIVER)
11040 return TG3_LOOPBACK_FAILED;
11042 /* Turn off link-based power management. */
11043 cpmuctrl = tr32(TG3_CPMU_CTRL);
11044 tw32(TG3_CPMU_CTRL,
11045 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11046 CPMU_CTRL_LINK_AWARE_MODE));
11049 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
11050 err |= TG3_MAC_LOOPBACK_FAILED;
11052 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11053 tw32(TG3_CPMU_CTRL, cpmuctrl);
11055 /* Release the mutex */
11056 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11059 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11060 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
11061 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
11062 err |= TG3_PHY_LOOPBACK_FAILED;
11065 /* Re-enable gphy autopowerdown. */
11066 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11067 tg3_phy_toggle_apd(tp, true);
11072 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11075 struct tg3 *tp = netdev_priv(dev);
11077 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11078 tg3_set_power_state(tp, PCI_D0);
11080 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11082 if (tg3_test_nvram(tp) != 0) {
11083 etest->flags |= ETH_TEST_FL_FAILED;
11086 if (tg3_test_link(tp) != 0) {
11087 etest->flags |= ETH_TEST_FL_FAILED;
11090 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11091 int err, err2 = 0, irq_sync = 0;
11093 if (netif_running(dev)) {
11095 tg3_netif_stop(tp);
11099 tg3_full_lock(tp, irq_sync);
11101 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11102 err = tg3_nvram_lock(tp);
11103 tg3_halt_cpu(tp, RX_CPU_BASE);
11104 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11105 tg3_halt_cpu(tp, TX_CPU_BASE);
11107 tg3_nvram_unlock(tp);
11109 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11112 if (tg3_test_registers(tp) != 0) {
11113 etest->flags |= ETH_TEST_FL_FAILED;
11116 if (tg3_test_memory(tp) != 0) {
11117 etest->flags |= ETH_TEST_FL_FAILED;
11120 if ((data[4] = tg3_test_loopback(tp)) != 0)
11121 etest->flags |= ETH_TEST_FL_FAILED;
11123 tg3_full_unlock(tp);
11125 if (tg3_test_interrupt(tp) != 0) {
11126 etest->flags |= ETH_TEST_FL_FAILED;
11130 tg3_full_lock(tp, 0);
11132 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11133 if (netif_running(dev)) {
11134 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11135 err2 = tg3_restart_hw(tp, 1);
11137 tg3_netif_start(tp);
11140 tg3_full_unlock(tp);
11142 if (irq_sync && !err2)
11145 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11146 tg3_set_power_state(tp, PCI_D3hot);
11150 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11152 struct mii_ioctl_data *data = if_mii(ifr);
11153 struct tg3 *tp = netdev_priv(dev);
11156 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11157 struct phy_device *phydev;
11158 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11160 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11161 return phy_mii_ioctl(phydev, ifr, cmd);
11166 data->phy_id = tp->phy_addr;
11169 case SIOCGMIIREG: {
11172 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11173 break; /* We have no PHY */
11175 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11178 spin_lock_bh(&tp->lock);
11179 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11180 spin_unlock_bh(&tp->lock);
11182 data->val_out = mii_regval;
11188 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11189 break; /* We have no PHY */
11191 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11194 spin_lock_bh(&tp->lock);
11195 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11196 spin_unlock_bh(&tp->lock);
11204 return -EOPNOTSUPP;
11207 #if TG3_VLAN_TAG_USED
11208 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11210 struct tg3 *tp = netdev_priv(dev);
11212 if (!netif_running(dev)) {
11217 tg3_netif_stop(tp);
11219 tg3_full_lock(tp, 0);
11223 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11224 __tg3_set_rx_mode(dev);
11226 tg3_netif_start(tp);
11228 tg3_full_unlock(tp);
11232 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11234 struct tg3 *tp = netdev_priv(dev);
11236 memcpy(ec, &tp->coal, sizeof(*ec));
11240 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11242 struct tg3 *tp = netdev_priv(dev);
11243 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11244 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11246 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11247 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11248 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11249 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11250 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11253 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11254 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11255 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11256 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11257 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11258 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11259 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11260 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11261 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11262 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11265 /* No rx interrupts will be generated if both are zero */
11266 if ((ec->rx_coalesce_usecs == 0) &&
11267 (ec->rx_max_coalesced_frames == 0))
11270 /* No tx interrupts will be generated if both are zero */
11271 if ((ec->tx_coalesce_usecs == 0) &&
11272 (ec->tx_max_coalesced_frames == 0))
11275 /* Only copy relevant parameters, ignore all others. */
11276 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11277 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11278 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11279 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11280 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11281 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11282 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11283 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11284 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11286 if (netif_running(dev)) {
11287 tg3_full_lock(tp, 0);
11288 __tg3_set_coalesce(tp, &tp->coal);
11289 tg3_full_unlock(tp);
11294 static const struct ethtool_ops tg3_ethtool_ops = {
11295 .get_settings = tg3_get_settings,
11296 .set_settings = tg3_set_settings,
11297 .get_drvinfo = tg3_get_drvinfo,
11298 .get_regs_len = tg3_get_regs_len,
11299 .get_regs = tg3_get_regs,
11300 .get_wol = tg3_get_wol,
11301 .set_wol = tg3_set_wol,
11302 .get_msglevel = tg3_get_msglevel,
11303 .set_msglevel = tg3_set_msglevel,
11304 .nway_reset = tg3_nway_reset,
11305 .get_link = ethtool_op_get_link,
11306 .get_eeprom_len = tg3_get_eeprom_len,
11307 .get_eeprom = tg3_get_eeprom,
11308 .set_eeprom = tg3_set_eeprom,
11309 .get_ringparam = tg3_get_ringparam,
11310 .set_ringparam = tg3_set_ringparam,
11311 .get_pauseparam = tg3_get_pauseparam,
11312 .set_pauseparam = tg3_set_pauseparam,
11313 .get_rx_csum = tg3_get_rx_csum,
11314 .set_rx_csum = tg3_set_rx_csum,
11315 .set_tx_csum = tg3_set_tx_csum,
11316 .set_sg = ethtool_op_set_sg,
11317 .set_tso = tg3_set_tso,
11318 .self_test = tg3_self_test,
11319 .get_strings = tg3_get_strings,
11320 .phys_id = tg3_phys_id,
11321 .get_ethtool_stats = tg3_get_ethtool_stats,
11322 .get_coalesce = tg3_get_coalesce,
11323 .set_coalesce = tg3_set_coalesce,
11324 .get_sset_count = tg3_get_sset_count,
11327 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11329 u32 cursize, val, magic;
11331 tp->nvram_size = EEPROM_CHIP_SIZE;
11333 if (tg3_nvram_read(tp, 0, &magic) != 0)
11336 if ((magic != TG3_EEPROM_MAGIC) &&
11337 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11338 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11342 * Size the chip by reading offsets at increasing powers of two.
11343 * When we encounter our validation signature, we know the addressing
11344 * has wrapped around, and thus have our chip size.
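* (For example, a 16 KB part aliases offset 0x4000 back to offset 0,
* so the read there returns the magic signature again and the loop
* below stops with cursize == 0x4000.)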
11348 while (cursize < tp->nvram_size) {
11349 if (tg3_nvram_read(tp, cursize, &val) != 0)
11358 tp->nvram_size = cursize;
11361 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11365 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11366 tg3_nvram_read(tp, 0, &val) != 0)
11369 /* Selfboot format */
11370 if (val != TG3_EEPROM_MAGIC) {
11371 tg3_get_eeprom_size(tp);
11375 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11377 /* This is confusing. We want to operate on the
11378 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11379 * call will read from NVRAM and byteswap the data
11380 * according to the byteswapping settings for all
11381 * other register accesses. This ensures the data we
11382 * want will always reside in the lower 16-bits.
11383 * However, the data in NVRAM is in LE format, which
11384 * means the data from the NVRAM read will always be
11385 * opposite the endianness of the CPU. The 16-bit
11386 * byteswap then brings the data to CPU endianness.
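* (Concretely: if the low half of val reads 0x0002, the swab16()
* below yields 0x0200, i.e. a 512 KB device.)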
11388 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11392 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11395 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11399 nvcfg1 = tr32(NVRAM_CFG1);
11400 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11401 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11403 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11404 tw32(NVRAM_CFG1, nvcfg1);
11407 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11408 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11409 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11410 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11411 tp->nvram_jedecnum = JEDEC_ATMEL;
11412 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11413 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11415 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11416 tp->nvram_jedecnum = JEDEC_ATMEL;
11417 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11419 case FLASH_VENDOR_ATMEL_EEPROM:
11420 tp->nvram_jedecnum = JEDEC_ATMEL;
11421 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11422 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11424 case FLASH_VENDOR_ST:
11425 tp->nvram_jedecnum = JEDEC_ST;
11426 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11427 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11429 case FLASH_VENDOR_SAIFUN:
11430 tp->nvram_jedecnum = JEDEC_SAIFUN;
11431 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11433 case FLASH_VENDOR_SST_SMALL:
11434 case FLASH_VENDOR_SST_LARGE:
11435 tp->nvram_jedecnum = JEDEC_SST;
11436 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11440 tp->nvram_jedecnum = JEDEC_ATMEL;
11441 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11442 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
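/* Decode the page size advertised in NVRAM_CFG1. The 264- and
* 528-byte sizes are Atmel DataFlash page geometries (256 + 8 and
* 512 + 16 bytes); the other sizes are ordinary powers of two.
*/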
11446 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11448 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11449 case FLASH_5752PAGE_SIZE_256:
11450 tp->nvram_pagesize = 256;
11452 case FLASH_5752PAGE_SIZE_512:
11453 tp->nvram_pagesize = 512;
11455 case FLASH_5752PAGE_SIZE_1K:
11456 tp->nvram_pagesize = 1024;
11458 case FLASH_5752PAGE_SIZE_2K:
11459 tp->nvram_pagesize = 2048;
11461 case FLASH_5752PAGE_SIZE_4K:
11462 tp->nvram_pagesize = 4096;
11464 case FLASH_5752PAGE_SIZE_264:
11465 tp->nvram_pagesize = 264;
11467 case FLASH_5752PAGE_SIZE_528:
11468 tp->nvram_pagesize = 528;
11473 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11477 nvcfg1 = tr32(NVRAM_CFG1);
11479 /* NVRAM protection for TPM */
11480 if (nvcfg1 & (1 << 27))
11481 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11483 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11484 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11485 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11486 tp->nvram_jedecnum = JEDEC_ATMEL;
11487 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11489 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11490 tp->nvram_jedecnum = JEDEC_ATMEL;
11491 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11492 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11494 case FLASH_5752VENDOR_ST_M45PE10:
11495 case FLASH_5752VENDOR_ST_M45PE20:
11496 case FLASH_5752VENDOR_ST_M45PE40:
11497 tp->nvram_jedecnum = JEDEC_ST;
11498 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11499 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11503 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11504 tg3_nvram_get_pagesize(tp, nvcfg1);
11506 /* For eeprom, set pagesize to maximum eeprom size */
11507 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11509 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11510 tw32(NVRAM_CFG1, nvcfg1);
11514 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11516 u32 nvcfg1, protect = 0;
11518 nvcfg1 = tr32(NVRAM_CFG1);
11520 /* NVRAM protection for TPM */
11521 if (nvcfg1 & (1 << 27)) {
11522 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11526 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11528 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11529 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11530 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11531 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11532 tp->nvram_jedecnum = JEDEC_ATMEL;
11533 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11534 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11535 tp->nvram_pagesize = 264;
11536 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11537 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11538 tp->nvram_size = (protect ? 0x3e200 :
11539 TG3_NVRAM_SIZE_512KB);
11540 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11541 tp->nvram_size = (protect ? 0x1f200 :
11542 TG3_NVRAM_SIZE_256KB);
11544 tp->nvram_size = (protect ? 0x1f200 :
11545 TG3_NVRAM_SIZE_128KB);
11547 case FLASH_5752VENDOR_ST_M45PE10:
11548 case FLASH_5752VENDOR_ST_M45PE20:
11549 case FLASH_5752VENDOR_ST_M45PE40:
11550 tp->nvram_jedecnum = JEDEC_ST;
11551 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11552 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11553 tp->nvram_pagesize = 256;
11554 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11555 tp->nvram_size = (protect ?
11556 TG3_NVRAM_SIZE_64KB :
11557 TG3_NVRAM_SIZE_128KB);
11558 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11559 tp->nvram_size = (protect ?
11560 TG3_NVRAM_SIZE_64KB :
11561 TG3_NVRAM_SIZE_256KB);
11563 tp->nvram_size = (protect ?
11564 TG3_NVRAM_SIZE_128KB :
11565 TG3_NVRAM_SIZE_512KB);
11570 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11574 nvcfg1 = tr32(NVRAM_CFG1);
11576 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11577 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11578 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11579 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11580 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11581 tp->nvram_jedecnum = JEDEC_ATMEL;
11582 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11583 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11585 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11586 tw32(NVRAM_CFG1, nvcfg1);
11588 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11589 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11590 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11591 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11592 tp->nvram_jedecnum = JEDEC_ATMEL;
11593 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11594 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11595 tp->nvram_pagesize = 264;
11597 case FLASH_5752VENDOR_ST_M45PE10:
11598 case FLASH_5752VENDOR_ST_M45PE20:
11599 case FLASH_5752VENDOR_ST_M45PE40:
11600 tp->nvram_jedecnum = JEDEC_ST;
11601 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11602 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11603 tp->nvram_pagesize = 256;
11608 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11610 u32 nvcfg1, protect = 0;
11612 nvcfg1 = tr32(NVRAM_CFG1);
11614 /* NVRAM protection for TPM */
11615 if (nvcfg1 & (1 << 27)) {
11616 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11620 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11622 case FLASH_5761VENDOR_ATMEL_ADB021D:
11623 case FLASH_5761VENDOR_ATMEL_ADB041D:
11624 case FLASH_5761VENDOR_ATMEL_ADB081D:
11625 case FLASH_5761VENDOR_ATMEL_ADB161D:
11626 case FLASH_5761VENDOR_ATMEL_MDB021D:
11627 case FLASH_5761VENDOR_ATMEL_MDB041D:
11628 case FLASH_5761VENDOR_ATMEL_MDB081D:
11629 case FLASH_5761VENDOR_ATMEL_MDB161D:
11630 tp->nvram_jedecnum = JEDEC_ATMEL;
11631 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11632 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11633 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11634 tp->nvram_pagesize = 256;
11636 case FLASH_5761VENDOR_ST_A_M45PE20:
11637 case FLASH_5761VENDOR_ST_A_M45PE40:
11638 case FLASH_5761VENDOR_ST_A_M45PE80:
11639 case FLASH_5761VENDOR_ST_A_M45PE16:
11640 case FLASH_5761VENDOR_ST_M_M45PE20:
11641 case FLASH_5761VENDOR_ST_M_M45PE40:
11642 case FLASH_5761VENDOR_ST_M_M45PE80:
11643 case FLASH_5761VENDOR_ST_M_M45PE16:
11644 tp->nvram_jedecnum = JEDEC_ST;
11645 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11646 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11647 tp->nvram_pagesize = 256;
11652 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11655 case FLASH_5761VENDOR_ATMEL_ADB161D:
11656 case FLASH_5761VENDOR_ATMEL_MDB161D:
11657 case FLASH_5761VENDOR_ST_A_M45PE16:
11658 case FLASH_5761VENDOR_ST_M_M45PE16:
11659 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11661 case FLASH_5761VENDOR_ATMEL_ADB081D:
11662 case FLASH_5761VENDOR_ATMEL_MDB081D:
11663 case FLASH_5761VENDOR_ST_A_M45PE80:
11664 case FLASH_5761VENDOR_ST_M_M45PE80:
11665 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11667 case FLASH_5761VENDOR_ATMEL_ADB041D:
11668 case FLASH_5761VENDOR_ATMEL_MDB041D:
11669 case FLASH_5761VENDOR_ST_A_M45PE40:
11670 case FLASH_5761VENDOR_ST_M_M45PE40:
11671 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11673 case FLASH_5761VENDOR_ATMEL_ADB021D:
11674 case FLASH_5761VENDOR_ATMEL_MDB021D:
11675 case FLASH_5761VENDOR_ST_A_M45PE20:
11676 case FLASH_5761VENDOR_ST_M_M45PE20:
11677 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11683 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11685 tp->nvram_jedecnum = JEDEC_ATMEL;
11686 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11687 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11690 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11694 nvcfg1 = tr32(NVRAM_CFG1);
11696 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11697 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11698 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11699 tp->nvram_jedecnum = JEDEC_ATMEL;
11700 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11701 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11703 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11704 tw32(NVRAM_CFG1, nvcfg1);
11706 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11707 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11708 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11709 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11710 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11711 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11712 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11713 tp->nvram_jedecnum = JEDEC_ATMEL;
11714 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11715 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11717 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11718 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11719 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11720 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11721 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11723 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11724 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11725 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11727 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11728 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11729 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11733 case FLASH_5752VENDOR_ST_M45PE10:
11734 case FLASH_5752VENDOR_ST_M45PE20:
11735 case FLASH_5752VENDOR_ST_M45PE40:
11736 tp->nvram_jedecnum = JEDEC_ST;
11737 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11738 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11740 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11741 case FLASH_5752VENDOR_ST_M45PE10:
11742 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11744 case FLASH_5752VENDOR_ST_M45PE20:
11745 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11747 case FLASH_5752VENDOR_ST_M45PE40:
11748 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11753 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11757 tg3_nvram_get_pagesize(tp, nvcfg1);
11758 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11759 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11763 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11767 nvcfg1 = tr32(NVRAM_CFG1);
11769 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11770 case FLASH_5717VENDOR_ATMEL_EEPROM:
11771 case FLASH_5717VENDOR_MICRO_EEPROM:
11772 tp->nvram_jedecnum = JEDEC_ATMEL;
11773 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11774 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11776 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11777 tw32(NVRAM_CFG1, nvcfg1);
11779 case FLASH_5717VENDOR_ATMEL_MDB011D:
11780 case FLASH_5717VENDOR_ATMEL_ADB011B:
11781 case FLASH_5717VENDOR_ATMEL_ADB011D:
11782 case FLASH_5717VENDOR_ATMEL_MDB021D:
11783 case FLASH_5717VENDOR_ATMEL_ADB021B:
11784 case FLASH_5717VENDOR_ATMEL_ADB021D:
11785 case FLASH_5717VENDOR_ATMEL_45USPT:
11786 tp->nvram_jedecnum = JEDEC_ATMEL;
11787 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11788 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11790 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11791 case FLASH_5717VENDOR_ATMEL_MDB021D:
11792 case FLASH_5717VENDOR_ATMEL_ADB021B:
11793 case FLASH_5717VENDOR_ATMEL_ADB021D:
11794 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11797 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11801 case FLASH_5717VENDOR_ST_M_M25PE10:
11802 case FLASH_5717VENDOR_ST_A_M25PE10:
11803 case FLASH_5717VENDOR_ST_M_M45PE10:
11804 case FLASH_5717VENDOR_ST_A_M45PE10:
11805 case FLASH_5717VENDOR_ST_M_M25PE20:
11806 case FLASH_5717VENDOR_ST_A_M25PE20:
11807 case FLASH_5717VENDOR_ST_M_M45PE20:
11808 case FLASH_5717VENDOR_ST_A_M45PE20:
11809 case FLASH_5717VENDOR_ST_25USPT:
11810 case FLASH_5717VENDOR_ST_45USPT:
11811 tp->nvram_jedecnum = JEDEC_ST;
11812 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11813 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11815 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11816 case FLASH_5717VENDOR_ST_M_M25PE20:
11817 case FLASH_5717VENDOR_ST_A_M25PE20:
11818 case FLASH_5717VENDOR_ST_M_M45PE20:
11819 case FLASH_5717VENDOR_ST_A_M45PE20:
11820 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11823 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11828 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11832 tg3_nvram_get_pagesize(tp, nvcfg1);
11833 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11834 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11837 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11838 static void __devinit tg3_nvram_init(struct tg3 *tp)
11840 tw32_f(GRC_EEPROM_ADDR,
11841 (EEPROM_ADDR_FSM_RESET |
11842 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11843 EEPROM_ADDR_CLKPERD_SHIFT)));
11847 /* Enable seeprom accesses. */
11848 tw32_f(GRC_LOCAL_CTRL,
11849 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11852 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11853 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11854 tp->tg3_flags |= TG3_FLAG_NVRAM;
11856 if (tg3_nvram_lock(tp)) {
11857 netdev_warn(tp->dev,
11858 "Cannot get nvram lock, %s failed\n",
11862 tg3_enable_nvram_access(tp);
11864 tp->nvram_size = 0;
11866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11867 tg3_get_5752_nvram_info(tp);
11868 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11869 tg3_get_5755_nvram_info(tp);
11870 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11871 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11872 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11873 tg3_get_5787_nvram_info(tp);
11874 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11875 tg3_get_5761_nvram_info(tp);
11876 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11877 tg3_get_5906_nvram_info(tp);
11878 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11880 tg3_get_57780_nvram_info(tp);
11881 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11882 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11883 tg3_get_5717_nvram_info(tp);
11885 tg3_get_nvram_info(tp);
11887 if (tp->nvram_size == 0)
11888 tg3_get_nvram_size(tp);
11890 tg3_disable_nvram_access(tp);
11891 tg3_nvram_unlock(tp);
11894 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11896 tg3_get_eeprom_size(tp);
11900 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11901 u32 offset, u32 len, u8 *buf)
11906 for (i = 0; i < len; i += 4) {
11912 memcpy(&data, buf + i, 4);
11915 * The SEEPROM interface expects the data to always be opposite
11916 * the native endian format. We accomplish this by reversing
11917 * all the operations that would have been performed on the
11918 * data from a call to tg3_nvram_read_be32().
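* (Net effect: a buffer holding the bytes 0x11 0x22 0x33 0x44 is
* presented to the data register as the value 0x44332211 on any CPU.)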
11920 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11922 val = tr32(GRC_EEPROM_ADDR);
11923 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11925 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11927 tw32(GRC_EEPROM_ADDR, val |
11928 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11929 (addr & EEPROM_ADDR_ADDR_MASK) |
11930 EEPROM_ADDR_START |
11931 EEPROM_ADDR_WRITE);
11933 for (j = 0; j < 1000; j++) {
11934 val = tr32(GRC_EEPROM_ADDR);
11936 if (val & EEPROM_ADDR_COMPLETE)
11940 if (!(val & EEPROM_ADDR_COMPLETE)) {
11949 /* offset and length are dword aligned */
11950 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11954 u32 pagesize = tp->nvram_pagesize;
11955 u32 pagemask = pagesize - 1;
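/* Unbuffered flash can only be programmed a full page at a time:
* read the page containing 'offset' into a bounce buffer, merge in
* the caller's data, erase the page, then rewrite it word by word.
*/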
11959 tmp = kmalloc(pagesize, GFP_KERNEL);
11965 u32 phy_addr, page_off, size;
11967 phy_addr = offset & ~pagemask;
11969 for (j = 0; j < pagesize; j += 4) {
11970 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11971 (__be32 *) (tmp + j));
11978 page_off = offset & pagemask;
11985 memcpy(tmp + page_off, buf, size);
11987 offset = offset + (pagesize - page_off);
11989 tg3_enable_nvram_access(tp);
11992 * Before we can erase the flash page, we need
11993 * to issue a special "write enable" command.
11995 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11997 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12000 /* Erase the target page */
12001 tw32(NVRAM_ADDR, phy_addr);
12003 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12004 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12006 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12009 /* Issue another write enable to start the write. */
12010 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12012 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12015 for (j = 0; j < pagesize; j += 4) {
12018 data = *((__be32 *) (tmp + j));
12020 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12022 tw32(NVRAM_ADDR, phy_addr + j);
12024 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12028 nvram_cmd |= NVRAM_CMD_FIRST;
12029 else if (j == (pagesize - 4))
12030 nvram_cmd |= NVRAM_CMD_LAST;
12032 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12039 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12040 tg3_nvram_exec_cmd(tp, nvram_cmd);
12047 /* offset and length are dword aligned */
12048 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12053 for (i = 0; i < len; i += 4, offset += 4) {
12054 u32 page_off, phy_addr, nvram_cmd;
12057 memcpy(&data, buf + i, 4);
12058 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12060 page_off = offset % tp->nvram_pagesize;
12062 phy_addr = tg3_nvram_phys_addr(tp, offset);
12064 tw32(NVRAM_ADDR, phy_addr);
12066 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
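/* NVRAM_CMD_FIRST tags the first word written to a page and
* NVRAM_CMD_LAST the final word of a page (or of the whole transfer),
* presumably so the controller knows where one buffered page write
* begins and ends.
*/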
12068 if (page_off == 0 || i == 0)
12069 nvram_cmd |= NVRAM_CMD_FIRST;
12070 if (page_off == (tp->nvram_pagesize - 4))
12071 nvram_cmd |= NVRAM_CMD_LAST;
12073 if (i == (len - 4))
12074 nvram_cmd |= NVRAM_CMD_LAST;
12076 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12077 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
12078 (tp->nvram_jedecnum == JEDEC_ST) &&
12079 (nvram_cmd & NVRAM_CMD_FIRST)) {
12081 if ((ret = tg3_nvram_exec_cmd(tp,
12082 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12087 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12088 /* We always do complete word writes to eeprom. */
12089 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12092 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12098 /* offset and length are dword aligned */
12099 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12103 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12104 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12105 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12109 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
12110 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12114 ret = tg3_nvram_lock(tp);
12118 tg3_enable_nvram_access(tp);
12119 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
12120 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
12121 tw32(NVRAM_WRITE1, 0x406);
12123 grc_mode = tr32(GRC_MODE);
12124 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12126 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
12127 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12129 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12132 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12136 grc_mode = tr32(GRC_MODE);
12137 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12139 tg3_disable_nvram_access(tp);
12140 tg3_nvram_unlock(tp);
12143 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12144 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
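/* The subsys_id_to_phy_id[] table below maps PCI subsystem IDs to
* the PHY device a given board carries. tg3_phy_probe() consults it
* as a last resort when neither the MII ID registers nor the eeprom
* yield a usable PHY ID.
*/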
12151 struct subsys_tbl_ent {
12152 u16 subsys_vendor, subsys_devid;
12156 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12157 /* Broadcom boards. */
12158 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12159 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12160 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12161 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12162 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12163 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12164 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12165 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12166 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12167 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12168 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12169 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12170 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12171 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12172 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12173 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12174 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12175 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12176 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12177 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12178 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12179 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12182 { TG3PCI_SUBVENDOR_ID_3COM,
12183 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12184 { TG3PCI_SUBVENDOR_ID_3COM,
12185 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12186 { TG3PCI_SUBVENDOR_ID_3COM,
12187 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12188 { TG3PCI_SUBVENDOR_ID_3COM,
12189 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12190 { TG3PCI_SUBVENDOR_ID_3COM,
12191 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12194 { TG3PCI_SUBVENDOR_ID_DELL,
12195 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12196 { TG3PCI_SUBVENDOR_ID_DELL,
12197 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12198 { TG3PCI_SUBVENDOR_ID_DELL,
12199 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12200 { TG3PCI_SUBVENDOR_ID_DELL,
12201 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12203 /* Compaq boards. */
12204 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12205 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12206 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12207 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12208 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12209 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12210 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12211 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12212 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12213 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12216 { TG3PCI_SUBVENDOR_ID_IBM,
12217 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12220 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12224 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12225 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12226 tp->pdev->subsystem_vendor) &&
12227 (subsys_id_to_phy_id[i].subsys_devid ==
12228 tp->pdev->subsystem_device))
12229 return &subsys_id_to_phy_id[i];
12234 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12239 /* On some early chips the SRAM cannot be accessed in D3hot state,
12240 * so we need to make sure we're in D0.
12242 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12243 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12244 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12247 /* Make sure register accesses (indirect or otherwise)
12248 * will function correctly.
12250 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12251 tp->misc_host_ctrl);
12253 /* The memory arbiter has to be enabled in order for SRAM accesses
12254 * to succeed. Normally on powerup the tg3 chip firmware will make
12255 * sure it is enabled, but other entities such as system netboot
12256 * code might disable it.
12258 val = tr32(MEMARB_MODE);
12259 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12261 tp->phy_id = TG3_PHY_ID_INVALID;
12262 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12264 /* Assume an onboard device and WOL capable by default. */
12265 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12267 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12268 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12269 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12270 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12272 val = tr32(VCPU_CFGSHDW);
12273 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12274 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12275 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12276 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12277 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12281 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12282 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12283 u32 nic_cfg, led_cfg;
12284 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12285 int eeprom_phy_serdes = 0;
12287 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12288 tp->nic_sram_data_cfg = nic_cfg;
12290 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12291 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12292 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12293 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12294 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12295 (ver > 0) && (ver < 0x100))
12296 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12299 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12301 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12302 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12303 eeprom_phy_serdes = 1;
12305 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12306 if (nic_phy_id != 0) {
12307 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12308 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
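/* Pack the id words the same way tg3_phy_probe() assembles a PHY ID
* from the live MII_PHYSID1/MII_PHYSID2 registers, so the two values
* are directly comparable.
*/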
12310 eeprom_phy_id = (id1 >> 16) << 10;
12311 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12312 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12316 tp->phy_id = eeprom_phy_id;
12317 if (eeprom_phy_serdes) {
12318 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12319 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12321 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12324 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12325 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12326 SHASTA_EXT_LED_MODE_MASK);
12328 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12332 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12333 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12336 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12337 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12340 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12341 tp->led_ctrl = LED_CTRL_MODE_MAC;
12343 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12344 * read, as happens with some older 5700/5701 bootcode.
12346 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12348 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12350 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12354 case SHASTA_EXT_LED_SHARED:
12355 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12356 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12357 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12358 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12359 LED_CTRL_MODE_PHY_2);
12362 case SHASTA_EXT_LED_MAC:
12363 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12366 case SHASTA_EXT_LED_COMBO:
12367 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12368 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12369 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12370 LED_CTRL_MODE_PHY_2);
12375 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12377 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12378 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12380 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12381 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12383 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12384 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12385 if ((tp->pdev->subsystem_vendor ==
12386 PCI_VENDOR_ID_ARIMA) &&
12387 (tp->pdev->subsystem_device == 0x205a ||
12388 tp->pdev->subsystem_device == 0x2063))
12389 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12391 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12392 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12395 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12396 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12397 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12398 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12401 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12402 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12403 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12405 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12406 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12407 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12409 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12410 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12411 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12413 if (cfg2 & (1 << 17))
12414 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12416 /* Serdes signal pre-emphasis in register 0x590 is set by the
12417 * bootcode if bit 18 is set. */
12418 if (cfg2 & (1 << 18))
12419 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12421 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12422 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12423 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12424 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12426 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12427 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12428 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
12431 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12432 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12433 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12436 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12437 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12438 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12439 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12440 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12441 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12444 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12445 device_set_wakeup_enable(&tp->pdev->dev,
12446 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12449 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12454 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12455 tw32(OTP_CTRL, cmd);
12457 /* Wait for up to 1 ms for command to execute. */
12458 for (i = 0; i < 100; i++) {
12459 val = tr32(OTP_STATUS);
12460 if (val & OTP_STATUS_CMD_DONE)
12465 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12468 /* Read the gphy configuration from the OTP region of the chip. The gphy
12469 * configuration is a 32-bit value that straddles the alignment boundary.
12470 * We do two 32-bit reads and then shift and merge the results.
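* (For example, halves 0xAAAABBBB and 0xCCCCDDDD merge to the gphy
* config value 0xBBBBCCCC.)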
12472 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12474 u32 bhalf_otp, thalf_otp;
12476 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12478 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12481 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12483 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12486 thalf_otp = tr32(OTP_READ_DATA);
12488 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12490 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12493 bhalf_otp = tr32(OTP_READ_DATA);
12495 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12498 static int __devinit tg3_phy_probe(struct tg3 *tp)
12500 u32 hw_phy_id_1, hw_phy_id_2;
12501 u32 hw_phy_id, hw_phy_id_masked;
12504 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12505 return tg3_phy_init(tp);
12507 /* Reading the PHY ID register can conflict with ASF
12508 * firmware access to the PHY hardware.
12511 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12512 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12513 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12515 /* Now read the physical PHY_ID from the chip and verify
12516 * that it is sane. If it doesn't look good, we fall back
12517 * to the PHY_ID found in the eeprom area or, failing that,
12518 * to the hard-coded subsystem-ID table.
12520 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12521 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12523 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12524 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12525 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
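/* TG3_PHY_ID_MASK strips the low-order revision bits so a single
* known-ID check matches every stepping of a given PHY.
*/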
12527 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12530 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12531 tp->phy_id = hw_phy_id;
12532 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12533 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12535 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12537 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12538 /* Do nothing, phy ID already set up in
12539 * tg3_get_eeprom_hw_cfg().
12542 struct subsys_tbl_ent *p;
12544 /* No eeprom signature? Try the hardcoded
12545 * subsys device table.
12547 p = tg3_lookup_by_subsys(tp);
12551 tp->phy_id = p->phy_id;
12552 if (!tp->phy_id ||
12553 tp->phy_id == TG3_PHY_ID_BCM8002)
12554 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12558 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12559 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12560 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))
12561 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12563 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12564 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12565 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12566 u32 bmsr, adv_reg, tg3_ctrl, mask;
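/* MII_BMSR latches link-down events, so read it twice: the second
* read reflects the current link state rather than a stale latched
* one.
*/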
12568 tg3_readphy(tp, MII_BMSR, &bmsr);
12569 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12570 (bmsr & BMSR_LSTATUS))
12571 goto skip_phy_reset;
12573 err = tg3_phy_reset(tp);
12577 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12578 ADVERTISE_100HALF | ADVERTISE_100FULL |
12579 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12581 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12582 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12583 MII_TG3_CTRL_ADV_1000_FULL);
12584 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12585 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12586 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12587 MII_TG3_CTRL_ENABLE_AS_MASTER);
12590 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12591 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12592 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12593 if (!tg3_copper_is_advertising_all(tp, mask)) {
12594 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12596 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12597 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12599 tg3_writephy(tp, MII_BMCR,
12600 BMCR_ANENABLE | BMCR_ANRESTART);
12602 tg3_phy_set_wirespeed(tp);
12604 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12605 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12606 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12610 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12611 err = tg3_init_5401phy_dsp(tp);
12615 err = tg3_init_5401phy_dsp(tp);
12618 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12619 tp->link_config.advertising =
12620 (ADVERTISED_1000baseT_Half |
12621 ADVERTISED_1000baseT_Full |
12622 ADVERTISED_Autoneg |
12624 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
12625 tp->link_config.advertising &=
12626 ~(ADVERTISED_1000baseT_Half |
12627 ADVERTISED_1000baseT_Full);
12632 static void __devinit tg3_read_vpd(struct tg3 *tp)
12635 unsigned int block_end, rosize, len;
12639 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12640 tg3_nvram_read(tp, 0x0, &magic))
12643 vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
12647 if (magic == TG3_EEPROM_MAGIC) {
12648 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12651 /* The data is in little-endian format in NVRAM.
12652 * Use the big-endian read routines to preserve
12653 * the byte order as it exists in NVRAM.
12655 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12656 goto out_not_found;
12658 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12662 unsigned int pos = 0;
12664 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12665 cnt = pci_read_vpd(tp->pdev, pos,
12666 TG3_NVM_VPD_LEN - pos,
12668 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12671 goto out_not_found;
12673 if (pos != TG3_NVM_VPD_LEN)
12674 goto out_not_found;
12677 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12678 PCI_VPD_LRDT_RO_DATA);
12680 goto out_not_found;
12682 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12683 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12684 i += PCI_VPD_LRDT_TAG_SIZE;
12686 if (block_end > TG3_NVM_VPD_LEN)
12687 goto out_not_found;
12689 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12690 PCI_VPD_RO_KEYWORD_MFR_ID);
12692 len = pci_vpd_info_field_size(&vpd_data[j]);
12694 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12695 if (j + len > block_end || len != 4 ||
12696 memcmp(&vpd_data[j], "1028", 4))
12699 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12700 PCI_VPD_RO_KEYWORD_VENDOR0);
12704 len = pci_vpd_info_field_size(&vpd_data[j]);
12706 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12707 if (j + len > block_end)
12710 memcpy(tp->fw_ver, &vpd_data[j], len);
12711 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
12715 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12716 PCI_VPD_RO_KEYWORD_PARTNO);
12718 goto out_not_found;
12720 len = pci_vpd_info_field_size(&vpd_data[i]);
12722 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12723 if (len > TG3_BPN_SIZE ||
12724 (len + i) > TG3_NVM_VPD_LEN)
12725 goto out_not_found;
12727 memcpy(tp->board_part_number, &vpd_data[i], len);
12731 if (tp->board_part_number[0])
12735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12736 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
12737 strcpy(tp->board_part_number, "BCM5717");
12738 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
12739 strcpy(tp->board_part_number, "BCM5718");
12742 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
12743 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12744 strcpy(tp->board_part_number, "BCM57780");
12745 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12746 strcpy(tp->board_part_number, "BCM57760");
12747 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12748 strcpy(tp->board_part_number, "BCM57790");
12749 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12750 strcpy(tp->board_part_number, "BCM57788");
12753 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
12754 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12755 strcpy(tp->board_part_number, "BCM57761");
12756 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12757 strcpy(tp->board_part_number, "BCM57765");
12758 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12759 strcpy(tp->board_part_number, "BCM57781");
12760 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12761 strcpy(tp->board_part_number, "BCM57785");
12762 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12763 strcpy(tp->board_part_number, "BCM57791");
12764 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12765 strcpy(tp->board_part_number, "BCM57795");
12768 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12769 strcpy(tp->board_part_number, "BCM95906");
12772 strcpy(tp->board_part_number, "none");
12776 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12780 if (tg3_nvram_read(tp, offset, &val) ||
12781 (val & 0xfc000000) != 0x0c000000 ||
12782 tg3_nvram_read(tp, offset + 4, &val) ||
12789 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12791 u32 val, offset, start, ver_offset;
12793 bool newver = false;
12795 if (tg3_nvram_read(tp, 0xc, &offset) ||
12796 tg3_nvram_read(tp, 0x4, &start))
12799 offset = tg3_nvram_logical_addr(tp, offset);
12801 if (tg3_nvram_read(tp, offset, &val))
12804 if ((val & 0xfc000000) == 0x0c000000) {
12805 if (tg3_nvram_read(tp, offset + 4, &val))
12812 dst_off = strlen(tp->fw_ver);
12815 if (TG3_VER_SIZE - dst_off < 16 ||
12816 tg3_nvram_read(tp, offset + 8, &ver_offset))
12819 offset = offset + ver_offset - start;
12820 for (i = 0; i < 16; i += 4) {
12822 if (tg3_nvram_read_be32(tp, offset + i, &v))
12825 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12830 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12833 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12834 TG3_NVM_BCVER_MAJSFT;
12835 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12836 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12837 "v%d.%02d", major, minor);
12841 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12843 u32 val, major, minor;
12845 /* Use native endian representation */
12846 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12849 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12850 TG3_NVM_HWSB_CFG1_MAJSFT;
12851 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12852 TG3_NVM_HWSB_CFG1_MINSFT;
12854 snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
12857 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12859 u32 offset, major, minor, build;
12861 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12863 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12866 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12867 case TG3_EEPROM_SB_REVISION_0:
12868 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12870 case TG3_EEPROM_SB_REVISION_2:
12871 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12873 case TG3_EEPROM_SB_REVISION_3:
12874 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12876 case TG3_EEPROM_SB_REVISION_4:
12877 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12879 case TG3_EEPROM_SB_REVISION_5:
12880 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12882 case TG3_EEPROM_SB_REVISION_6:
12883 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
12889 if (tg3_nvram_read(tp, offset, &val))
12892 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12893 TG3_EEPROM_SB_EDH_BLD_SHFT;
12894 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12895 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12896 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12898 if (minor > 99 || build > 26)
12901 offset = strlen(tp->fw_ver);
12902 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12903 " v%d.%02d", major, minor);
12906 offset = strlen(tp->fw_ver);
12907 if (offset < TG3_VER_SIZE - 1)
12908 tp->fw_ver[offset] = 'a' + build - 1;
12912 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12914 u32 val, offset, start;
12917 for (offset = TG3_NVM_DIR_START;
12918 offset < TG3_NVM_DIR_END;
12919 offset += TG3_NVM_DIRENT_SIZE) {
12920 if (tg3_nvram_read(tp, offset, &val))
12923 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12927 if (offset == TG3_NVM_DIR_END)
12930 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12931 start = 0x08000000;
12932 else if (tg3_nvram_read(tp, offset - 4, &start))
12935 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12936 !tg3_fw_img_is_valid(tp, offset) ||
12937 tg3_nvram_read(tp, offset + 8, &val))
12940 offset += val - start;
12942 vlen = strlen(tp->fw_ver);
12944 tp->fw_ver[vlen++] = ',';
12945 tp->fw_ver[vlen++] = ' ';
12947 for (i = 0; i < 4; i++) {
12949 if (tg3_nvram_read_be32(tp, offset, &v))
12952 offset += sizeof(v);
12954 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12955 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12959 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12964 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12970 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12971 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12974 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12975 if (apedata != APE_SEG_SIG_MAGIC)
12978 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12979 if (!(apedata & APE_FW_STATUS_READY))
12982 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12984 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
12985 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
12991 vlen = strlen(tp->fw_ver);
12993 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
12995 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12996 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12997 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12998 (apedata & APE_FW_VERSION_BLDMSK));
13001 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13004 bool vpd_vers = false;
13006 if (tp->fw_ver[0] != 0)
13009 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
13010 strcat(tp->fw_ver, "sb");
13014 if (tg3_nvram_read(tp, 0, &val))
13017 if (val == TG3_EEPROM_MAGIC)
13018 tg3_read_bc_ver(tp);
13019 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13020 tg3_read_sb_ver(tp, val);
13021 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13022 tg3_read_hwsb_ver(tp);
13026 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
13027 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
13030 tg3_read_mgmtfw_ver(tp);
13033 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13036 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13038 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
13040 #if TG3_VLAN_TAG_USED
13041 dev->vlan_features |= flags;
13045 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13050 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
13051 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13057 static int __devinit tg3_get_invariants(struct tg3 *tp)
13059 static struct pci_device_id write_reorder_chipsets[] = {
13060 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
13061 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13062 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
13063 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13064 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
13065 PCI_DEVICE_ID_VIA_8385_0) },
13069 u32 pci_state_reg, grc_misc_cfg;
13074 /* Force memory write invalidate off. If we leave it on,
13075 * then on 5700_BX chips we have to enable a workaround.
13076 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13077 * to match the cacheline size. The Broadcom driver has this
13078 * workaround but turns MWI off all the time, so the workaround
13079 * is never used. This suggests that the workaround is insufficient.
13081 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13082 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13083 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13085 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13086 * has the register indirect write enable bit set before
13087 * we try to access any of the MMIO registers. It is also
13088 * critical that the PCI-X hw workaround situation is decided
13089 * before that as well.
13091 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13094 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13095 MISC_HOST_CTRL_CHIPREV_SHIFT);
13096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13097 u32 prod_id_asic_rev;
13099 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13100 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13101 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
13102 pci_read_config_dword(tp->pdev,
13103 TG3PCI_GEN2_PRODID_ASICREV,
13104 &prod_id_asic_rev);
13105 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13106 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13107 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13108 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13109 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13110 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13111 pci_read_config_dword(tp->pdev,
13112 TG3PCI_GEN15_PRODID_ASICREV,
13113 &prod_id_asic_rev);
13115 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13116 &prod_id_asic_rev);
13118 tp->pci_chip_rev_id = prod_id_asic_rev;
13121 /* Wrong chip ID in 5752 A0. This code can be removed later
13122 * as A0 is not in production.
13124 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13125 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13127 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13128 * we need to disable memory and use config. cycles
13129 * only to access all registers. The 5702/03 chips
13130 * can mistakenly decode the special cycles from the
13131 * ICH chipsets as memory write cycles, causing corruption
13132 * of register and memory space. Only certain ICH bridges
13133 * will drive special cycles with non-zero data during the
13134 * address phase which can fall within the 5703's address
13135 * range. This is not an ICH bug as the PCI spec allows
13136 * non-zero address during special cycles. However, only
13137 * these ICH bridges are known to drive non-zero addresses
13138 * during special cycles.
13140 * Since special cycles do not cross PCI bridges, we only
13141 * enable this workaround if the 5703 is on the secondary
13142 * bus of these ICH bridges.
13144 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13145 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13146 static struct tg3_dev_id {
13150 } ich_chipsets[] = {
13151 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13153 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13155 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13157 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13161 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13162 struct pci_dev *bridge = NULL;
13164 while (pci_id->vendor != 0) {
13165 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13171 if (pci_id->rev != PCI_ANY_ID) {
13172 if (bridge->revision > pci_id->rev)
13175 if (bridge->subordinate &&
13176 (bridge->subordinate->number ==
13177 tp->pdev->bus->number)) {
13179 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
13180 pci_dev_put(bridge);
13186 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13187 static struct tg3_dev_id {
13190 } bridge_chipsets[] = {
13191 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13192 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13195 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13196 struct pci_dev *bridge = NULL;
13198 while (pci_id->vendor != 0) {
13199 bridge = pci_get_device(pci_id->vendor,
13206 if (bridge->subordinate &&
13207 (bridge->subordinate->number <=
13208 tp->pdev->bus->number) &&
13209 (bridge->subordinate->subordinate >=
13210 tp->pdev->bus->number)) {
13211 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
13212 pci_dev_put(bridge);
13218 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13219 * DMA addresses > 40-bit. The bridge may have additional
13220 * 57xx devices behind it, for example in some 4-port NIC designs.
13221 * Any tg3 device found behind the bridge will also need the
13222 * 40-bit DMA workaround.
13224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13226 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13227 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13228 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13230 struct pci_dev *bridge = NULL;
13233 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13234 PCI_DEVICE_ID_SERVERWORKS_EPB,
13236 if (bridge && bridge->subordinate &&
13237 (bridge->subordinate->number <=
13238 tp->pdev->bus->number) &&
13239 (bridge->subordinate->subordinate >=
13240 tp->pdev->bus->number)) {
13241 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13242 pci_dev_put(bridge);
13248 /* Initialize misc host control in PCI block. */
13249 tp->misc_host_ctrl |= (misc_ctrl_reg &
13250 MISC_HOST_CTRL_CHIPREV);
13251 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13252 tp->misc_host_ctrl);
13254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13255 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13257 tp->pdev_peer = tg3_find_peer(tp);
13259 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13262 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13264 /* Intentionally exclude ASIC_REV_5906 */
13265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13269 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13270 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13271 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13272 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13275 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13276 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13277 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13278 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13279 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13281 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13282 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13283 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13285 /* 5700 B0 chips do not support checksumming correctly due
13286 * to hardware bugs.
13288 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13289 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13291 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13293 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13294 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13295 features |= NETIF_F_IPV6_CSUM;
13296 tp->dev->features |= features;
13297 vlan_features_add(tp->dev, features);
13300 /* Determine TSO capabilities */
13301 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13302 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13303 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13304 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13305 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13306 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13307 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13308 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13309 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13310 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13311 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13312 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13313 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13314 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13316 tp->fw_needed = FIRMWARE_TG3TSO5;
13318 tp->fw_needed = FIRMWARE_TG3TSO;
13323 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13324 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13325 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13326 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13327 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13328 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13329 tp->pdev_peer == tp->pdev))
13330 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13332 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13334 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13337 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13338 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13339 tp->irq_max = TG3_IRQ_MAX_VECS;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
	else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
		tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
	}

	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
		tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
		tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
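	/* Jumbo frames are supported either via the legacy jumbo
	 * producer ring (pre-5705 and 5780-class parts) or, on newer
	 * parts, via the jumbo BD flag on the standard ring.
	 */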
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (tp->pcie_cap != 0) {
		u16 lnkctl;

		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

		pcie_set_readrq(tp->pdev, 4096);

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
	} else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
		   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major trouble.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
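	/* When TG3_FLAG_MBOX_WRITE_REORDER is set, the mailbox accessors
	 * are redirected below to the _flush variants, which read the
	 * register back so posted writes reach the chip in order.
	 */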
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to work around a bug.
		 */
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
			u32 pm_reg;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}
	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;
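	/* Everything starts out as a plain MMIO access; the quirk logic
	 * below swaps in flushing or indirect variants where required.
	 */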
	/* Various workaround register access methods */
	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back-to-back register writes can cause problems on these
		 * chips; the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
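	/* With TG3_FLAG_SRAM_USE_CONFIG set, tg3_read_mem() and
	 * tg3_write_mem() reach NIC SRAM through PCI config-space
	 * cycles rather than the memory-mapped window, which is not
	 * reliable on these configurations.
	 */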
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLG2_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;

	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
		return err;
	}
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);
	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = TG3_DEF_MAC_MODE;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe(); here we may
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
		tp->rx_offset -= NET_IP_ALIGN;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around a hw erratum.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
		if (PCI_FUNC(tp->pdev->devfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->devfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2
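/* The "goal" computed below selects the DMA burst boundary policy:
 * zero leaves the chip default alone, while the SINGLE/MULTI
 * cacheline values clip bursts relative to the host cache line size.
 */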
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
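	/* The descriptor words were staged into the NIC SRAM descriptor
	 * pool through the PCI memory window above; writing the SRAM
	 * address to the DMA-high FTQ kicks the engine, and completion
	 * is detected below when that address shows up in the matching
	 * completion FIFO.
	 */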
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
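	/* The constants OR'd in below program chip-specific DMA
	 * read/write watermarks in DMA_RW_CTRL; only the write-boundary
	 * bits computed above are exercised by the test further down.
	 */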
	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
		goto out;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary;
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
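/* Buffer manager watermark defaults; the _jumbo variants are the set
 * programmed when jumbo rings are in use.
 */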
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
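/* The two ops tables above differ only in the transmit handler;
 * tg3_start_xmit_dma_bug() carries extra workarounds for chips with
 * DMA errata (see the selection logic in tg3_init_one()).
 */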
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Selectively allow TSO based on operating conditions */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
	    (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	else {
		tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
	    (dev->features & NETIF_F_IP_CSUM)) {
		dev->features |= NETIF_F_TSO;
		vlan_features_add(dev, NETIF_F_TSO);
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
		if (dev->features & NETIF_F_IPV6_CSUM) {
			dev->features |= NETIF_F_TSO6;
			vlan_features_add(dev, NETIF_F_TSO6);
		}
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
			dev->features |= NETIF_F_TSO_ECN;
			vlan_features_add(dev, NETIF_F_TSO_ECN);
		}
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down.  The DMA self test below will enable WDMAC, and we would
	 * otherwise see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Flow control autonegotiation is default behavior. */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
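		/* Mailbox spacing (see the increments in this loop):
		 * interrupt mailboxes 0-3 are 8 bytes apart and later ones
		 * 4 bytes; receive-return mailboxes advance by 8 bytes; send
		 * mailboxes zig-zag (+0xc/-0x4) across each 8-byte pair.
		 */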
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
		    (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
		    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);