2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2010 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/phy.h>
37 #include <linux/brcmphy.h>
38 #include <linux/if_vlan.h>
40 #include <linux/tcp.h>
41 #include <linux/workqueue.h>
42 #include <linux/prefetch.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/firmware.h>
46 #include <net/checksum.h>
49 #include <asm/system.h>
51 #include <asm/byteorder.h>
52 #include <asm/uaccess.h>
55 #include <asm/idprom.h>
62 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
63 #define TG3_VLAN_TAG_USED 1
65 #define TG3_VLAN_TAG_USED 0
70 #define DRV_MODULE_NAME "tg3"
72 #define TG3_MIN_NUM 113
73 #define DRV_MODULE_VERSION \
74 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
75 #define DRV_MODULE_RELDATE "August 2, 2010"
77 #define TG3_DEF_MAC_MODE 0
78 #define TG3_DEF_RX_MODE 0
79 #define TG3_DEF_TX_MODE 0
80 #define TG3_DEF_MSG_ENABLE \
90 /* length of time before we decide the hardware is borked,
91 * and dev->tx_timeout() should be called to fix the problem
93 #define TG3_TX_TIMEOUT (5 * HZ)
95 /* hardware minimum and maximum for a single frame's data payload */
96 #define TG3_MIN_MTU 60
97 #define TG3_MAX_MTU(tp) \
98 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
100 /* These numbers seem to be hard coded in the NIC firmware somehow.
101 * You can't change the ring sizes, but you can change where you place
102 * them in the NIC onboard memory.
104 #define TG3_RX_RING_SIZE 512
105 #define TG3_DEF_RX_RING_PENDING 200
106 #define TG3_RX_JUMBO_RING_SIZE 256
107 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
108 #define TG3_RSS_INDIR_TBL_SIZE 128
110 /* Do not place this n-ring entries value into the tp struct itself,
111 * we really want to expose these constants to GCC so that modulo et
112 * al. operations are done with shifts and masks instead of with
113 * hw multiply/modulo instructions. Another solution would be to
114 * replace things like '% foo' with '& (foo - 1)'.
116 #define TG3_RX_RCB_RING_SIZE(tp) \
117 (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
118 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
120 #define TG3_TX_RING_SIZE 512
121 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
123 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
125 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
126 TG3_RX_JUMBO_RING_SIZE)
127 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
128 TG3_RX_RCB_RING_SIZE(tp))
129 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
131 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
133 #define TG3_RX_DMA_ALIGN 16
134 #define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
136 #define TG3_DMA_BYTE_ENAB 64
138 #define TG3_RX_STD_DMA_SZ 1536
139 #define TG3_RX_JMB_DMA_SZ 9046
141 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
143 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
144 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
146 #define TG3_RX_STD_BUFF_RING_SIZE \
147 (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
149 #define TG3_RX_JMB_BUFF_RING_SIZE \
150 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
152 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
153 * that are at least dword aligned when used in PCIX mode. The driver
154 * works around this bug by double copying the packet. This workaround
155 * is built into the normal double copy length check for efficiency.
157 * However, the double copy is only necessary on those architectures
158 * where unaligned memory accesses are inefficient. For those architectures
159 * where unaligned memory accesses incur little penalty, we can reintegrate
160 * the 5701 in the normal rx path. Doing so saves a device structure
161 * dereference by hardcoding the double copy threshold in place.
163 #define TG3_RX_COPY_THRESHOLD 256
164 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
165 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
167 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
170 /* minimum number of free TX descriptors required to wake up TX process */
171 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
173 #define TG3_RAW_IP_ALIGN 2
175 /* number of ETHTOOL_GSTATS u64's */
176 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
178 #define TG3_NUM_TEST 6
180 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
182 #define FIRMWARE_TG3 "tigon/tg3.bin"
183 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
184 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
186 static char version[] __devinitdata =
187 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
189 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
190 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
191 MODULE_LICENSE("GPL");
192 MODULE_VERSION(DRV_MODULE_VERSION);
193 MODULE_FIRMWARE(FIRMWARE_TG3);
194 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
195 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
197 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
198 module_param(tg3_debug, int, 0);
199 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
201 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
275 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
276 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
277 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
278 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
279 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
280 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
281 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
285 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
287 static const struct {
288 const char string[ETH_GSTRING_LEN];
289 } ethtool_stats_keys[TG3_NUM_STATS] = {
292 { "rx_ucast_packets" },
293 { "rx_mcast_packets" },
294 { "rx_bcast_packets" },
296 { "rx_align_errors" },
297 { "rx_xon_pause_rcvd" },
298 { "rx_xoff_pause_rcvd" },
299 { "rx_mac_ctrl_rcvd" },
300 { "rx_xoff_entered" },
301 { "rx_frame_too_long_errors" },
303 { "rx_undersize_packets" },
304 { "rx_in_length_errors" },
305 { "rx_out_length_errors" },
306 { "rx_64_or_less_octet_packets" },
307 { "rx_65_to_127_octet_packets" },
308 { "rx_128_to_255_octet_packets" },
309 { "rx_256_to_511_octet_packets" },
310 { "rx_512_to_1023_octet_packets" },
311 { "rx_1024_to_1522_octet_packets" },
312 { "rx_1523_to_2047_octet_packets" },
313 { "rx_2048_to_4095_octet_packets" },
314 { "rx_4096_to_8191_octet_packets" },
315 { "rx_8192_to_9022_octet_packets" },
322 { "tx_flow_control" },
324 { "tx_single_collisions" },
325 { "tx_mult_collisions" },
327 { "tx_excessive_collisions" },
328 { "tx_late_collisions" },
329 { "tx_collide_2times" },
330 { "tx_collide_3times" },
331 { "tx_collide_4times" },
332 { "tx_collide_5times" },
333 { "tx_collide_6times" },
334 { "tx_collide_7times" },
335 { "tx_collide_8times" },
336 { "tx_collide_9times" },
337 { "tx_collide_10times" },
338 { "tx_collide_11times" },
339 { "tx_collide_12times" },
340 { "tx_collide_13times" },
341 { "tx_collide_14times" },
342 { "tx_collide_15times" },
343 { "tx_ucast_packets" },
344 { "tx_mcast_packets" },
345 { "tx_bcast_packets" },
346 { "tx_carrier_sense_errors" },
350 { "dma_writeq_full" },
351 { "dma_write_prioq_full" },
355 { "rx_threshold_hit" },
357 { "dma_readq_full" },
358 { "dma_read_prioq_full" },
359 { "tx_comp_queue_full" },
361 { "ring_set_send_prod_index" },
362 { "ring_status_update" },
364 { "nic_avoided_irqs" },
365 { "nic_tx_threshold_hit" }
368 static const struct {
369 const char string[ETH_GSTRING_LEN];
370 } ethtool_test_keys[TG3_NUM_TEST] = {
371 { "nvram test (online) " },
372 { "link test (online) " },
373 { "register test (offline)" },
374 { "memory test (offline)" },
375 { "loopback test (offline)" },
376 { "interrupt test (offline)" },
379 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
381 writel(val, tp->regs + off);
384 static u32 tg3_read32(struct tg3 *tp, u32 off)
386 return readl(tp->regs + off);
389 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
391 writel(val, tp->aperegs + off);
394 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
396 return readl(tp->aperegs + off);
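/* Write a chip register through the PCI config-space window
 * (TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA), serialized by indirect_lock.
 */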
399 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
403 spin_lock_irqsave(&tp->indirect_lock, flags);
404 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
405 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
406 spin_unlock_irqrestore(&tp->indirect_lock, flags);
409 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
411 writel(val, tp->regs + off);
412 readl(tp->regs + off);
415 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
420 spin_lock_irqsave(&tp->indirect_lock, flags);
421 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
422 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
423 spin_unlock_irqrestore(&tp->indirect_lock, flags);
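/* Post a mailbox write through PCI config space.  A few mailboxes have
 * dedicated config registers; everything else goes through the indirect
 * register window.
 */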
427 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
431 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
432 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
433 TG3_64BIT_REG_LOW, val);
436 if (off == TG3_RX_STD_PROD_IDX_REG) {
437 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
438 TG3_64BIT_REG_LOW, val);
442 spin_lock_irqsave(&tp->indirect_lock, flags);
443 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
444 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
445 spin_unlock_irqrestore(&tp->indirect_lock, flags);
447 /* In indirect mode when disabling interrupts, we also need
448 * to clear the interrupt bit in the GRC local ctrl register.
450 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
452 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
453 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
457 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
462 spin_lock_irqsave(&tp->indirect_lock, flags);
463 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
464 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
465 spin_unlock_irqrestore(&tp->indirect_lock, flags);
469 /* usec_wait specifies the wait time in usec when writing to certain registers
470 * where it is unsafe to read back the register without some delay.
471 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
472 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
474 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
476 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
477 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
478 /* Non-posted methods */
479 tp->write32(tp, off, val);
482 tg3_write32(tp, off, val);
487 /* Wait again after the read for the posted method to guarantee that
488 * the wait time is met.
494 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
496 tp->write32_mbox(tp, off, val);
497 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
498 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
499 tp->read32_mbox(tp, off);
502 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
504 void __iomem *mbox = tp->regs + off;
506 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
508 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
512 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
514 return readl(tp->regs + off + GRCMBOX_BASE);
517 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
519 writel(val, tp->regs + off + GRCMBOX_BASE);
522 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
523 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
524 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
525 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
526 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
528 #define tw32(reg, val) tp->write32(tp, reg, val)
529 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
530 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
531 #define tr32(reg) tp->read32(tp, reg)
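/* Write one word of NIC on-chip SRAM through the memory window, using
 * config-space accesses when SRAM_USE_CONFIG is set.
 */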
533 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
537 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
538 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
541 spin_lock_irqsave(&tp->indirect_lock, flags);
542 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
543 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
544 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
546 /* Always leave this as zero. */
547 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
549 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
550 tw32_f(TG3PCI_MEM_WIN_DATA, val);
552 /* Always leave this as zero. */
553 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
555 spin_unlock_irqrestore(&tp->indirect_lock, flags);
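/* Read one word of NIC on-chip SRAM through the memory window. */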
558 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
562 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
563 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
568 spin_lock_irqsave(&tp->indirect_lock, flags);
569 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
570 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
571 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
573 /* Always leave this as zero. */
574 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
576 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
577 *val = tr32(TG3PCI_MEM_WIN_DATA);
579 /* Always leave this as zero. */
580 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
582 spin_unlock_irqrestore(&tp->indirect_lock, flags);
585 static void tg3_ape_lock_init(struct tg3 *tp)
590 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
591 regbase = TG3_APE_LOCK_GRANT;
593 regbase = TG3_APE_PER_LOCK_GRANT;
595 /* Make sure the driver isn't holding any stale locks. */
596 for (i = 0; i < 8; i++)
597 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
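/* Acquire one of the APE hardware locks, polling up to ~1 ms for the
 * grant and backing the request out on timeout.
 */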
600 static int tg3_ape_lock(struct tg3 *tp, int locknum)
604 u32 status, req, gnt;
606 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
610 case TG3_APE_LOCK_GRC:
611 case TG3_APE_LOCK_MEM:
617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
618 req = TG3_APE_LOCK_REQ;
619 gnt = TG3_APE_LOCK_GRANT;
621 req = TG3_APE_PER_LOCK_REQ;
622 gnt = TG3_APE_PER_LOCK_GRANT;
627 tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
629 /* Wait for up to 1 millisecond to acquire lock. */
630 for (i = 0; i < 100; i++) {
631 status = tg3_ape_read32(tp, gnt + off);
632 if (status == APE_LOCK_GRANT_DRIVER)
637 if (status != APE_LOCK_GRANT_DRIVER) {
638 /* Revoke the lock request. */
639 tg3_ape_write32(tp, gnt + off,
640 APE_LOCK_GRANT_DRIVER);
648 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
652 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
656 case TG3_APE_LOCK_GRC:
657 case TG3_APE_LOCK_MEM:
663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
664 gnt = TG3_APE_LOCK_GRANT;
666 gnt = TG3_APE_PER_LOCK_GRANT;
668 tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
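/* Mask PCI interrupts in misc host control and write 1 to every
 * vector's interrupt mailbox to disable interrupt generation.
 */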
671 static void tg3_disable_ints(struct tg3 *tp)
675 tw32(TG3PCI_MISC_HOST_CTRL,
676 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
677 for (i = 0; i < tp->irq_max; i++)
678 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
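/* Unmask PCI interrupts and re-arm each vector's interrupt mailbox with
 * its last status tag, then kick the coalescing engine.
 */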
681 static void tg3_enable_ints(struct tg3 *tp)
688 tw32(TG3PCI_MISC_HOST_CTRL,
689 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
691 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
692 for (i = 0; i < tp->irq_cnt; i++) {
693 struct tg3_napi *tnapi = &tp->napi[i];
695 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
696 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
697 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
699 tp->coal_now |= tnapi->coal_now;
702 /* Force an initial interrupt */
703 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
704 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
705 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
707 tw32(HOSTCC_MODE, tp->coal_now);
709 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
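/* Return nonzero if the status block indicates a link change or pending
 * RX/TX work for this NAPI context.
 */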
712 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
714 struct tg3 *tp = tnapi->tp;
715 struct tg3_hw_status *sblk = tnapi->hw_status;
716 unsigned int work_exists = 0;
718 /* check for phy events */
719 if (!(tp->tg3_flags &
720 (TG3_FLAG_USE_LINKCHG_REG |
721 TG3_FLAG_POLL_SERDES))) {
722 if (sblk->status & SD_STATUS_LINK_CHG)
725 /* check for RX/TX work to do */
726 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
727 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
734 * similar to tg3_enable_ints, but it accurately determines whether there
735 * is new work pending and can return without flushing the PIO write
736 * which reenables interrupts
738 static void tg3_int_reenable(struct tg3_napi *tnapi)
740 struct tg3 *tp = tnapi->tp;
742 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
745 /* When doing tagged status, this work check is unnecessary.
746 * The last_tag we write above tells the chip which piece of
747 * work we've completed.
749 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
751 tw32(HOSTCC_MODE, tp->coalesce_mode |
752 HOSTCC_MODE_ENABLE | tnapi->coal_now);
755 static void tg3_switch_clocks(struct tg3 *tp)
760 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
761 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
764 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
766 orig_clock_ctrl = clock_ctrl;
767 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
768 CLOCK_CTRL_CLKRUN_OENABLE |
770 tp->pci_clock_ctrl = clock_ctrl;
772 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
773 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
774 tw32_wait_f(TG3PCI_CLOCK_CTRL,
775 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
777 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
778 tw32_wait_f(TG3PCI_CLOCK_CTRL,
780 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
782 tw32_wait_f(TG3PCI_CLOCK_CTRL,
783 clock_ctrl | (CLOCK_CTRL_ALTCLK),
786 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
789 #define PHY_BUSY_LOOPS 5000
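/* Read a PHY register over MDIO via MAC_MI_COM, temporarily disabling
 * MI auto-polling while the transaction is in flight.
 */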
791 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
797 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
799 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
805 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
806 MI_COM_PHY_ADDR_MASK);
807 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
808 MI_COM_REG_ADDR_MASK);
809 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
811 tw32_f(MAC_MI_COM, frame_val);
813 loops = PHY_BUSY_LOOPS;
816 frame_val = tr32(MAC_MI_COM);
818 if ((frame_val & MI_COM_BUSY) == 0) {
820 frame_val = tr32(MAC_MI_COM);
828 *val = frame_val & MI_COM_DATA_MASK;
832 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
833 tw32_f(MAC_MI_MODE, tp->mi_mode);
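/* Write a PHY register over MDIO via MAC_MI_COM, with the same
 * auto-polling handling as tg3_readphy().
 */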
840 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
846 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
847 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
850 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
852 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
856 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
857 MI_COM_PHY_ADDR_MASK);
858 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
859 MI_COM_REG_ADDR_MASK);
860 frame_val |= (val & MI_COM_DATA_MASK);
861 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
863 tw32_f(MAC_MI_COM, frame_val);
865 loops = PHY_BUSY_LOOPS;
868 frame_val = tr32(MAC_MI_COM);
869 if ((frame_val & MI_COM_BUSY) == 0) {
871 frame_val = tr32(MAC_MI_COM);
881 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
882 tw32_f(MAC_MI_MODE, tp->mi_mode);
889 static int tg3_bmcr_reset(struct tg3 *tp)
894 /* OK, reset it, and poll the BMCR_RESET bit until it
895 * clears or we time out.
897 phy_control = BMCR_RESET;
898 err = tg3_writephy(tp, MII_BMCR, phy_control);
904 err = tg3_readphy(tp, MII_BMCR, &phy_control);
908 if ((phy_control & BMCR_RESET) == 0) {
920 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
922 struct tg3 *tp = bp->priv;
925 spin_lock_bh(&tp->lock);
927 if (tg3_readphy(tp, reg, &val))
930 spin_unlock_bh(&tp->lock);
935 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
937 struct tg3 *tp = bp->priv;
940 spin_lock_bh(&tp->lock);
942 if (tg3_writephy(tp, reg, val))
945 spin_unlock_bh(&tp->lock);
950 static int tg3_mdio_reset(struct mii_bus *bp)
955 static void tg3_mdio_config_5785(struct tg3 *tp)
958 struct phy_device *phydev;
960 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
961 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
962 case PHY_ID_BCM50610:
963 case PHY_ID_BCM50610M:
964 val = MAC_PHYCFG2_50610_LED_MODES;
966 case PHY_ID_BCMAC131:
967 val = MAC_PHYCFG2_AC131_LED_MODES;
969 case PHY_ID_RTL8211C:
970 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
972 case PHY_ID_RTL8201E:
973 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
979 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
980 tw32(MAC_PHYCFG2, val);
982 val = tr32(MAC_PHYCFG1);
983 val &= ~(MAC_PHYCFG1_RGMII_INT |
984 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
985 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
986 tw32(MAC_PHYCFG1, val);
991 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
992 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
993 MAC_PHYCFG2_FMODE_MASK_MASK |
994 MAC_PHYCFG2_GMODE_MASK_MASK |
995 MAC_PHYCFG2_ACT_MASK_MASK |
996 MAC_PHYCFG2_QUAL_MASK_MASK |
997 MAC_PHYCFG2_INBAND_ENABLE;
999 tw32(MAC_PHYCFG2, val);
1001 val = tr32(MAC_PHYCFG1);
1002 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1003 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1004 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1005 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1006 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1007 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1008 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1010 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1011 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1012 tw32(MAC_PHYCFG1, val);
1014 val = tr32(MAC_EXT_RGMII_MODE);
1015 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1016 MAC_RGMII_MODE_RX_QUALITY |
1017 MAC_RGMII_MODE_RX_ACTIVITY |
1018 MAC_RGMII_MODE_RX_ENG_DET |
1019 MAC_RGMII_MODE_TX_ENABLE |
1020 MAC_RGMII_MODE_TX_LOWPWR |
1021 MAC_RGMII_MODE_TX_RESET);
1022 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1023 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1024 val |= MAC_RGMII_MODE_RX_INT_B |
1025 MAC_RGMII_MODE_RX_QUALITY |
1026 MAC_RGMII_MODE_RX_ACTIVITY |
1027 MAC_RGMII_MODE_RX_ENG_DET;
1028 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1029 val |= MAC_RGMII_MODE_TX_ENABLE |
1030 MAC_RGMII_MODE_TX_LOWPWR |
1031 MAC_RGMII_MODE_TX_RESET;
1033 tw32(MAC_EXT_RGMII_MODE, val);
1036 static void tg3_mdio_start(struct tg3 *tp)
1038 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1039 tw32_f(MAC_MI_MODE, tp->mi_mode);
1042 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1043 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1044 tg3_mdio_config_5785(tp);
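/* Set up the MDIO bus when phylib is in use: choose the PHY address,
 * register the bus, and apply PHY-specific interface and dev_flags
 * settings.
 */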
1047 static int tg3_mdio_init(struct tg3 *tp)
1051 struct phy_device *phydev;
1053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
1057 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1059 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1060 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1062 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1063 TG3_CPMU_PHY_STRAP_IS_SERDES;
1067 tp->phy_addr = TG3_PHY_MII_ADDR;
1071 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1072 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1075 tp->mdio_bus = mdiobus_alloc();
1076 if (tp->mdio_bus == NULL)
1079 tp->mdio_bus->name = "tg3 mdio bus";
1080 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1081 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1082 tp->mdio_bus->priv = tp;
1083 tp->mdio_bus->parent = &tp->pdev->dev;
1084 tp->mdio_bus->read = &tg3_mdio_read;
1085 tp->mdio_bus->write = &tg3_mdio_write;
1086 tp->mdio_bus->reset = &tg3_mdio_reset;
1087 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1088 tp->mdio_bus->irq = &tp->mdio_irq[0];
1090 for (i = 0; i < PHY_MAX_ADDR; i++)
1091 tp->mdio_bus->irq[i] = PHY_POLL;
1093 /* The bus registration will look for all the PHYs on the mdio bus.
1094 * Unfortunately, it does not ensure the PHY is powered up before
1095 * accessing the PHY ID registers. A chip reset is the
1096 * quickest way to bring the device back to an operational state.
1098 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1101 i = mdiobus_register(tp->mdio_bus);
1103 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1104 mdiobus_free(tp->mdio_bus);
1108 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1110 if (!phydev || !phydev->drv) {
1111 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1112 mdiobus_unregister(tp->mdio_bus);
1113 mdiobus_free(tp->mdio_bus);
1117 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1118 case PHY_ID_BCM57780:
1119 phydev->interface = PHY_INTERFACE_MODE_GMII;
1120 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1122 case PHY_ID_BCM50610:
1123 case PHY_ID_BCM50610M:
1124 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1125 PHY_BRCM_RX_REFCLK_UNUSED |
1126 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1127 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1128 if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
1129 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1130 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1131 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1132 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1133 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1135 case PHY_ID_RTL8211C:
1136 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1138 case PHY_ID_RTL8201E:
1139 case PHY_ID_BCMAC131:
1140 phydev->interface = PHY_INTERFACE_MODE_MII;
1141 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1142 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1146 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1149 tg3_mdio_config_5785(tp);
1154 static void tg3_mdio_fini(struct tg3 *tp)
1156 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1157 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1158 mdiobus_unregister(tp->mdio_bus);
1159 mdiobus_free(tp->mdio_bus);
1163 /* tp->lock is held. */
1164 static inline void tg3_generate_fw_event(struct tg3 *tp)
1168 val = tr32(GRC_RX_CPU_EVENT);
1169 val |= GRC_RX_CPU_DRIVER_EVENT;
1170 tw32_f(GRC_RX_CPU_EVENT, val);
1172 tp->last_event_jiffies = jiffies;
1175 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1177 /* tp->lock is held. */
1178 static void tg3_wait_for_event_ack(struct tg3 *tp)
1181 unsigned int delay_cnt;
1184 /* If enough time has passed, no wait is necessary. */
1185 time_remain = (long)(tp->last_event_jiffies + 1 +
1186 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1188 if (time_remain < 0)
1191 /* Check if we can shorten the wait time. */
1192 delay_cnt = jiffies_to_usecs(time_remain);
1193 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1194 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1195 delay_cnt = (delay_cnt >> 3) + 1;
1197 for (i = 0; i < delay_cnt; i++) {
1198 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1204 /* tp->lock is held. */
1205 static void tg3_ump_link_report(struct tg3 *tp)
1210 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1211 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1214 tg3_wait_for_event_ack(tp);
1216 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1218 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1221 if (!tg3_readphy(tp, MII_BMCR, &reg))
1223 if (!tg3_readphy(tp, MII_BMSR, &reg))
1224 val |= (reg & 0xffff);
1225 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1228 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1230 if (!tg3_readphy(tp, MII_LPA, &reg))
1231 val |= (reg & 0xffff);
1232 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1235 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1236 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1238 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1239 val |= (reg & 0xffff);
1241 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1243 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1247 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1249 tg3_generate_fw_event(tp);
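/* Log the current link state (speed, duplex, flow control) and pass it
 * on to the management firmware via tg3_ump_link_report().
 */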
1252 static void tg3_link_report(struct tg3 *tp)
1254 if (!netif_carrier_ok(tp->dev)) {
1255 netif_info(tp, link, tp->dev, "Link is down\n");
1256 tg3_ump_link_report(tp);
1257 } else if (netif_msg_link(tp)) {
1258 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1259 (tp->link_config.active_speed == SPEED_1000 ?
1261 (tp->link_config.active_speed == SPEED_100 ?
1263 (tp->link_config.active_duplex == DUPLEX_FULL ?
1266 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1267 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1269 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1271 tg3_ump_link_report(tp);
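/* Translate FLOW_CTRL_TX/RX bits into MII pause advertisement bits for
 * copper links.
 */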
1275 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1279 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1280 miireg = ADVERTISE_PAUSE_CAP;
1281 else if (flow_ctrl & FLOW_CTRL_TX)
1282 miireg = ADVERTISE_PAUSE_ASYM;
1283 else if (flow_ctrl & FLOW_CTRL_RX)
1284 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1291 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1295 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1296 miireg = ADVERTISE_1000XPAUSE;
1297 else if (flow_ctrl & FLOW_CTRL_TX)
1298 miireg = ADVERTISE_1000XPSE_ASYM;
1299 else if (flow_ctrl & FLOW_CTRL_RX)
1300 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1307 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1311 if (lcladv & ADVERTISE_1000XPAUSE) {
1312 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1313 if (rmtadv & LPA_1000XPAUSE)
1314 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1315 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1318 if (rmtadv & LPA_1000XPAUSE)
1319 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1321 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1322 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
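/* Resolve the negotiated (or forced) pause configuration and program the
 * MAC RX/TX flow-control enables to match.
 */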
1329 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1333 u32 old_rx_mode = tp->rx_mode;
1334 u32 old_tx_mode = tp->tx_mode;
1336 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1337 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1339 autoneg = tp->link_config.autoneg;
1341 if (autoneg == AUTONEG_ENABLE &&
1342 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1343 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1344 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1346 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1348 flowctrl = tp->link_config.flowctrl;
1350 tp->link_config.active_flowctrl = flowctrl;
1352 if (flowctrl & FLOW_CTRL_RX)
1353 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1355 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1357 if (old_rx_mode != tp->rx_mode)
1358 tw32_f(MAC_RX_MODE, tp->rx_mode);
1360 if (flowctrl & FLOW_CTRL_TX)
1361 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1363 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1365 if (old_tx_mode != tp->tx_mode)
1366 tw32_f(MAC_TX_MODE, tp->tx_mode);
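/* phylib link-change callback: bring the MAC mode, flow control and TX
 * length registers in line with the PHY's current speed and duplex.
 */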
1369 static void tg3_adjust_link(struct net_device *dev)
1371 u8 oldflowctrl, linkmesg = 0;
1372 u32 mac_mode, lcl_adv, rmt_adv;
1373 struct tg3 *tp = netdev_priv(dev);
1374 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1376 spin_lock_bh(&tp->lock);
1378 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1379 MAC_MODE_HALF_DUPLEX);
1381 oldflowctrl = tp->link_config.active_flowctrl;
1387 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1388 mac_mode |= MAC_MODE_PORT_MODE_MII;
1389 else if (phydev->speed == SPEED_1000 ||
1390 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1391 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1393 mac_mode |= MAC_MODE_PORT_MODE_MII;
1395 if (phydev->duplex == DUPLEX_HALF)
1396 mac_mode |= MAC_MODE_HALF_DUPLEX;
1398 lcl_adv = tg3_advert_flowctrl_1000T(
1399 tp->link_config.flowctrl);
1402 rmt_adv = LPA_PAUSE_CAP;
1403 if (phydev->asym_pause)
1404 rmt_adv |= LPA_PAUSE_ASYM;
1407 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1409 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1411 if (mac_mode != tp->mac_mode) {
1412 tp->mac_mode = mac_mode;
1413 tw32_f(MAC_MODE, tp->mac_mode);
1417 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1418 if (phydev->speed == SPEED_10)
1420 MAC_MI_STAT_10MBPS_MODE |
1421 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1423 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1426 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1427 tw32(MAC_TX_LENGTHS,
1428 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1429 (6 << TX_LENGTHS_IPG_SHIFT) |
1430 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1432 tw32(MAC_TX_LENGTHS,
1433 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1434 (6 << TX_LENGTHS_IPG_SHIFT) |
1435 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1437 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1438 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1439 phydev->speed != tp->link_config.active_speed ||
1440 phydev->duplex != tp->link_config.active_duplex ||
1441 oldflowctrl != tp->link_config.active_flowctrl)
1444 tp->link_config.active_speed = phydev->speed;
1445 tp->link_config.active_duplex = phydev->duplex;
1447 spin_unlock_bh(&tp->lock);
1450 tg3_link_report(tp);
1453 static int tg3_phy_init(struct tg3 *tp)
1455 struct phy_device *phydev;
1457 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1460 /* Bring the PHY back to a known state. */
1463 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1465 /* Attach the MAC to the PHY. */
1466 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1467 phydev->dev_flags, phydev->interface);
1468 if (IS_ERR(phydev)) {
1469 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1470 return PTR_ERR(phydev);
1473 /* Mask with MAC supported features. */
1474 switch (phydev->interface) {
1475 case PHY_INTERFACE_MODE_GMII:
1476 case PHY_INTERFACE_MODE_RGMII:
1477 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1478 phydev->supported &= (PHY_GBIT_FEATURES |
1480 SUPPORTED_Asym_Pause);
1484 case PHY_INTERFACE_MODE_MII:
1485 phydev->supported &= (PHY_BASIC_FEATURES |
1487 SUPPORTED_Asym_Pause);
1490 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1494 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1496 phydev->advertising = phydev->supported;
1501 static void tg3_phy_start(struct tg3 *tp)
1503 struct phy_device *phydev;
1505 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1508 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1510 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1511 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1512 phydev->speed = tp->link_config.orig_speed;
1513 phydev->duplex = tp->link_config.orig_duplex;
1514 phydev->autoneg = tp->link_config.orig_autoneg;
1515 phydev->advertising = tp->link_config.orig_advertising;
1520 phy_start_aneg(phydev);
1523 static void tg3_phy_stop(struct tg3 *tp)
1525 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1528 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1531 static void tg3_phy_fini(struct tg3 *tp)
1533 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1534 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1535 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1539 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1543 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1545 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1550 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1554 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1557 tg3_writephy(tp, MII_TG3_FET_TEST,
1558 phytest | MII_TG3_FET_SHADOW_EN);
1559 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1561 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1563 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1564 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1566 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1570 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1574 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1575 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1577 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1580 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1581 tg3_phy_fet_toggle_apd(tp, enable);
1585 reg = MII_TG3_MISC_SHDW_WREN |
1586 MII_TG3_MISC_SHDW_SCR5_SEL |
1587 MII_TG3_MISC_SHDW_SCR5_LPED |
1588 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1589 MII_TG3_MISC_SHDW_SCR5_SDTL |
1590 MII_TG3_MISC_SHDW_SCR5_C125OE;
1591 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1592 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1594 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1597 reg = MII_TG3_MISC_SHDW_WREN |
1598 MII_TG3_MISC_SHDW_APD_SEL |
1599 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1601 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1603 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1606 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1610 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1611 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1614 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1617 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1618 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1620 tg3_writephy(tp, MII_TG3_FET_TEST,
1621 ephy | MII_TG3_FET_SHADOW_EN);
1622 if (!tg3_readphy(tp, reg, &phy)) {
1624 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1626 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1627 tg3_writephy(tp, reg, phy);
1629 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1632 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1633 MII_TG3_AUXCTL_SHDWSEL_MISC;
1634 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1635 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1637 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1639 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1640 phy |= MII_TG3_AUXCTL_MISC_WREN;
1641 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1646 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1650 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1653 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1654 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1655 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1656 (val | (1 << 15) | (1 << 4)));
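/* Program PHY DSP coefficients derived from the chip's OTP word, with
 * the SM_DSP clock enabled around the writes.
 */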
1659 static void tg3_phy_apply_otp(struct tg3 *tp)
1668 /* Enable SM_DSP clock and tx 6dB coding. */
1669 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1670 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1671 MII_TG3_AUXCTL_ACTL_TX_6DB;
1672 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1674 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1675 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1676 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1678 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1679 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1680 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1682 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1683 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1684 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1686 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1687 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1689 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1690 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1692 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1693 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1694 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1696 /* Turn off SM_DSP clock. */
1697 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1698 MII_TG3_AUXCTL_ACTL_TX_6DB;
1699 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1702 static int tg3_wait_macro_done(struct tg3 *tp)
1709 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1710 if ((tmp32 & 0x1000) == 0)
1720 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1722 static const u32 test_pat[4][6] = {
1723 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1724 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1725 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1726 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1730 for (chan = 0; chan < 4; chan++) {
1733 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1734 (chan * 0x2000) | 0x0200);
1735 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1737 for (i = 0; i < 6; i++)
1738 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1741 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1742 if (tg3_wait_macro_done(tp)) {
1747 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1748 (chan * 0x2000) | 0x0200);
1749 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1750 if (tg3_wait_macro_done(tp)) {
1755 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1756 if (tg3_wait_macro_done(tp)) {
1761 for (i = 0; i < 6; i += 2) {
1764 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1765 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1766 tg3_wait_macro_done(tp)) {
1772 if (low != test_pat[chan][i] ||
1773 high != test_pat[chan][i+1]) {
1774 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1775 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1776 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1786 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1790 for (chan = 0; chan < 4; chan++) {
1793 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1794 (chan * 0x2000) | 0x0200);
1795 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1796 for (i = 0; i < 6; i++)
1797 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1798 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1799 if (tg3_wait_macro_done(tp))
1806 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1808 u32 reg32, phy9_orig;
1809 int retries, do_phy_reset, err;
1815 err = tg3_bmcr_reset(tp);
1821 /* Disable transmitter and interrupt. */
1822 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1826 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1828 /* Set full-duplex, 1000 mbps. */
1829 tg3_writephy(tp, MII_BMCR,
1830 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1832 /* Set to master mode. */
1833 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1836 tg3_writephy(tp, MII_TG3_CTRL,
1837 (MII_TG3_CTRL_AS_MASTER |
1838 MII_TG3_CTRL_ENABLE_AS_MASTER));
1840 /* Enable SM_DSP_CLOCK and 6dB. */
1841 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1843 /* Block the PHY control access. */
1844 tg3_phydsp_write(tp, 0x8005, 0x0800);
1846 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1849 } while (--retries);
1851 err = tg3_phy_reset_chanpat(tp);
1855 tg3_phydsp_write(tp, 0x8005, 0x0000);
1857 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1858 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1860 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1861 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1862 /* Set Extended packet length bit for jumbo frames */
1863 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1865 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1868 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1870 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1872 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1879 /* This will reset the tigon3 PHY if there is no valid
1880 * link unless the FORCE argument is non-zero.
1882 static int tg3_phy_reset(struct tg3 *tp)
1887 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1888 val = tr32(GRC_MISC_CFG);
1889 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1892 err = tg3_readphy(tp, MII_BMSR, &val);
1893 err |= tg3_readphy(tp, MII_BMSR, &val);
1897 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1898 netif_carrier_off(tp->dev);
1899 tg3_link_report(tp);
1902 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1903 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1905 err = tg3_phy_reset_5703_4_5(tp);
1912 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1913 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1914 cpmuctrl = tr32(TG3_CPMU_CTRL);
1915 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1917 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1920 err = tg3_bmcr_reset(tp);
1924 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1925 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1926 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
1928 tw32(TG3_CPMU_CTRL, cpmuctrl);
1931 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1932 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1933 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1934 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1935 CPMU_LSPD_1000MB_MACCLK_12_5) {
1936 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1938 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1942 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1944 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
1947 tg3_phy_apply_otp(tp);
1949 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
1950 tg3_phy_toggle_apd(tp, true);
1952 tg3_phy_toggle_apd(tp, false);
1955 if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
1956 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1957 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
1958 tg3_phydsp_write(tp, 0x000a, 0x0323);
1959 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1961 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
1962 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1963 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1965 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
1966 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1967 tg3_phydsp_write(tp, 0x000a, 0x310b);
1968 tg3_phydsp_write(tp, 0x201f, 0x9506);
1969 tg3_phydsp_write(tp, 0x401f, 0x14e2);
1970 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1971 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
1972 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1973 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1974 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
1975 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1976 tg3_writephy(tp, MII_TG3_TEST1,
1977 MII_TG3_TEST1_TRIM_EN | 0x4);
1979 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1980 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1982 /* Set Extended packet length bit (bit 14) on all chips that */
1983 /* support jumbo frames */
1984 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1985 /* Cannot do read-modify-write on 5401 */
1986 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1987 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1988 /* Set bit 14 with read-modify-write to preserve other bits */
1989 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1990 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1991 tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
1994 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1995 * jumbo frames transmission.
1997 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1998 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
1999 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2000 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2004 /* adjust output voltage */
2005 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2008 tg3_phy_toggle_automdix(tp, 1);
2009 tg3_phy_set_wirespeed(tp);
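/* Drive the GPIOs that control auxiliary power, coordinating with the
 * peer function on dual-port devices so WOL/ASF power stays available.
 */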
2013 static void tg3_frob_aux_power(struct tg3 *tp)
2015 struct tg3 *tp_peer = tp;
2017 /* The GPIOs do something completely different on 57765. */
2018 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2020 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2023 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2024 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2025 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2026 struct net_device *dev_peer;
2028 dev_peer = pci_get_drvdata(tp->pdev_peer);
2029 /* remove_one() may have been run on the peer. */
2033 tp_peer = netdev_priv(dev_peer);
2036 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2037 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2038 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2039 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2041 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2042 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2043 (GRC_LCLCTRL_GPIO_OE0 |
2044 GRC_LCLCTRL_GPIO_OE1 |
2045 GRC_LCLCTRL_GPIO_OE2 |
2046 GRC_LCLCTRL_GPIO_OUTPUT0 |
2047 GRC_LCLCTRL_GPIO_OUTPUT1),
2049 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2050 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2051 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2052 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2053 GRC_LCLCTRL_GPIO_OE1 |
2054 GRC_LCLCTRL_GPIO_OE2 |
2055 GRC_LCLCTRL_GPIO_OUTPUT0 |
2056 GRC_LCLCTRL_GPIO_OUTPUT1 |
2058 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2060 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2061 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2063 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2064 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2067 u32 grc_local_ctrl = 0;
2069 if (tp_peer != tp &&
2070 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2073 /* Workaround to prevent overdrawing Amps. */
2074 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2076 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2077 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2078 grc_local_ctrl, 100);
2081 /* On 5753 and variants, GPIO2 cannot be used. */
2082 no_gpio2 = tp->nic_sram_data_cfg &
2083 NIC_SRAM_DATA_CFG_NO_GPIO2;
2085 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2086 GRC_LCLCTRL_GPIO_OE1 |
2087 GRC_LCLCTRL_GPIO_OE2 |
2088 GRC_LCLCTRL_GPIO_OUTPUT1 |
2089 GRC_LCLCTRL_GPIO_OUTPUT2;
2091 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2092 GRC_LCLCTRL_GPIO_OUTPUT2);
2094 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2095 grc_local_ctrl, 100);
2097 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2099 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2100 grc_local_ctrl, 100);
2103 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2104 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2105 grc_local_ctrl, 100);
2109 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2110 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2111 if (tp_peer != tp &&
2112 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2115 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2116 (GRC_LCLCTRL_GPIO_OE1 |
2117 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2119 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2120 GRC_LCLCTRL_GPIO_OE1, 100);
2122 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2123 (GRC_LCLCTRL_GPIO_OE1 |
2124 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2129 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2131 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2133 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2134 if (speed != SPEED_10)
2136 } else if (speed == SPEED_10)
2142 static int tg3_setup_phy(struct tg3 *, int);
2144 #define RESET_KIND_SHUTDOWN 0
2145 #define RESET_KIND_INIT 1
2146 #define RESET_KIND_SUSPEND 2
2148 static void tg3_write_sig_post_reset(struct tg3 *, int);
2149 static int tg3_halt_cpu(struct tg3 *, u32);
2151 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2155 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2157 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2158 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2161 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2162 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2163 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2168 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2170 val = tr32(GRC_MISC_CFG);
2171 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2174 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2176 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2179 tg3_writephy(tp, MII_ADVERTISE, 0);
2180 tg3_writephy(tp, MII_BMCR,
2181 BMCR_ANENABLE | BMCR_ANRESTART);
2183 tg3_writephy(tp, MII_TG3_FET_TEST,
2184 phytest | MII_TG3_FET_SHADOW_EN);
2185 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2186 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2188 MII_TG3_FET_SHDW_AUXMODE4,
2191 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2194 } else if (do_low_power) {
2195 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2196 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2198 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2199 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2200 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2201 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2202 MII_TG3_AUXCTL_PCTL_VREG_11V);
2205 /* The PHY should not be powered down on some chips because
2208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2209 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2210 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2211 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2214 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2215 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2216 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2217 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2218 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2219 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2222 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2225 /* tp->lock is held. */
2226 static int tg3_nvram_lock(struct tg3 *tp)
2228 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2231 if (tp->nvram_lock_cnt == 0) {
2232 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2233 for (i = 0; i < 8000; i++) {
2234 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2239 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2243 tp->nvram_lock_cnt++;
2248 /* tp->lock is held. */
2249 static void tg3_nvram_unlock(struct tg3 *tp)
2251 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2252 if (tp->nvram_lock_cnt > 0)
2253 tp->nvram_lock_cnt--;
2254 if (tp->nvram_lock_cnt == 0)
2255 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
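/*
 * Note that nvram_lock_cnt lets lock/unlock calls nest: only the
 * outermost tg3_nvram_lock() arbitrates for the NVRAM with
 * SWARB_REQ_SET1, and only the final tg3_nvram_unlock() releases it
 * with SWARB_REQ_CLR1.  The usual caller pattern is the one in
 * tg3_nvram_read() below: lock, enable access, issue the command,
 * disable access, unlock.
 */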
2259 /* tp->lock is held. */
2260 static void tg3_enable_nvram_access(struct tg3 *tp)
2262 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2263 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2264 u32 nvaccess = tr32(NVRAM_ACCESS);
2266 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2270 /* tp->lock is held. */
2271 static void tg3_disable_nvram_access(struct tg3 *tp)
2273 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2274 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2275 u32 nvaccess = tr32(NVRAM_ACCESS);
2277 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2281 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2282 u32 offset, u32 *val)
2287 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2290 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2291 EEPROM_ADDR_DEVID_MASK |
2293 tw32(GRC_EEPROM_ADDR,
2295 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2296 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2297 EEPROM_ADDR_ADDR_MASK) |
2298 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2300 for (i = 0; i < 1000; i++) {
2301 tmp = tr32(GRC_EEPROM_ADDR);
2303 if (tmp & EEPROM_ADDR_COMPLETE)
2307 if (!(tmp & EEPROM_ADDR_COMPLETE))
2310 tmp = tr32(GRC_EEPROM_DATA);
2313 * The data will always be opposite the native endian
2314 * format. Perform a blind byteswap to compensate.
2321 #define NVRAM_CMD_TIMEOUT 10000
2323 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2327 tw32(NVRAM_CMD, nvram_cmd);
2328 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2330 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2336 if (i == NVRAM_CMD_TIMEOUT)
2342 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2344 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2345 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2346 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2347 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2348 (tp->nvram_jedecnum == JEDEC_ATMEL))
2350 addr = ((addr / tp->nvram_pagesize) <<
2351 ATMEL_AT45DB0X1B_PAGE_POS) +
2352 (addr % tp->nvram_pagesize);
2357 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2359 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2360 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2361 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2362 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2363 (tp->nvram_jedecnum == JEDEC_ATMEL))
2365 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2366 tp->nvram_pagesize) +
2367 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
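/*
 * Illustrative sketch, not part of the original driver: the Atmel
 * AT45DB0X1B parts are addressed as <page, byte-in-page>, so the two
 * helpers above move "addr / nvram_pagesize" into the page field and
 * keep "addr % nvram_pagesize" as the byte offset (and back again).
 * Assuming a hypothetical 264-byte page, a flat offset of 600 maps to
 * page 2, byte 72.
 */
static inline u32 tg3_example_atmel_pack(u32 addr, u32 pagesize)
{
	return ((addr / pagesize) << ATMEL_AT45DB0X1B_PAGE_POS) +
	       (addr % pagesize);
}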
2372 /* NOTE: Data read in from NVRAM is byteswapped according to
2373 * the byteswapping settings for all other register accesses.
2374 * tg3 devices are BE devices, so on a BE machine, the data
2375 * returned will be exactly as it is seen in NVRAM. On a LE
2376 * machine, the 32-bit value will be byteswapped.
2378 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2382 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2383 return tg3_nvram_read_using_eeprom(tp, offset, val);
2385 offset = tg3_nvram_phys_addr(tp, offset);
2387 if (offset > NVRAM_ADDR_MSK)
2390 ret = tg3_nvram_lock(tp);
2394 tg3_enable_nvram_access(tp);
2396 tw32(NVRAM_ADDR, offset);
2397 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2398 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2401 *val = tr32(NVRAM_RDDATA);
2403 tg3_disable_nvram_access(tp);
2405 tg3_nvram_unlock(tp);
2410 /* Ensures NVRAM data is in bytestream format. */
2411 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2414 int res = tg3_nvram_read(tp, offset, &v);
2416 *val = cpu_to_be32(v);
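/*
 * Roughly, if the four bytes at 'offset' in the flash are aa bb cc dd,
 * tg3_nvram_read() hands back v == 0xaabbccdd regardless of host
 * endianness, and cpu_to_be32() lays those bytes out in memory as
 * aa bb cc dd again, so callers can treat *val as a raw byte stream.
 */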
2420 /* tp->lock is held. */
2421 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2423 u32 addr_high, addr_low;
2426 addr_high = ((tp->dev->dev_addr[0] << 8) |
2427 tp->dev->dev_addr[1]);
2428 addr_low = ((tp->dev->dev_addr[2] << 24) |
2429 (tp->dev->dev_addr[3] << 16) |
2430 (tp->dev->dev_addr[4] << 8) |
2431 (tp->dev->dev_addr[5] << 0));
2432 for (i = 0; i < 4; i++) {
2433 if (i == 1 && skip_mac_1)
2435 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2436 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2440 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2441 for (i = 0; i < 12; i++) {
2442 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2443 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2447 addr_high = (tp->dev->dev_addr[0] +
2448 tp->dev->dev_addr[1] +
2449 tp->dev->dev_addr[2] +
2450 tp->dev->dev_addr[3] +
2451 tp->dev->dev_addr[4] +
2452 tp->dev->dev_addr[5]) &
2453 TX_BACKOFF_SEED_MASK;
2454 tw32(MAC_TX_BACKOFF_SEED, addr_high);
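/*
 * Illustrative sketch, not part of the original driver: the station
 * address is programmed as a 16-bit high half (first two octets) and a
 * 32-bit low half (last four octets), e.g. 00:10:18:aa:bb:cc becomes
 * high 0x0010 and low 0x18aabbcc, while the backoff seed written above
 * is just the sum of all six octets masked with TX_BACKOFF_SEED_MASK.
 */
static inline void tg3_example_mac_halves(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}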
2457 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2460 bool device_should_wake, do_low_power;
2462 /* Make sure register accesses (indirect or otherwise)
2463 * will function correctly.
2465 pci_write_config_dword(tp->pdev,
2466 TG3PCI_MISC_HOST_CTRL,
2467 tp->misc_host_ctrl);
2471 pci_enable_wake(tp->pdev, state, false);
2472 pci_set_power_state(tp->pdev, PCI_D0);
2474 /* Switch out of Vaux if it is a NIC */
2475 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2476 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2486 netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2491 /* Restore the CLKREQ setting. */
2492 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2495 pci_read_config_word(tp->pdev,
2496 tp->pcie_cap + PCI_EXP_LNKCTL,
2498 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2499 pci_write_config_word(tp->pdev,
2500 tp->pcie_cap + PCI_EXP_LNKCTL,
2504 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2505 tw32(TG3PCI_MISC_HOST_CTRL,
2506 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2508 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2509 device_may_wakeup(&tp->pdev->dev) &&
2510 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2512 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2513 do_low_power = false;
2514 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2515 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2516 struct phy_device *phydev;
2517 u32 phyid, advertising;
2519 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2521 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2523 tp->link_config.orig_speed = phydev->speed;
2524 tp->link_config.orig_duplex = phydev->duplex;
2525 tp->link_config.orig_autoneg = phydev->autoneg;
2526 tp->link_config.orig_advertising = phydev->advertising;
2528 advertising = ADVERTISED_TP |
2530 ADVERTISED_Autoneg |
2531 ADVERTISED_10baseT_Half;
2533 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2534 device_should_wake) {
2535 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2537 ADVERTISED_100baseT_Half |
2538 ADVERTISED_100baseT_Full |
2539 ADVERTISED_10baseT_Full;
2541 advertising |= ADVERTISED_10baseT_Full;
2544 phydev->advertising = advertising;
2546 phy_start_aneg(phydev);
2548 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2549 if (phyid != PHY_ID_BCMAC131) {
2550 phyid &= PHY_BCM_OUI_MASK;
2551 if (phyid == PHY_BCM_OUI_1 ||
2552 phyid == PHY_BCM_OUI_2 ||
2553 phyid == PHY_BCM_OUI_3)
2554 do_low_power = true;
2558 do_low_power = true;
2560 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2561 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2562 tp->link_config.orig_speed = tp->link_config.speed;
2563 tp->link_config.orig_duplex = tp->link_config.duplex;
2564 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2567 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2568 tp->link_config.speed = SPEED_10;
2569 tp->link_config.duplex = DUPLEX_HALF;
2570 tp->link_config.autoneg = AUTONEG_ENABLE;
2571 tg3_setup_phy(tp, 0);
2575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2578 val = tr32(GRC_VCPU_EXT_CTRL);
2579 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2580 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2584 for (i = 0; i < 200; i++) {
2585 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2586 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2591 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2592 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2593 WOL_DRV_STATE_SHUTDOWN |
2597 if (device_should_wake) {
2600 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2602 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2606 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2607 mac_mode = MAC_MODE_PORT_MODE_GMII;
2609 mac_mode = MAC_MODE_PORT_MODE_MII;
2611 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2612 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2614 u32 speed = (tp->tg3_flags &
2615 TG3_FLAG_WOL_SPEED_100MB) ?
2616 SPEED_100 : SPEED_10;
2617 if (tg3_5700_link_polarity(tp, speed))
2618 mac_mode |= MAC_MODE_LINK_POLARITY;
2620 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2623 mac_mode = MAC_MODE_PORT_MODE_TBI;
2626 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2627 tw32(MAC_LED_CTRL, tp->led_ctrl);
2629 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2630 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2631 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2632 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2633 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2634 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2636 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2637 mac_mode |= tp->mac_mode &
2638 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2639 if (mac_mode & MAC_MODE_APE_TX_EN)
2640 mac_mode |= MAC_MODE_TDE_ENABLE;
2643 tw32_f(MAC_MODE, mac_mode);
2646 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2650 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2651 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2652 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2655 base_val = tp->pci_clock_ctrl;
2656 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2657 CLOCK_CTRL_TXCLK_DISABLE);
2659 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2660 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2661 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2662 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2663 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2665 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2666 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2667 u32 newbits1, newbits2;
2669 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2671 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2672 CLOCK_CTRL_TXCLK_DISABLE |
2674 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2675 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2676 newbits1 = CLOCK_CTRL_625_CORE;
2677 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2679 newbits1 = CLOCK_CTRL_ALTCLK;
2680 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2683 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2686 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2689 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2692 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2693 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2694 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2695 CLOCK_CTRL_TXCLK_DISABLE |
2696 CLOCK_CTRL_44MHZ_CORE);
2698 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2701 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2702 tp->pci_clock_ctrl | newbits3, 40);
2706 if (!(device_should_wake) &&
2707 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2708 tg3_power_down_phy(tp, do_low_power);
2710 tg3_frob_aux_power(tp);
2712 /* Workaround for unstable PLL clock */
2713 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2714 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2715 u32 val = tr32(0x7d00);
2717 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2719 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2722 err = tg3_nvram_lock(tp);
2723 tg3_halt_cpu(tp, RX_CPU_BASE);
2725 tg3_nvram_unlock(tp);
2729 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2731 if (device_should_wake)
2732 pci_enable_wake(tp->pdev, state, true);
2734 /* Finally, set the new power state. */
2735 pci_set_power_state(tp->pdev, state);
2740 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2742 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2743 case MII_TG3_AUX_STAT_10HALF:
2745 *duplex = DUPLEX_HALF;
2748 case MII_TG3_AUX_STAT_10FULL:
2750 *duplex = DUPLEX_FULL;
2753 case MII_TG3_AUX_STAT_100HALF:
2755 *duplex = DUPLEX_HALF;
2758 case MII_TG3_AUX_STAT_100FULL:
2760 *duplex = DUPLEX_FULL;
2763 case MII_TG3_AUX_STAT_1000HALF:
2764 *speed = SPEED_1000;
2765 *duplex = DUPLEX_HALF;
2768 case MII_TG3_AUX_STAT_1000FULL:
2769 *speed = SPEED_1000;
2770 *duplex = DUPLEX_FULL;
2774 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2775 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2777 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2781 *speed = SPEED_INVALID;
2782 *duplex = DUPLEX_INVALID;
2787 static void tg3_phy_copper_begin(struct tg3 *tp)
2792 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2793 /* Entering low power mode. Disable gigabit and
2794 * 100baseT advertisements.
2796 tg3_writephy(tp, MII_TG3_CTRL, 0);
2798 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2799 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2800 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2801 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2803 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2804 } else if (tp->link_config.speed == SPEED_INVALID) {
2805 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2806 tp->link_config.advertising &=
2807 ~(ADVERTISED_1000baseT_Half |
2808 ADVERTISED_1000baseT_Full);
2810 new_adv = ADVERTISE_CSMA;
2811 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2812 new_adv |= ADVERTISE_10HALF;
2813 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2814 new_adv |= ADVERTISE_10FULL;
2815 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2816 new_adv |= ADVERTISE_100HALF;
2817 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2818 new_adv |= ADVERTISE_100FULL;
2820 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2822 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2824 if (tp->link_config.advertising &
2825 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2827 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2828 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2829 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2830 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2831 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2832 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2833 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2834 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2835 MII_TG3_CTRL_ENABLE_AS_MASTER);
2836 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2838 tg3_writephy(tp, MII_TG3_CTRL, 0);
2841 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2842 new_adv |= ADVERTISE_CSMA;
2844 /* Asking for a specific link mode. */
2845 if (tp->link_config.speed == SPEED_1000) {
2846 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2848 if (tp->link_config.duplex == DUPLEX_FULL)
2849 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2851 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2852 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2853 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2854 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2855 MII_TG3_CTRL_ENABLE_AS_MASTER);
2857 if (tp->link_config.speed == SPEED_100) {
2858 if (tp->link_config.duplex == DUPLEX_FULL)
2859 new_adv |= ADVERTISE_100FULL;
2861 new_adv |= ADVERTISE_100HALF;
2863 if (tp->link_config.duplex == DUPLEX_FULL)
2864 new_adv |= ADVERTISE_10FULL;
2866 new_adv |= ADVERTISE_10HALF;
2868 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2873 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2876 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2877 tp->link_config.speed != SPEED_INVALID) {
2878 u32 bmcr, orig_bmcr;
2880 tp->link_config.active_speed = tp->link_config.speed;
2881 tp->link_config.active_duplex = tp->link_config.duplex;
2884 switch (tp->link_config.speed) {
2890 bmcr |= BMCR_SPEED100;
2894 bmcr |= TG3_BMCR_SPEED1000;
2898 if (tp->link_config.duplex == DUPLEX_FULL)
2899 bmcr |= BMCR_FULLDPLX;
2901 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2902 (bmcr != orig_bmcr)) {
2903 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2904 for (i = 0; i < 1500; i++) {
2908 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2909 tg3_readphy(tp, MII_BMSR, &tmp))
2911 if (!(tmp & BMSR_LSTATUS)) {
2916 tg3_writephy(tp, MII_BMCR, bmcr);
2920 tg3_writephy(tp, MII_BMCR,
2921 BMCR_ANENABLE | BMCR_ANRESTART);
2925 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2929 /* Turn off tap power management. */
2930 /* Set Extended packet length bit */
2931 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2933 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
2934 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
2935 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
2936 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
2937 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
2944 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2946 u32 adv_reg, all_mask = 0;
2948 if (mask & ADVERTISED_10baseT_Half)
2949 all_mask |= ADVERTISE_10HALF;
2950 if (mask & ADVERTISED_10baseT_Full)
2951 all_mask |= ADVERTISE_10FULL;
2952 if (mask & ADVERTISED_100baseT_Half)
2953 all_mask |= ADVERTISE_100HALF;
2954 if (mask & ADVERTISED_100baseT_Full)
2955 all_mask |= ADVERTISE_100FULL;
2957 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2960 if ((adv_reg & all_mask) != all_mask)
2962 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2966 if (mask & ADVERTISED_1000baseT_Half)
2967 all_mask |= ADVERTISE_1000HALF;
2968 if (mask & ADVERTISED_1000baseT_Full)
2969 all_mask |= ADVERTISE_1000FULL;
2971 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2974 if ((tg3_ctrl & all_mask) != all_mask)
2980 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2984 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2987 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2988 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2990 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2991 if (curadv != reqadv)
2994 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2995 tg3_readphy(tp, MII_LPA, rmtadv);
2997 /* Reprogram the advertisement register, even if it
2998 * does not affect the current link. If the link
2999 * gets renegotiated in the future, we can save an
3000 * additional renegotiation cycle by advertising
3001 * it correctly in the first place.
3003 if (curadv != reqadv) {
3004 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3005 ADVERTISE_PAUSE_ASYM);
3006 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
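/*
 * Roughly speaking, the pause bits compared above follow the standard
 * 802.3 autoneg pause resolution: for example, when both ends advertise
 * ADVERTISE_PAUSE_CAP the link ends up with symmetric flow control.
 * The actual tx/rx pause decision is made by tg3_setup_flow_control()
 * in the caller once the link is established.
 */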
3013 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3015 int current_link_up;
3017 u32 lcl_adv, rmt_adv;
3025 (MAC_STATUS_SYNC_CHANGED |
3026 MAC_STATUS_CFG_CHANGED |
3027 MAC_STATUS_MI_COMPLETION |
3028 MAC_STATUS_LNKSTATE_CHANGED));
3031 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3033 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3037 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3039 /* Some third-party PHYs need to be reset on link going
3042 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3043 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3045 netif_carrier_ok(tp->dev)) {
3046 tg3_readphy(tp, MII_BMSR, &bmsr);
3047 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3048 !(bmsr & BMSR_LSTATUS))
3054 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3055 tg3_readphy(tp, MII_BMSR, &bmsr);
3056 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3057 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3060 if (!(bmsr & BMSR_LSTATUS)) {
3061 err = tg3_init_5401phy_dsp(tp);
3065 tg3_readphy(tp, MII_BMSR, &bmsr);
3066 for (i = 0; i < 1000; i++) {
3068 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3069 (bmsr & BMSR_LSTATUS)) {
3075 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3076 TG3_PHY_REV_BCM5401_B0 &&
3077 !(bmsr & BMSR_LSTATUS) &&
3078 tp->link_config.active_speed == SPEED_1000) {
3079 err = tg3_phy_reset(tp);
3081 err = tg3_init_5401phy_dsp(tp);
3086 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3087 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3088 /* 5701 {A0,B0} CRC bug workaround */
3089 tg3_writephy(tp, 0x15, 0x0a75);
3090 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3091 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3092 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3095 /* Clear pending interrupts... */
3096 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3097 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3099 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3100 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3101 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3102 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3106 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3107 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3108 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3110 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3113 current_link_up = 0;
3114 current_speed = SPEED_INVALID;
3115 current_duplex = DUPLEX_INVALID;
3117 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3118 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3119 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3120 if (!(val & (1 << 10))) {
3122 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3128 for (i = 0; i < 100; i++) {
3129 tg3_readphy(tp, MII_BMSR, &bmsr);
3130 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3131 (bmsr & BMSR_LSTATUS))
3136 if (bmsr & BMSR_LSTATUS) {
3139 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3140 for (i = 0; i < 2000; i++) {
3142 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3147 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3152 for (i = 0; i < 200; i++) {
3153 tg3_readphy(tp, MII_BMCR, &bmcr);
3154 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3156 if (bmcr && bmcr != 0x7fff)
3164 tp->link_config.active_speed = current_speed;
3165 tp->link_config.active_duplex = current_duplex;
3167 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3168 if ((bmcr & BMCR_ANENABLE) &&
3169 tg3_copper_is_advertising_all(tp,
3170 tp->link_config.advertising)) {
3171 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3173 current_link_up = 1;
3176 if (!(bmcr & BMCR_ANENABLE) &&
3177 tp->link_config.speed == current_speed &&
3178 tp->link_config.duplex == current_duplex &&
3179 tp->link_config.flowctrl ==
3180 tp->link_config.active_flowctrl) {
3181 current_link_up = 1;
3185 if (current_link_up == 1 &&
3186 tp->link_config.active_duplex == DUPLEX_FULL)
3187 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3191 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3192 tg3_phy_copper_begin(tp);
3194 tg3_readphy(tp, MII_BMSR, &bmsr);
3195 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3196 (bmsr & BMSR_LSTATUS))
3197 current_link_up = 1;
3200 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3201 if (current_link_up == 1) {
3202 if (tp->link_config.active_speed == SPEED_100 ||
3203 tp->link_config.active_speed == SPEED_10)
3204 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3206 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3207 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3208 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3210 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3212 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3213 if (tp->link_config.active_duplex == DUPLEX_HALF)
3214 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3217 if (current_link_up == 1 &&
3218 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3219 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3221 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3224 /* ??? Without this setting Netgear GA302T PHY does not
3225 * ??? send/receive packets...
3227 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3228 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3229 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3230 tw32_f(MAC_MI_MODE, tp->mi_mode);
3234 tw32_f(MAC_MODE, tp->mac_mode);
3237 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3238 /* Polled via timer. */
3239 tw32_f(MAC_EVENT, 0);
3241 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3246 current_link_up == 1 &&
3247 tp->link_config.active_speed == SPEED_1000 &&
3248 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3249 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3252 (MAC_STATUS_SYNC_CHANGED |
3253 MAC_STATUS_CFG_CHANGED));
3256 NIC_SRAM_FIRMWARE_MBOX,
3257 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3260 /* Prevent send BD corruption. */
3261 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3262 u16 oldlnkctl, newlnkctl;
3264 pci_read_config_word(tp->pdev,
3265 tp->pcie_cap + PCI_EXP_LNKCTL,
3267 if (tp->link_config.active_speed == SPEED_100 ||
3268 tp->link_config.active_speed == SPEED_10)
3269 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3271 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3272 if (newlnkctl != oldlnkctl)
3273 pci_write_config_word(tp->pdev,
3274 tp->pcie_cap + PCI_EXP_LNKCTL,
3278 if (current_link_up != netif_carrier_ok(tp->dev)) {
3279 if (current_link_up)
3280 netif_carrier_on(tp->dev);
3282 netif_carrier_off(tp->dev);
3283 tg3_link_report(tp);
3289 struct tg3_fiber_aneginfo {
3291 #define ANEG_STATE_UNKNOWN 0
3292 #define ANEG_STATE_AN_ENABLE 1
3293 #define ANEG_STATE_RESTART_INIT 2
3294 #define ANEG_STATE_RESTART 3
3295 #define ANEG_STATE_DISABLE_LINK_OK 4
3296 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3297 #define ANEG_STATE_ABILITY_DETECT 6
3298 #define ANEG_STATE_ACK_DETECT_INIT 7
3299 #define ANEG_STATE_ACK_DETECT 8
3300 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3301 #define ANEG_STATE_COMPLETE_ACK 10
3302 #define ANEG_STATE_IDLE_DETECT_INIT 11
3303 #define ANEG_STATE_IDLE_DETECT 12
3304 #define ANEG_STATE_LINK_OK 13
3305 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3306 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3309 #define MR_AN_ENABLE 0x00000001
3310 #define MR_RESTART_AN 0x00000002
3311 #define MR_AN_COMPLETE 0x00000004
3312 #define MR_PAGE_RX 0x00000008
3313 #define MR_NP_LOADED 0x00000010
3314 #define MR_TOGGLE_TX 0x00000020
3315 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3316 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3317 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3318 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3319 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3320 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3321 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3322 #define MR_TOGGLE_RX 0x00002000
3323 #define MR_NP_RX 0x00004000
3325 #define MR_LINK_OK 0x80000000
3327 unsigned long link_time, cur_time;
3329 u32 ability_match_cfg;
3330 int ability_match_count;
3332 char ability_match, idle_match, ack_match;
3334 u32 txconfig, rxconfig;
3335 #define ANEG_CFG_NP 0x00000080
3336 #define ANEG_CFG_ACK 0x00000040
3337 #define ANEG_CFG_RF2 0x00000020
3338 #define ANEG_CFG_RF1 0x00000010
3339 #define ANEG_CFG_PS2 0x00000001
3340 #define ANEG_CFG_PS1 0x00008000
3341 #define ANEG_CFG_HD 0x00004000
3342 #define ANEG_CFG_FD 0x00002000
3343 #define ANEG_CFG_INVAL 0x00001f06
3348 #define ANEG_TIMER_ENAB 2
3349 #define ANEG_FAILED -1
3351 #define ANEG_STATE_SETTLE_TIME 10000
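/*
 * In broad terms, the state machine below walks
 * AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 * ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 * COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 * IDLE_DETECT -> LINK_OK, dropping back to AN_ENABLE whenever the
 * partner's config words disappear, and returning ANEG_TIMER_ENAB
 * while it waits out ANEG_STATE_SETTLE_TIME.
 */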
3353 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3354 struct tg3_fiber_aneginfo *ap)
3357 unsigned long delta;
3361 if (ap->state == ANEG_STATE_UNKNOWN) {
3365 ap->ability_match_cfg = 0;
3366 ap->ability_match_count = 0;
3367 ap->ability_match = 0;
3373 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3374 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3376 if (rx_cfg_reg != ap->ability_match_cfg) {
3377 ap->ability_match_cfg = rx_cfg_reg;
3378 ap->ability_match = 0;
3379 ap->ability_match_count = 0;
3381 if (++ap->ability_match_count > 1) {
3382 ap->ability_match = 1;
3383 ap->ability_match_cfg = rx_cfg_reg;
3386 if (rx_cfg_reg & ANEG_CFG_ACK)
3394 ap->ability_match_cfg = 0;
3395 ap->ability_match_count = 0;
3396 ap->ability_match = 0;
3402 ap->rxconfig = rx_cfg_reg;
3405 switch (ap->state) {
3406 case ANEG_STATE_UNKNOWN:
3407 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3408 ap->state = ANEG_STATE_AN_ENABLE;
3411 case ANEG_STATE_AN_ENABLE:
3412 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3413 if (ap->flags & MR_AN_ENABLE) {
3416 ap->ability_match_cfg = 0;
3417 ap->ability_match_count = 0;
3418 ap->ability_match = 0;
3422 ap->state = ANEG_STATE_RESTART_INIT;
3424 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3428 case ANEG_STATE_RESTART_INIT:
3429 ap->link_time = ap->cur_time;
3430 ap->flags &= ~(MR_NP_LOADED);
3432 tw32(MAC_TX_AUTO_NEG, 0);
3433 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3434 tw32_f(MAC_MODE, tp->mac_mode);
3437 ret = ANEG_TIMER_ENAB;
3438 ap->state = ANEG_STATE_RESTART;
3441 case ANEG_STATE_RESTART:
3442 delta = ap->cur_time - ap->link_time;
3443 if (delta > ANEG_STATE_SETTLE_TIME)
3444 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3446 ret = ANEG_TIMER_ENAB;
3449 case ANEG_STATE_DISABLE_LINK_OK:
3453 case ANEG_STATE_ABILITY_DETECT_INIT:
3454 ap->flags &= ~(MR_TOGGLE_TX);
3455 ap->txconfig = ANEG_CFG_FD;
3456 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3457 if (flowctrl & ADVERTISE_1000XPAUSE)
3458 ap->txconfig |= ANEG_CFG_PS1;
3459 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3460 ap->txconfig |= ANEG_CFG_PS2;
3461 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3462 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3463 tw32_f(MAC_MODE, tp->mac_mode);
3466 ap->state = ANEG_STATE_ABILITY_DETECT;
3469 case ANEG_STATE_ABILITY_DETECT:
3470 if (ap->ability_match != 0 && ap->rxconfig != 0)
3471 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3474 case ANEG_STATE_ACK_DETECT_INIT:
3475 ap->txconfig |= ANEG_CFG_ACK;
3476 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3477 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3478 tw32_f(MAC_MODE, tp->mac_mode);
3481 ap->state = ANEG_STATE_ACK_DETECT;
3484 case ANEG_STATE_ACK_DETECT:
3485 if (ap->ack_match != 0) {
3486 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3487 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3488 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3490 ap->state = ANEG_STATE_AN_ENABLE;
3492 } else if (ap->ability_match != 0 &&
3493 ap->rxconfig == 0) {
3494 ap->state = ANEG_STATE_AN_ENABLE;
3498 case ANEG_STATE_COMPLETE_ACK_INIT:
3499 if (ap->rxconfig & ANEG_CFG_INVAL) {
3503 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3504 MR_LP_ADV_HALF_DUPLEX |
3505 MR_LP_ADV_SYM_PAUSE |
3506 MR_LP_ADV_ASYM_PAUSE |
3507 MR_LP_ADV_REMOTE_FAULT1 |
3508 MR_LP_ADV_REMOTE_FAULT2 |
3509 MR_LP_ADV_NEXT_PAGE |
3512 if (ap->rxconfig & ANEG_CFG_FD)
3513 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3514 if (ap->rxconfig & ANEG_CFG_HD)
3515 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3516 if (ap->rxconfig & ANEG_CFG_PS1)
3517 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3518 if (ap->rxconfig & ANEG_CFG_PS2)
3519 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3520 if (ap->rxconfig & ANEG_CFG_RF1)
3521 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3522 if (ap->rxconfig & ANEG_CFG_RF2)
3523 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3524 if (ap->rxconfig & ANEG_CFG_NP)
3525 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3527 ap->link_time = ap->cur_time;
3529 ap->flags ^= (MR_TOGGLE_TX);
3530 if (ap->rxconfig & 0x0008)
3531 ap->flags |= MR_TOGGLE_RX;
3532 if (ap->rxconfig & ANEG_CFG_NP)
3533 ap->flags |= MR_NP_RX;
3534 ap->flags |= MR_PAGE_RX;
3536 ap->state = ANEG_STATE_COMPLETE_ACK;
3537 ret = ANEG_TIMER_ENAB;
3540 case ANEG_STATE_COMPLETE_ACK:
3541 if (ap->ability_match != 0 &&
3542 ap->rxconfig == 0) {
3543 ap->state = ANEG_STATE_AN_ENABLE;
3546 delta = ap->cur_time - ap->link_time;
3547 if (delta > ANEG_STATE_SETTLE_TIME) {
3548 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3549 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3551 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3552 !(ap->flags & MR_NP_RX)) {
3553 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3561 case ANEG_STATE_IDLE_DETECT_INIT:
3562 ap->link_time = ap->cur_time;
3563 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3564 tw32_f(MAC_MODE, tp->mac_mode);
3567 ap->state = ANEG_STATE_IDLE_DETECT;
3568 ret = ANEG_TIMER_ENAB;
3571 case ANEG_STATE_IDLE_DETECT:
3572 if (ap->ability_match != 0 &&
3573 ap->rxconfig == 0) {
3574 ap->state = ANEG_STATE_AN_ENABLE;
3577 delta = ap->cur_time - ap->link_time;
3578 if (delta > ANEG_STATE_SETTLE_TIME) {
3579 /* XXX another gem from the Broadcom driver :( */
3580 ap->state = ANEG_STATE_LINK_OK;
3584 case ANEG_STATE_LINK_OK:
3585 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3589 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3590 /* ??? unimplemented */
3593 case ANEG_STATE_NEXT_PAGE_WAIT:
3594 /* ??? unimplemented */
3605 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3608 struct tg3_fiber_aneginfo aninfo;
3609 int status = ANEG_FAILED;
3613 tw32_f(MAC_TX_AUTO_NEG, 0);
3615 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3616 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3619 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3622 memset(&aninfo, 0, sizeof(aninfo));
3623 aninfo.flags |= MR_AN_ENABLE;
3624 aninfo.state = ANEG_STATE_UNKNOWN;
3625 aninfo.cur_time = 0;
3627 while (++tick < 195000) {
3628 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3629 if (status == ANEG_DONE || status == ANEG_FAILED)
3635 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3636 tw32_f(MAC_MODE, tp->mac_mode);
3639 *txflags = aninfo.txconfig;
3640 *rxflags = aninfo.flags;
3642 if (status == ANEG_DONE &&
3643 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3644 MR_LP_ADV_FULL_DUPLEX)))
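/*
 * In other words, fiber_autoneg() reports success when the state
 * machine ran to completion and the resulting flags show a usable link
 * (MR_AN_COMPLETE, MR_LINK_OK or a full-duplex partner);
 * tg3_setup_fiber_by_hand() below then programs flow control from the
 * negotiated txflags/rxflags pair.
 */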
3650 static void tg3_init_bcm8002(struct tg3 *tp)
3652 u32 mac_status = tr32(MAC_STATUS);
3655 /* Reset when initting for the first time or when we have a link. */
3656 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3657 !(mac_status & MAC_STATUS_PCS_SYNCED))
3660 /* Set PLL lock range. */
3661 tg3_writephy(tp, 0x16, 0x8007);
3664 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3666 /* Wait for reset to complete. */
3667 /* XXX schedule_timeout() ... */
3668 for (i = 0; i < 500; i++)
3671 /* Config mode; select PMA/Ch 1 regs. */
3672 tg3_writephy(tp, 0x10, 0x8411);
3674 /* Enable auto-lock and comdet, select txclk for tx. */
3675 tg3_writephy(tp, 0x11, 0x0a10);
3677 tg3_writephy(tp, 0x18, 0x00a0);
3678 tg3_writephy(tp, 0x16, 0x41ff);
3680 /* Assert and deassert POR. */
3681 tg3_writephy(tp, 0x13, 0x0400);
3683 tg3_writephy(tp, 0x13, 0x0000);
3685 tg3_writephy(tp, 0x11, 0x0a50);
3687 tg3_writephy(tp, 0x11, 0x0a10);
3689 /* Wait for signal to stabilize */
3690 /* XXX schedule_timeout() ... */
3691 for (i = 0; i < 15000; i++)
3694 /* Deselect the channel register so we can read the PHYID
3697 tg3_writephy(tp, 0x10, 0x8011);
3700 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3703 u32 sg_dig_ctrl, sg_dig_status;
3704 u32 serdes_cfg, expected_sg_dig_ctrl;
3705 int workaround, port_a;
3706 int current_link_up;
3709 expected_sg_dig_ctrl = 0;
3712 current_link_up = 0;
3714 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3715 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3717 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3720 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3721 /* preserve bits 20-23 for voltage regulator */
3722 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3725 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3727 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3728 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3730 u32 val = serdes_cfg;
3736 tw32_f(MAC_SERDES_CFG, val);
3739 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3741 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3742 tg3_setup_flow_control(tp, 0, 0);
3743 current_link_up = 1;
3748 /* Want auto-negotiation. */
3749 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3751 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3752 if (flowctrl & ADVERTISE_1000XPAUSE)
3753 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3754 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3755 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3757 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3758 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3759 tp->serdes_counter &&
3760 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3761 MAC_STATUS_RCVD_CFG)) ==
3762 MAC_STATUS_PCS_SYNCED)) {
3763 tp->serdes_counter--;
3764 current_link_up = 1;
3769 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3770 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3772 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3774 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3775 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3776 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3777 MAC_STATUS_SIGNAL_DET)) {
3778 sg_dig_status = tr32(SG_DIG_STATUS);
3779 mac_status = tr32(MAC_STATUS);
3781 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3782 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3783 u32 local_adv = 0, remote_adv = 0;
3785 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3786 local_adv |= ADVERTISE_1000XPAUSE;
3787 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3788 local_adv |= ADVERTISE_1000XPSE_ASYM;
3790 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3791 remote_adv |= LPA_1000XPAUSE;
3792 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3793 remote_adv |= LPA_1000XPAUSE_ASYM;
3795 tg3_setup_flow_control(tp, local_adv, remote_adv);
3796 current_link_up = 1;
3797 tp->serdes_counter = 0;
3798 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3799 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3800 if (tp->serdes_counter)
3801 tp->serdes_counter--;
3804 u32 val = serdes_cfg;
3811 tw32_f(MAC_SERDES_CFG, val);
3814 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3817 /* Link parallel detection - link is up */
3818 /* only if we have PCS_SYNC and not */
3819 /* receiving config code words */
3820 mac_status = tr32(MAC_STATUS);
3821 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3822 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3823 tg3_setup_flow_control(tp, 0, 0);
3824 current_link_up = 1;
3826 TG3_PHYFLG_PARALLEL_DETECT;
3827 tp->serdes_counter =
3828 SERDES_PARALLEL_DET_TIMEOUT;
3830 goto restart_autoneg;
3834 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3835 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3839 return current_link_up;
3842 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3844 int current_link_up = 0;
3846 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3849 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3850 u32 txflags, rxflags;
3853 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3854 u32 local_adv = 0, remote_adv = 0;
3856 if (txflags & ANEG_CFG_PS1)
3857 local_adv |= ADVERTISE_1000XPAUSE;
3858 if (txflags & ANEG_CFG_PS2)
3859 local_adv |= ADVERTISE_1000XPSE_ASYM;
3861 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3862 remote_adv |= LPA_1000XPAUSE;
3863 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3864 remote_adv |= LPA_1000XPAUSE_ASYM;
3866 tg3_setup_flow_control(tp, local_adv, remote_adv);
3868 current_link_up = 1;
3870 for (i = 0; i < 30; i++) {
3873 (MAC_STATUS_SYNC_CHANGED |
3874 MAC_STATUS_CFG_CHANGED));
3876 if ((tr32(MAC_STATUS) &
3877 (MAC_STATUS_SYNC_CHANGED |
3878 MAC_STATUS_CFG_CHANGED)) == 0)
3882 mac_status = tr32(MAC_STATUS);
3883 if (current_link_up == 0 &&
3884 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3885 !(mac_status & MAC_STATUS_RCVD_CFG))
3886 current_link_up = 1;
3888 tg3_setup_flow_control(tp, 0, 0);
3890 /* Forcing 1000FD link up. */
3891 current_link_up = 1;
3893 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3896 tw32_f(MAC_MODE, tp->mac_mode);
3901 return current_link_up;
3904 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3907 u16 orig_active_speed;
3908 u8 orig_active_duplex;
3910 int current_link_up;
3913 orig_pause_cfg = tp->link_config.active_flowctrl;
3914 orig_active_speed = tp->link_config.active_speed;
3915 orig_active_duplex = tp->link_config.active_duplex;
3917 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3918 netif_carrier_ok(tp->dev) &&
3919 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3920 mac_status = tr32(MAC_STATUS);
3921 mac_status &= (MAC_STATUS_PCS_SYNCED |
3922 MAC_STATUS_SIGNAL_DET |
3923 MAC_STATUS_CFG_CHANGED |
3924 MAC_STATUS_RCVD_CFG);
3925 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3926 MAC_STATUS_SIGNAL_DET)) {
3927 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3928 MAC_STATUS_CFG_CHANGED));
3933 tw32_f(MAC_TX_AUTO_NEG, 0);
3935 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3936 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3937 tw32_f(MAC_MODE, tp->mac_mode);
3940 if (tp->phy_id == TG3_PHY_ID_BCM8002)
3941 tg3_init_bcm8002(tp);
3943 /* Enable link change event even when serdes polling. */
3944 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3947 current_link_up = 0;
3948 mac_status = tr32(MAC_STATUS);
3950 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3951 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3953 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3955 tp->napi[0].hw_status->status =
3956 (SD_STATUS_UPDATED |
3957 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3959 for (i = 0; i < 100; i++) {
3960 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3961 MAC_STATUS_CFG_CHANGED));
3963 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3964 MAC_STATUS_CFG_CHANGED |
3965 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3969 mac_status = tr32(MAC_STATUS);
3970 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3971 current_link_up = 0;
3972 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3973 tp->serdes_counter == 0) {
3974 tw32_f(MAC_MODE, (tp->mac_mode |
3975 MAC_MODE_SEND_CONFIGS));
3977 tw32_f(MAC_MODE, tp->mac_mode);
3981 if (current_link_up == 1) {
3982 tp->link_config.active_speed = SPEED_1000;
3983 tp->link_config.active_duplex = DUPLEX_FULL;
3984 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3985 LED_CTRL_LNKLED_OVERRIDE |
3986 LED_CTRL_1000MBPS_ON));
3988 tp->link_config.active_speed = SPEED_INVALID;
3989 tp->link_config.active_duplex = DUPLEX_INVALID;
3990 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3991 LED_CTRL_LNKLED_OVERRIDE |
3992 LED_CTRL_TRAFFIC_OVERRIDE));
3995 if (current_link_up != netif_carrier_ok(tp->dev)) {
3996 if (current_link_up)
3997 netif_carrier_on(tp->dev);
3999 netif_carrier_off(tp->dev);
4000 tg3_link_report(tp);
4002 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4003 if (orig_pause_cfg != now_pause_cfg ||
4004 orig_active_speed != tp->link_config.active_speed ||
4005 orig_active_duplex != tp->link_config.active_duplex)
4006 tg3_link_report(tp);
4012 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4014 int current_link_up, err = 0;
4018 u32 local_adv, remote_adv;
4020 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4021 tw32_f(MAC_MODE, tp->mac_mode);
4027 (MAC_STATUS_SYNC_CHANGED |
4028 MAC_STATUS_CFG_CHANGED |
4029 MAC_STATUS_MI_COMPLETION |
4030 MAC_STATUS_LNKSTATE_CHANGED));
4036 current_link_up = 0;
4037 current_speed = SPEED_INVALID;
4038 current_duplex = DUPLEX_INVALID;
4040 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4041 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4043 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4044 bmsr |= BMSR_LSTATUS;
4046 bmsr &= ~BMSR_LSTATUS;
4049 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4051 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4052 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4053 /* do nothing, just check for link up at the end */
4054 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4057 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4058 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4059 ADVERTISE_1000XPAUSE |
4060 ADVERTISE_1000XPSE_ASYM |
4063 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4065 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4066 new_adv |= ADVERTISE_1000XHALF;
4067 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4068 new_adv |= ADVERTISE_1000XFULL;
4070 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4071 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4072 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4073 tg3_writephy(tp, MII_BMCR, bmcr);
4075 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4076 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4077 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4084 bmcr &= ~BMCR_SPEED1000;
4085 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4087 if (tp->link_config.duplex == DUPLEX_FULL)
4088 new_bmcr |= BMCR_FULLDPLX;
4090 if (new_bmcr != bmcr) {
4091 /* BMCR_SPEED1000 is a reserved bit that needs
4092 * to be set on write.
4094 new_bmcr |= BMCR_SPEED1000;
4096 /* Force a linkdown */
4097 if (netif_carrier_ok(tp->dev)) {
4100 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4101 adv &= ~(ADVERTISE_1000XFULL |
4102 ADVERTISE_1000XHALF |
4104 tg3_writephy(tp, MII_ADVERTISE, adv);
4105 tg3_writephy(tp, MII_BMCR, bmcr |
4109 netif_carrier_off(tp->dev);
4111 tg3_writephy(tp, MII_BMCR, new_bmcr);
4113 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4114 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4115 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4117 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4118 bmsr |= BMSR_LSTATUS;
4120 bmsr &= ~BMSR_LSTATUS;
4122 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4126 if (bmsr & BMSR_LSTATUS) {
4127 current_speed = SPEED_1000;
4128 current_link_up = 1;
4129 if (bmcr & BMCR_FULLDPLX)
4130 current_duplex = DUPLEX_FULL;
4132 current_duplex = DUPLEX_HALF;
4137 if (bmcr & BMCR_ANENABLE) {
4140 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4141 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4142 common = local_adv & remote_adv;
4143 if (common & (ADVERTISE_1000XHALF |
4144 ADVERTISE_1000XFULL)) {
4145 if (common & ADVERTISE_1000XFULL)
4146 current_duplex = DUPLEX_FULL;
4148 current_duplex = DUPLEX_HALF;
4149 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4150 /* Link is up via parallel detect */
4152 current_link_up = 0;
4157 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4158 tg3_setup_flow_control(tp, local_adv, remote_adv);
4160 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4161 if (tp->link_config.active_duplex == DUPLEX_HALF)
4162 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4164 tw32_f(MAC_MODE, tp->mac_mode);
4167 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4169 tp->link_config.active_speed = current_speed;
4170 tp->link_config.active_duplex = current_duplex;
4172 if (current_link_up != netif_carrier_ok(tp->dev)) {
4173 if (current_link_up)
4174 netif_carrier_on(tp->dev);
4176 netif_carrier_off(tp->dev);
4177 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4179 tg3_link_report(tp);
4184 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4186 if (tp->serdes_counter) {
4187 /* Give autoneg time to complete. */
4188 tp->serdes_counter--;
4192 if (!netif_carrier_ok(tp->dev) &&
4193 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4196 tg3_readphy(tp, MII_BMCR, &bmcr);
4197 if (bmcr & BMCR_ANENABLE) {
4200 /* Select shadow register 0x1f */
4201 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4202 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4204 /* Select expansion interrupt status register */
4205 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4206 MII_TG3_DSP_EXP1_INT_STAT);
4207 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4208 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4210 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4211 /* We have signal detect and not receiving
4212 * config code words, link is up by parallel
4216 bmcr &= ~BMCR_ANENABLE;
4217 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4218 tg3_writephy(tp, MII_BMCR, bmcr);
4219 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4222 } else if (netif_carrier_ok(tp->dev) &&
4223 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4224 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4227 /* Select expansion interrupt status register */
4228 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4229 MII_TG3_DSP_EXP1_INT_STAT);
4230 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4234 /* Config code words received, turn on autoneg. */
4235 tg3_readphy(tp, MII_BMCR, &bmcr);
4236 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4238 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
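/*
 * To summarize the two branches above: if there is signal detect but no
 * incoming config code words, the partner is not autonegotiating, so
 * the link is forced to 1000/full and TG3_PHYFLG_PARALLEL_DETECT is
 * set; if config words later show up on a parallel-detected link,
 * autoneg is switched back on and the flag is cleared.
 */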
4244 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4248 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4249 err = tg3_setup_fiber_phy(tp, force_reset);
4250 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4251 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4253 err = tg3_setup_copper_phy(tp, force_reset);
4255 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4258 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4259 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4261 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4266 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4267 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4268 tw32(GRC_MISC_CFG, val);
4271 if (tp->link_config.active_speed == SPEED_1000 &&
4272 tp->link_config.active_duplex == DUPLEX_HALF)
4273 tw32(MAC_TX_LENGTHS,
4274 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4275 (6 << TX_LENGTHS_IPG_SHIFT) |
4276 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4278 tw32(MAC_TX_LENGTHS,
4279 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4280 (6 << TX_LENGTHS_IPG_SHIFT) |
4281 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4283 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4284 if (netif_carrier_ok(tp->dev)) {
4285 tw32(HOSTCC_STAT_COAL_TICKS,
4286 tp->coal.stats_block_coalesce_usecs);
4288 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4292 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4293 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4294 if (!netif_carrier_ok(tp->dev))
4295 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4298 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4299 tw32(PCIE_PWR_MGMT_THRESH, val);
4305 static inline int tg3_irq_sync(struct tg3 *tp)
4307 return tp->irq_sync;
4310 /* This is called whenever we suspect that the system chipset is re-
4311 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4312 * is bogus tx completions. We try to recover by setting the
4313 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4316 static void tg3_tx_recover(struct tg3 *tp)
4318 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4319 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4321 netdev_warn(tp->dev,
4322 "The system may be re-ordering memory-mapped I/O "
4323 "cycles to the network device, attempting to recover. "
4324 "Please report the problem to the driver maintainer "
4325 "and include system chipset information.\n");
4327 spin_lock(&tp->lock);
4328 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4329 spin_unlock(&tp->lock);
4332 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4334 /* Tell compiler to fetch tx indices from memory. */
4336 return tnapi->tx_pending -
4337 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
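/*
 * Illustrative sketch, not part of the original driver: because
 * TG3_TX_RING_SIZE is a power of two, the in-flight count above stays
 * correct across index wraparound, e.g. prod = 5, cons = 510 gives
 * (5 - 510) & 511 == 7 outstanding descriptors.
 */
static inline u32 tg3_example_tx_inflight(u32 prod, u32 cons)
{
	return (prod - cons) & (TG3_TX_RING_SIZE - 1);
}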
4340 /* Tigon3 never reports partial packet sends. So we do not
4341 * need special logic to handle SKBs that have not had all
4342 * of their frags sent yet, like SunGEM does.
4344 static void tg3_tx(struct tg3_napi *tnapi)
4346 struct tg3 *tp = tnapi->tp;
4347 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4348 u32 sw_idx = tnapi->tx_cons;
4349 struct netdev_queue *txq;
4350 int index = tnapi - tp->napi;
4352 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4355 txq = netdev_get_tx_queue(tp->dev, index);
4357 while (sw_idx != hw_idx) {
4358 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4359 struct sk_buff *skb = ri->skb;
4362 if (unlikely(skb == NULL)) {
4367 pci_unmap_single(tp->pdev,
4368 dma_unmap_addr(ri, mapping),
4374 sw_idx = NEXT_TX(sw_idx);
4376 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4377 ri = &tnapi->tx_buffers[sw_idx];
4378 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4381 pci_unmap_page(tp->pdev,
4382 dma_unmap_addr(ri, mapping),
4383 skb_shinfo(skb)->frags[i].size,
4385 sw_idx = NEXT_TX(sw_idx);
4390 if (unlikely(tx_bug)) {
4396 tnapi->tx_cons = sw_idx;
4398 /* Need to make the tx_cons update visible to tg3_start_xmit()
4399 * before checking for netif_queue_stopped(). Without the
4400 * memory barrier, there is a small possibility that tg3_start_xmit()
4401 * will miss it and cause the queue to be stopped forever.
4405 if (unlikely(netif_tx_queue_stopped(txq) &&
4406 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4407 __netif_tx_lock(txq, smp_processor_id());
4408 if (netif_tx_queue_stopped(txq) &&
4409 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4410 netif_tx_wake_queue(txq);
4411 __netif_tx_unlock(txq);
4415 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4420 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4421 map_sz, PCI_DMA_FROMDEVICE);
4422 dev_kfree_skb_any(ri->skb);
4426 /* Returns size of skb allocated or < 0 on error.
4428 * We only need to fill in the address because the other members
4429 * of the RX descriptor are invariant, see tg3_init_rings.
4431 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4432 * posting buffers we only dirty the first cache line of the RX
4433 * descriptor (containing the address). Whereas for the RX status
4434 * buffers the cpu only reads the last cacheline of the RX descriptor
4435 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4437 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4438 u32 opaque_key, u32 dest_idx_unmasked)
4440 struct tg3_rx_buffer_desc *desc;
4441 struct ring_info *map, *src_map;
4442 struct sk_buff *skb;
4444 int skb_size, dest_idx;
4447 switch (opaque_key) {
4448 case RXD_OPAQUE_RING_STD:
4449 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4450 desc = &tpr->rx_std[dest_idx];
4451 map = &tpr->rx_std_buffers[dest_idx];
4452 skb_size = tp->rx_pkt_map_sz;
4455 case RXD_OPAQUE_RING_JUMBO:
4456 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4457 desc = &tpr->rx_jmb[dest_idx].std;
4458 map = &tpr->rx_jmb_buffers[dest_idx];
4459 skb_size = TG3_RX_JMB_MAP_SZ;
4466 /* Do not overwrite any of the map or rp information
4467 * until we are sure we can commit to a new buffer.
4469 * Callers depend upon this behavior and assume that
4470 * we leave everything unchanged if we fail.
4472 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4476 skb_reserve(skb, tp->rx_offset);
4478 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4479 PCI_DMA_FROMDEVICE);
4480 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4486 dma_unmap_addr_set(map, mapping, mapping);
4488 desc->addr_hi = ((u64)mapping >> 32);
4489 desc->addr_lo = ((u64)mapping & 0xffffffff);
4494 /* We only need to move the address over because the other
4495 * members of the RX descriptor are invariant. See notes above
4496 * tg3_alloc_rx_skb for full details.
4498 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4499 struct tg3_rx_prodring_set *dpr,
4500 u32 opaque_key, int src_idx,
4501 u32 dest_idx_unmasked)
4503 struct tg3 *tp = tnapi->tp;
4504 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4505 struct ring_info *src_map, *dest_map;
4506 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4509 switch (opaque_key) {
4510 case RXD_OPAQUE_RING_STD:
4511 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4512 dest_desc = &dpr->rx_std[dest_idx];
4513 dest_map = &dpr->rx_std_buffers[dest_idx];
4514 src_desc = &spr->rx_std[src_idx];
4515 src_map = &spr->rx_std_buffers[src_idx];
4518 case RXD_OPAQUE_RING_JUMBO:
4519 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4520 dest_desc = &dpr->rx_jmb[dest_idx].std;
4521 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4522 src_desc = &spr->rx_jmb[src_idx].std;
4523 src_map = &spr->rx_jmb_buffers[src_idx];
4530 dest_map->skb = src_map->skb;
4531 dma_unmap_addr_set(dest_map, mapping,
4532 dma_unmap_addr(src_map, mapping));
4533 dest_desc->addr_hi = src_desc->addr_hi;
4534 dest_desc->addr_lo = src_desc->addr_lo;
4536 /* Ensure that the update to the skb happens after the physical
4537 * addresses have been transferred to the new BD location.
4541 src_map->skb = NULL;
4544 /* The RX ring scheme is composed of multiple rings which post fresh
4545 * buffers to the chip, and one special ring the chip uses to report
4546 * status back to the host.
4548 * The special ring reports the status of received packets to the
4549 * host. The chip does not write into the original descriptor the
4550 * RX buffer was obtained from. The chip simply takes the original
4551 * descriptor as provided by the host, updates the status and length
4552 * field, then writes this into the next status ring entry.
4554 * Each ring the host uses to post buffers to the chip is described
4555 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4556 * it is first placed into the on-chip ram. When the packet's length
4557 * is known, it walks down the TG3_BDINFO entries to select the ring.
4558 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4559 * whose MAXLEN covers the new packet's length is chosen.
4561 * The "separate ring for rx status" scheme may sound queer, but it makes
4562 * sense from a cache coherency perspective. If only the host writes
4563 * to the buffer post rings, and only the chip writes to the rx status
4564 * rings, then cache lines never move beyond shared-modified state.
4565 * If both the host and chip were to write into the same ring, cache line
4566 * eviction could occur since both entities want it in an exclusive state.
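/* Added illustration (not part of the original comment): the resulting
 * flow for one received packet, roughly:
 *
 *	host: post an empty buffer on the std/jumbo producer ring
 *	chip: DMA the packet into that buffer, then write a status ring
 *	      entry carrying the length, flags and the buffer's opaque cookie
 *	host: read the status ring entry, hand the skb to the stack, and
 *	      repost a fresh buffer in the slot identified by the cookie
 */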
4568 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4570 struct tg3 *tp = tnapi->tp;
4571 u32 work_mask, rx_std_posted = 0;
4572 u32 std_prod_idx, jmb_prod_idx;
4573 u32 sw_idx = tnapi->rx_rcb_ptr;
4576 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4578 hw_idx = *(tnapi->rx_rcb_prod_idx);
4580 * We need to order the read of hw_idx and the read of
4581 * the opaque cookie.
4586 std_prod_idx = tpr->rx_std_prod_idx;
4587 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4588 while (sw_idx != hw_idx && budget > 0) {
4589 struct ring_info *ri;
4590 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4592 struct sk_buff *skb;
4593 dma_addr_t dma_addr;
4594 u32 opaque_key, desc_idx, *post_ptr;
4595 bool hw_vlan __maybe_unused = false;
4596 u16 vtag __maybe_unused = 0;
4598 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4599 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4600 if (opaque_key == RXD_OPAQUE_RING_STD) {
4601 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4602 dma_addr = dma_unmap_addr(ri, mapping);
4604 post_ptr = &std_prod_idx;
4606 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4607 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4608 dma_addr = dma_unmap_addr(ri, mapping);
4610 post_ptr = &jmb_prod_idx;
4612 goto next_pkt_nopost;
4614 work_mask |= opaque_key;
4616 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4617 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4619 tg3_recycle_rx(tnapi, tpr, opaque_key,
4620 desc_idx, *post_ptr);
4622 /* Other statistics are tracked by the card. */
4623 tp->net_stats.rx_dropped++;
4627 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4630 if (len > TG3_RX_COPY_THRESH(tp)) {
4633 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4638 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4639 PCI_DMA_FROMDEVICE);
4641 /* Ensure that the update to the skb happens
4642 * after the usage of the old DMA mapping.
4650 struct sk_buff *copy_skb;
4652 tg3_recycle_rx(tnapi, tpr, opaque_key,
4653 desc_idx, *post_ptr);
4655 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
4657 if (copy_skb == NULL)
4658 goto drop_it_no_recycle;
4660 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
4661 skb_put(copy_skb, len);
4662 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4663 skb_copy_from_linear_data(skb, copy_skb->data, len);
4664 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4666 /* We'll reuse the original ring buffer. */
4670 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4671 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4672 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4673 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4674 skb->ip_summed = CHECKSUM_UNNECESSARY;
4676 skb_checksum_none_assert(skb);
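/* Added note (illustration, not in the original source): the 0xffff test
 * above relies on the standard one's-complement property that summing a
 * TCP/UDP segment which already carries a correct checksum (checksum field
 * included) always yields 0xffff, so a hardware-reported sum of 0xffff
 * marks the packet as verified and anything else is left for the stack.
 */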
4678 skb->protocol = eth_type_trans(skb, tp->dev);
4680 if (len > (tp->dev->mtu + ETH_HLEN) &&
4681 skb->protocol != htons(ETH_P_8021Q)) {
4686 if (desc->type_flags & RXD_FLAG_VLAN &&
4687 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
4688 vtag = desc->err_vlan & RXD_VLAN_MASK;
4689 #if TG3_VLAN_TAG_USED
4695 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4696 __skb_push(skb, VLAN_HLEN);
4698 memmove(ve, skb->data + VLAN_HLEN,
4700 ve->h_vlan_proto = htons(ETH_P_8021Q);
4701 ve->h_vlan_TCI = htons(vtag);
4705 #if TG3_VLAN_TAG_USED
4707 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4710 napi_gro_receive(&tnapi->napi, skb);
4718 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4719 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4720 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4721 tpr->rx_std_prod_idx);
4722 work_mask &= ~RXD_OPAQUE_RING_STD;
4727 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4729 /* Refresh hw_idx to see if there is new work */
4730 if (sw_idx == hw_idx) {
4731 hw_idx = *(tnapi->rx_rcb_prod_idx);
4736 /* ACK the status ring. */
4737 tnapi->rx_rcb_ptr = sw_idx;
4738 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4740 /* Refill RX ring(s). */
4741 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4742 if (work_mask & RXD_OPAQUE_RING_STD) {
4743 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4744 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4745 tpr->rx_std_prod_idx);
4747 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4748 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4749 TG3_RX_JUMBO_RING_SIZE;
4750 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4751 tpr->rx_jmb_prod_idx);
4754 } else if (work_mask) {
4755 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4756 * updated before the producer indices can be updated.
4760 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4761 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4763 if (tnapi != &tp->napi[1])
4764 napi_schedule(&tp->napi[1].napi);
4770 static void tg3_poll_link(struct tg3 *tp)
4772 /* handle link change and other phy events */
4773 if (!(tp->tg3_flags &
4774 (TG3_FLAG_USE_LINKCHG_REG |
4775 TG3_FLAG_POLL_SERDES))) {
4776 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4778 if (sblk->status & SD_STATUS_LINK_CHG) {
4779 sblk->status = SD_STATUS_UPDATED |
4780 (sblk->status & ~SD_STATUS_LINK_CHG);
4781 spin_lock(&tp->lock);
4782 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4784 (MAC_STATUS_SYNC_CHANGED |
4785 MAC_STATUS_CFG_CHANGED |
4786 MAC_STATUS_MI_COMPLETION |
4787 MAC_STATUS_LNKSTATE_CHANGED));
4790 tg3_setup_phy(tp, 0);
4791 spin_unlock(&tp->lock);
4796 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4797 struct tg3_rx_prodring_set *dpr,
4798 struct tg3_rx_prodring_set *spr)
4800 u32 si, di, cpycnt, src_prod_idx;
4804 src_prod_idx = spr->rx_std_prod_idx;
4806 /* Make sure updates to the rx_std_buffers[] entries and the
4807 * standard producer index are seen in the correct order.
4811 if (spr->rx_std_cons_idx == src_prod_idx)
4814 if (spr->rx_std_cons_idx < src_prod_idx)
4815 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4817 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4819 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4821 si = spr->rx_std_cons_idx;
4822 di = dpr->rx_std_prod_idx;
4824 for (i = di; i < di + cpycnt; i++) {
4825 if (dpr->rx_std_buffers[i].skb) {
4835 /* Ensure that updates to the rx_std_buffers ring and the
4836 * shadowed hardware producer ring from tg3_recycle_skb() are
4837 * ordered correctly WRT the skb check above.
4841 memcpy(&dpr->rx_std_buffers[di],
4842 &spr->rx_std_buffers[si],
4843 cpycnt * sizeof(struct ring_info));
4845 for (i = 0; i < cpycnt; i++, di++, si++) {
4846 struct tg3_rx_buffer_desc *sbd, *dbd;
4847 sbd = &spr->rx_std[si];
4848 dbd = &dpr->rx_std[di];
4849 dbd->addr_hi = sbd->addr_hi;
4850 dbd->addr_lo = sbd->addr_lo;
4853 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4855 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4860 src_prod_idx = spr->rx_jmb_prod_idx;
4862 /* Make sure updates to the rx_jmb_buffers[] entries and
4863 * the jumbo producer index are seen in the correct order.
4867 if (spr->rx_jmb_cons_idx == src_prod_idx)
4870 if (spr->rx_jmb_cons_idx < src_prod_idx)
4871 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4873 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4875 cpycnt = min(cpycnt,
4876 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4878 si = spr->rx_jmb_cons_idx;
4879 di = dpr->rx_jmb_prod_idx;
4881 for (i = di; i < di + cpycnt; i++) {
4882 if (dpr->rx_jmb_buffers[i].skb) {
4892 /* Ensure that updates to the rx_jmb_buffers ring and the
4893 * shadowed hardware producer ring from tg3_recycle_skb() are
4894 * ordered correctly WRT the skb check above.
4898 memcpy(&dpr->rx_jmb_buffers[di],
4899 &spr->rx_jmb_buffers[si],
4900 cpycnt * sizeof(struct ring_info));
4902 for (i = 0; i < cpycnt; i++, di++, si++) {
4903 struct tg3_rx_buffer_desc *sbd, *dbd;
4904 sbd = &spr->rx_jmb[si].std;
4905 dbd = &dpr->rx_jmb[di].std;
4906 dbd->addr_hi = sbd->addr_hi;
4907 dbd->addr_lo = sbd->addr_lo;
4910 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4911 TG3_RX_JUMBO_RING_SIZE;
4912 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4913 TG3_RX_JUMBO_RING_SIZE;
4919 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4921 struct tg3 *tp = tnapi->tp;
4923 /* run TX completion thread */
4924 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4926 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4930 /* run RX thread, within the bounds set by NAPI.
4931 * All RX "locking" is done by ensuring outside
4932 * code synchronizes with tg3->napi.poll()
4934 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4935 work_done += tg3_rx(tnapi, budget - work_done);
4937 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4938 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
4940 u32 std_prod_idx = dpr->rx_std_prod_idx;
4941 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4943 for (i = 1; i < tp->irq_cnt; i++)
4944 err |= tg3_rx_prodring_xfer(tp, dpr,
4945 &tp->napi[i].prodring);
4949 if (std_prod_idx != dpr->rx_std_prod_idx)
4950 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4951 dpr->rx_std_prod_idx);
4953 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4954 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4955 dpr->rx_jmb_prod_idx);
4960 tw32_f(HOSTCC_MODE, tp->coal_now);
4966 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4968 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4969 struct tg3 *tp = tnapi->tp;
4971 struct tg3_hw_status *sblk = tnapi->hw_status;
4974 work_done = tg3_poll_work(tnapi, work_done, budget);
4976 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4979 if (unlikely(work_done >= budget))
4982 /* tp->last_tag is used in tg3_int_reenable() below
4983 * to tell the hw how much work has been processed,
4984 * so we must read it before checking for more work.
4986 tnapi->last_tag = sblk->status_tag;
4987 tnapi->last_irq_tag = tnapi->last_tag;
4990 /* check for RX/TX work to do */
4991 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4992 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
4993 napi_complete(napi);
4994 /* Reenable interrupts. */
4995 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5004 /* work_done is guaranteed to be less than budget. */
5005 napi_complete(napi);
5006 schedule_work(&tp->reset_task);
5010 static int tg3_poll(struct napi_struct *napi, int budget)
5012 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5013 struct tg3 *tp = tnapi->tp;
5015 struct tg3_hw_status *sblk = tnapi->hw_status;
5020 work_done = tg3_poll_work(tnapi, work_done, budget);
5022 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5025 if (unlikely(work_done >= budget))
5028 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5029 /* tp->last_tag is used in tg3_int_reenable() below
5030 * to tell the hw how much work has been processed,
5031 * so we must read it before checking for more work.
5033 tnapi->last_tag = sblk->status_tag;
5034 tnapi->last_irq_tag = tnapi->last_tag;
5037 sblk->status &= ~SD_STATUS_UPDATED;
5039 if (likely(!tg3_has_work(tnapi))) {
5040 napi_complete(napi);
5041 tg3_int_reenable(tnapi);
5049 /* work_done is guaranteed to be less than budget. */
5050 napi_complete(napi);
5051 schedule_work(&tp->reset_task);
5055 static void tg3_napi_disable(struct tg3 *tp)
5059 for (i = tp->irq_cnt - 1; i >= 0; i--)
5060 napi_disable(&tp->napi[i].napi);
5063 static void tg3_napi_enable(struct tg3 *tp)
5067 for (i = 0; i < tp->irq_cnt; i++)
5068 napi_enable(&tp->napi[i].napi);
5071 static void tg3_napi_init(struct tg3 *tp)
5075 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5076 for (i = 1; i < tp->irq_cnt; i++)
5077 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5080 static void tg3_napi_fini(struct tg3 *tp)
5084 for (i = 0; i < tp->irq_cnt; i++)
5085 netif_napi_del(&tp->napi[i].napi);
5088 static inline void tg3_netif_stop(struct tg3 *tp)
5090 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5091 tg3_napi_disable(tp);
5092 netif_tx_disable(tp->dev);
5095 static inline void tg3_netif_start(struct tg3 *tp)
5097 /* NOTE: unconditional netif_tx_wake_all_queues is only
5098 * appropriate so long as all callers are assured to
5099 * have free tx slots (such as after tg3_init_hw)
5101 netif_tx_wake_all_queues(tp->dev);
5103 tg3_napi_enable(tp);
5104 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5105 tg3_enable_ints(tp);
5108 static void tg3_irq_quiesce(struct tg3 *tp)
5112 BUG_ON(tp->irq_sync);
5117 for (i = 0; i < tp->irq_cnt; i++)
5118 synchronize_irq(tp->napi[i].irq_vec);
5121 /* Fully shut down all tg3 driver activity elsewhere in the system.
5122 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5123 * as well. Most of the time, this is not necessary except when
5124 * shutting down the device.
5126 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5128 spin_lock_bh(&tp->lock);
5130 tg3_irq_quiesce(tp);
5133 static inline void tg3_full_unlock(struct tg3 *tp)
5135 spin_unlock_bh(&tp->lock);
5138 /* One-shot MSI handler - Chip automatically disables interrupt
5139 * after sending MSI so driver doesn't have to do it.
5141 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5143 struct tg3_napi *tnapi = dev_id;
5144 struct tg3 *tp = tnapi->tp;
5146 prefetch(tnapi->hw_status);
5148 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5150 if (likely(!tg3_irq_sync(tp)))
5151 napi_schedule(&tnapi->napi);
5156 /* MSI ISR - No need to check for interrupt sharing and no need to
5157 * flush status block and interrupt mailbox. PCI ordering rules
5158 * guarantee that MSI will arrive after the status block.
5160 static irqreturn_t tg3_msi(int irq, void *dev_id)
5162 struct tg3_napi *tnapi = dev_id;
5163 struct tg3 *tp = tnapi->tp;
5165 prefetch(tnapi->hw_status);
5167 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5169 * Writing any value to intr-mbox-0 clears PCI INTA# and
5170 * chip-internal interrupt pending events.
5171 * Writing non-zero to intr-mbox-0 additionally tells the
5172 * NIC to stop sending us irqs, engaging "in-intr-handler"
5175 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5176 if (likely(!tg3_irq_sync(tp)))
5177 napi_schedule(&tnapi->napi);
5179 return IRQ_RETVAL(1);
5182 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5184 struct tg3_napi *tnapi = dev_id;
5185 struct tg3 *tp = tnapi->tp;
5186 struct tg3_hw_status *sblk = tnapi->hw_status;
5187 unsigned int handled = 1;
5189 /* In INTx mode, it is possible for the interrupt to arrive at
5190 * the CPU before the status block write posted prior to it has landed.
5191 * Reading the PCI State register will confirm whether the
5192 * interrupt is ours and will flush the status block.
5194 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5195 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5196 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5203 * Writing any value to intr-mbox-0 clears PCI INTA# and
5204 * chip-internal interrupt pending events.
5205 * Writing non-zero to intr-mbox-0 additionally tells the
5206 * NIC to stop sending us irqs, engaging "in-intr-handler"
5209 * Flush the mailbox to de-assert the IRQ immediately to prevent
5210 * spurious interrupts. The flush impacts performance but
5211 * excessive spurious interrupts can be worse in some cases.
5213 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5214 if (tg3_irq_sync(tp))
5216 sblk->status &= ~SD_STATUS_UPDATED;
5217 if (likely(tg3_has_work(tnapi))) {
5218 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5219 napi_schedule(&tnapi->napi);
5221 /* No work, shared interrupt perhaps? re-enable
5222 * interrupts, and flush that PCI write
5224 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5228 return IRQ_RETVAL(handled);
5231 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5233 struct tg3_napi *tnapi = dev_id;
5234 struct tg3 *tp = tnapi->tp;
5235 struct tg3_hw_status *sblk = tnapi->hw_status;
5236 unsigned int handled = 1;
5238 /* In INTx mode, it is possible for the interrupt to arrive at
5239 * the CPU before the status block write posted prior to it has landed.
5240 * Reading the PCI State register will confirm whether the
5241 * interrupt is ours and will flush the status block.
5243 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5244 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5245 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5252 * writing any value to intr-mbox-0 clears PCI INTA# and
5253 * chip-internal interrupt pending events.
5254 * writing non-zero to intr-mbox-0 additionally tells the
5255 * NIC to stop sending us irqs, engaging "in-intr-handler"
5258 * Flush the mailbox to de-assert the IRQ immediately to prevent
5259 * spurious interrupts. The flush impacts performance but
5260 * excessive spurious interrupts can be worse in some cases.
5262 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5265 * In a shared interrupt configuration, sometimes other devices'
5266 * interrupts will scream. We record the current status tag here
5267 * so that the above check can report that the screaming interrupts
5268 * are unhandled. Eventually they will be silenced.
5270 tnapi->last_irq_tag = sblk->status_tag;
5272 if (tg3_irq_sync(tp))
5275 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5277 napi_schedule(&tnapi->napi);
5280 return IRQ_RETVAL(handled);
5283 /* ISR for interrupt test */
5284 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5286 struct tg3_napi *tnapi = dev_id;
5287 struct tg3 *tp = tnapi->tp;
5288 struct tg3_hw_status *sblk = tnapi->hw_status;
5290 if ((sblk->status & SD_STATUS_UPDATED) ||
5291 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5292 tg3_disable_ints(tp);
5293 return IRQ_RETVAL(1);
5295 return IRQ_RETVAL(0);
5298 static int tg3_init_hw(struct tg3 *, int);
5299 static int tg3_halt(struct tg3 *, int, int);
5301 /* Restart hardware after configuration changes, self-test, etc.
5302 * Invoked with tp->lock held.
5304 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5305 __releases(tp->lock)
5306 __acquires(tp->lock)
5310 err = tg3_init_hw(tp, reset_phy);
5313 "Failed to re-initialize device, aborting\n");
5314 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5315 tg3_full_unlock(tp);
5316 del_timer_sync(&tp->timer);
5318 tg3_napi_enable(tp);
5320 tg3_full_lock(tp, 0);
5325 #ifdef CONFIG_NET_POLL_CONTROLLER
5326 static void tg3_poll_controller(struct net_device *dev)
5329 struct tg3 *tp = netdev_priv(dev);
5331 for (i = 0; i < tp->irq_cnt; i++)
5332 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5336 static void tg3_reset_task(struct work_struct *work)
5338 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5340 unsigned int restart_timer;
5342 tg3_full_lock(tp, 0);
5344 if (!netif_running(tp->dev)) {
5345 tg3_full_unlock(tp);
5349 tg3_full_unlock(tp);
5355 tg3_full_lock(tp, 1);
5357 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5358 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5360 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5361 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5362 tp->write32_rx_mbox = tg3_write_flush_reg32;
5363 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5364 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5367 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5368 err = tg3_init_hw(tp, 1);
5372 tg3_netif_start(tp);
5375 mod_timer(&tp->timer, jiffies + 1);
5378 tg3_full_unlock(tp);
5384 static void tg3_dump_short_state(struct tg3 *tp)
5386 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5387 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5388 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5389 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5392 static void tg3_tx_timeout(struct net_device *dev)
5394 struct tg3 *tp = netdev_priv(dev);
5396 if (netif_msg_tx_err(tp)) {
5397 netdev_err(dev, "transmit timed out, resetting\n");
5398 tg3_dump_short_state(tp);
5401 schedule_work(&tp->reset_task);
5404 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5405 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5407 u32 base = (u32) mapping & 0xffffffff;
5409 return (base > 0xffffdcc0) && (base + len + 8 < base);
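/* Added worked example (illustrative, not in the original source): for a
 * hypothetical mapping whose low 32 bits are base = 0xffffff00 and a
 * length of 0x200, base + len + 8 = 0x100000108, which truncates to
 * 0x00000108 < base, so the buffer straddles a 4GB boundary and must take
 * the workaround path.  The base > 0xffffdcc0 pre-check merely skips
 * buffers that start far enough below the boundary that they cannot wrap
 * for the frame sizes this driver posts.
 */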
5412 /* Test for DMA addresses > 40-bit */
5413 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5416 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5417 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5418 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5425 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5427 /* Work around 4GB and 40-bit hardware DMA bugs. */
5428 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5429 struct sk_buff *skb, u32 last_plus_one,
5430 u32 *start, u32 base_flags, u32 mss)
5432 struct tg3 *tp = tnapi->tp;
5433 struct sk_buff *new_skb;
5434 dma_addr_t new_addr = 0;
5438 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5439 new_skb = skb_copy(skb, GFP_ATOMIC);
5441 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5443 new_skb = skb_copy_expand(skb,
5444 skb_headroom(skb) + more_headroom,
5445 skb_tailroom(skb), GFP_ATOMIC);
5451 /* New SKB is guaranteed to be linear. */
5453 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5455 /* Make sure the mapping succeeded */
5456 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5458 dev_kfree_skb(new_skb);
5461 /* Make sure new skb does not cross any 4G boundaries.
5462 * Drop the packet if it does.
5464 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5465 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5466 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5469 dev_kfree_skb(new_skb);
5472 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5473 base_flags, 1 | (mss << 1));
5474 *start = NEXT_TX(entry);
5478 /* Now clean up the sw ring entries. */
5480 while (entry != last_plus_one) {
5484 len = skb_headlen(skb);
5486 len = skb_shinfo(skb)->frags[i-1].size;
5488 pci_unmap_single(tp->pdev,
5489 dma_unmap_addr(&tnapi->tx_buffers[entry],
5491 len, PCI_DMA_TODEVICE);
5493 tnapi->tx_buffers[entry].skb = new_skb;
5494 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5497 tnapi->tx_buffers[entry].skb = NULL;
5499 entry = NEXT_TX(entry);
5508 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5509 dma_addr_t mapping, int len, u32 flags,
5512 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5513 int is_end = (mss_and_is_end & 0x1);
5514 u32 mss = (mss_and_is_end >> 1);
5518 flags |= TXD_FLAG_END;
5519 if (flags & TXD_FLAG_VLAN) {
5520 vlan_tag = flags >> 16;
5523 vlan_tag |= (mss << TXD_MSS_SHIFT);
5525 txd->addr_hi = ((u64) mapping >> 32);
5526 txd->addr_lo = ((u64) mapping & 0xffffffff);
5527 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5528 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
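/* Added note (illustrative, not in the original source): callers fold the
 * MSS and the "last descriptor" flag into the single mss_and_is_end
 * argument decoded above.  For the final fragment of a TSO packet with an
 * MSS of 1448, for example:
 *
 *	mss_and_is_end = (1448 << 1) | 1;	-> 0xb51
 *
 * which this function splits back into mss = 1448 and is_end = 1.
 */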
5531 /* hard_start_xmit for devices that don't have any bugs and
5532 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5534 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5535 struct net_device *dev)
5537 struct tg3 *tp = netdev_priv(dev);
5538 u32 len, entry, base_flags, mss;
5540 struct tg3_napi *tnapi;
5541 struct netdev_queue *txq;
5542 unsigned int i, last;
5544 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5545 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5546 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5549 /* We are running in BH disabled context with netif_tx_lock
5550 * and TX reclaim runs via tp->napi.poll inside of a software
5551 * interrupt. Furthermore, IRQ processing runs lockless so we have
5552 * no IRQ context deadlocks to worry about either. Rejoice!
5554 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5555 if (!netif_tx_queue_stopped(txq)) {
5556 netif_tx_stop_queue(txq);
5558 /* This is a hard error, log it. */
5560 "BUG! Tx Ring full when queue awake!\n");
5562 return NETDEV_TX_BUSY;
5565 entry = tnapi->tx_prod;
5567 mss = skb_shinfo(skb)->gso_size;
5569 int tcp_opt_len, ip_tcp_len;
5572 if (skb_header_cloned(skb) &&
5573 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5578 if (skb_is_gso_v6(skb)) {
5579 hdrlen = skb_headlen(skb) - ETH_HLEN;
5581 struct iphdr *iph = ip_hdr(skb);
5583 tcp_opt_len = tcp_optlen(skb);
5584 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5587 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5588 hdrlen = ip_tcp_len + tcp_opt_len;
5591 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5592 mss |= (hdrlen & 0xc) << 12;
5594 base_flags |= 0x00000010;
5595 base_flags |= (hdrlen & 0x3e0) << 5;
5599 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5600 TXD_FLAG_CPU_POST_DMA);
5602 tcp_hdr(skb)->check = 0;
5604 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5605 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5608 #if TG3_VLAN_TAG_USED
5609 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5610 base_flags |= (TXD_FLAG_VLAN |
5611 (vlan_tx_tag_get(skb) << 16));
5614 len = skb_headlen(skb);
5616 /* Queue skb data, a.k.a. the main skb fragment. */
5617 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5618 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5623 tnapi->tx_buffers[entry].skb = skb;
5624 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5626 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5627 !mss && skb->len > ETH_DATA_LEN)
5628 base_flags |= TXD_FLAG_JMB_PKT;
5630 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5631 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5633 entry = NEXT_TX(entry);
5635 /* Now loop through additional data fragments, and queue them. */
5636 if (skb_shinfo(skb)->nr_frags > 0) {
5637 last = skb_shinfo(skb)->nr_frags - 1;
5638 for (i = 0; i <= last; i++) {
5639 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5642 mapping = pci_map_page(tp->pdev,
5645 len, PCI_DMA_TODEVICE);
5646 if (pci_dma_mapping_error(tp->pdev, mapping))
5649 tnapi->tx_buffers[entry].skb = NULL;
5650 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5653 tg3_set_txd(tnapi, entry, mapping, len,
5654 base_flags, (i == last) | (mss << 1));
5656 entry = NEXT_TX(entry);
5660 /* Packets are ready, update Tx producer idx local and on card. */
5661 tw32_tx_mbox(tnapi->prodmbox, entry);
5663 tnapi->tx_prod = entry;
5664 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5665 netif_tx_stop_queue(txq);
5667 /* netif_tx_stop_queue() must be done before checking
5668 * tx index in tg3_tx_avail() below, because in
5669 * tg3_tx(), we update tx index before checking for
5670 * netif_tx_queue_stopped().
5673 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5674 netif_tx_wake_queue(txq);
5680 return NETDEV_TX_OK;
5684 entry = tnapi->tx_prod;
5685 tnapi->tx_buffers[entry].skb = NULL;
5686 pci_unmap_single(tp->pdev,
5687 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5690 for (i = 0; i <= last; i++) {
5691 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5692 entry = NEXT_TX(entry);
5694 pci_unmap_page(tp->pdev,
5695 dma_unmap_addr(&tnapi->tx_buffers[entry],
5697 frag->size, PCI_DMA_TODEVICE);
5701 return NETDEV_TX_OK;
5704 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5705 struct net_device *);
5707 /* Use GSO to work around a rare TSO bug that may be triggered when the
5708 * TSO header is greater than 80 bytes.
5710 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5712 struct sk_buff *segs, *nskb;
5713 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5715 /* Estimate the number of fragments in the worst case */
5716 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5717 netif_stop_queue(tp->dev);
5719 /* netif_tx_stop_queue() must be done before checking
5720 * tx index in tg3_tx_avail() below, because in
5721 * tg3_tx(), we update tx index before checking for
5722 * netif_tx_queue_stopped().
5725 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5726 return NETDEV_TX_BUSY;
5728 netif_wake_queue(tp->dev);
5731 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5733 goto tg3_tso_bug_end;
5739 tg3_start_xmit_dma_bug(nskb, tp->dev);
5745 return NETDEV_TX_OK;
5748 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5749 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5751 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5752 struct net_device *dev)
5754 struct tg3 *tp = netdev_priv(dev);
5755 u32 len, entry, base_flags, mss;
5756 int would_hit_hwbug;
5758 struct tg3_napi *tnapi;
5759 struct netdev_queue *txq;
5760 unsigned int i, last;
5762 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5763 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5764 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5767 /* We are running in BH disabled context with netif_tx_lock
5768 * and TX reclaim runs via tp->napi.poll inside of a software
5769 * interrupt. Furthermore, IRQ processing runs lockless so we have
5770 * no IRQ context deadlocks to worry about either. Rejoice!
5772 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5773 if (!netif_tx_queue_stopped(txq)) {
5774 netif_tx_stop_queue(txq);
5776 /* This is a hard error, log it. */
5778 "BUG! Tx Ring full when queue awake!\n");
5780 return NETDEV_TX_BUSY;
5783 entry = tnapi->tx_prod;
5785 if (skb->ip_summed == CHECKSUM_PARTIAL)
5786 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5788 mss = skb_shinfo(skb)->gso_size;
5791 u32 tcp_opt_len, hdr_len;
5793 if (skb_header_cloned(skb) &&
5794 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5800 tcp_opt_len = tcp_optlen(skb);
5802 if (skb_is_gso_v6(skb)) {
5803 hdr_len = skb_headlen(skb) - ETH_HLEN;
5807 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5808 hdr_len = ip_tcp_len + tcp_opt_len;
5811 iph->tot_len = htons(mss + hdr_len);
5814 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5815 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5816 return tg3_tso_bug(tp, skb);
5818 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5819 TXD_FLAG_CPU_POST_DMA);
5821 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5822 tcp_hdr(skb)->check = 0;
5823 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5825 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5830 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5831 mss |= (hdr_len & 0xc) << 12;
5833 base_flags |= 0x00000010;
5834 base_flags |= (hdr_len & 0x3e0) << 5;
5835 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5836 mss |= hdr_len << 9;
5837 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5838 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5839 if (tcp_opt_len || iph->ihl > 5) {
5842 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5843 mss |= (tsflags << 11);
5846 if (tcp_opt_len || iph->ihl > 5) {
5849 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5850 base_flags |= tsflags << 12;
5854 #if TG3_VLAN_TAG_USED
5855 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5856 base_flags |= (TXD_FLAG_VLAN |
5857 (vlan_tx_tag_get(skb) << 16));
5860 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5861 !mss && skb->len > ETH_DATA_LEN)
5862 base_flags |= TXD_FLAG_JMB_PKT;
5864 len = skb_headlen(skb);
5866 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5867 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5872 tnapi->tx_buffers[entry].skb = skb;
5873 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5875 would_hit_hwbug = 0;
5877 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5878 would_hit_hwbug = 1;
5880 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5881 tg3_4g_overflow_test(mapping, len))
5882 would_hit_hwbug = 1;
5884 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5885 tg3_40bit_overflow_test(tp, mapping, len))
5886 would_hit_hwbug = 1;
5888 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5889 would_hit_hwbug = 1;
5891 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5892 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5894 entry = NEXT_TX(entry);
5896 /* Now loop through additional data fragments, and queue them. */
5897 if (skb_shinfo(skb)->nr_frags > 0) {
5898 last = skb_shinfo(skb)->nr_frags - 1;
5899 for (i = 0; i <= last; i++) {
5900 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5903 mapping = pci_map_page(tp->pdev,
5906 len, PCI_DMA_TODEVICE);
5908 tnapi->tx_buffers[entry].skb = NULL;
5909 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5911 if (pci_dma_mapping_error(tp->pdev, mapping))
5914 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5916 would_hit_hwbug = 1;
5918 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5919 tg3_4g_overflow_test(mapping, len))
5920 would_hit_hwbug = 1;
5922 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5923 tg3_40bit_overflow_test(tp, mapping, len))
5924 would_hit_hwbug = 1;
5926 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5927 tg3_set_txd(tnapi, entry, mapping, len,
5928 base_flags, (i == last)|(mss << 1));
5930 tg3_set_txd(tnapi, entry, mapping, len,
5931 base_flags, (i == last));
5933 entry = NEXT_TX(entry);
5937 if (would_hit_hwbug) {
5938 u32 last_plus_one = entry;
5941 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5942 start &= (TG3_TX_RING_SIZE - 1);
5944 /* If the workaround fails due to memory/mapping
5945 * failure, silently drop this packet.
5947 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5948 &start, base_flags, mss))
5954 /* Packets are ready, update Tx producer idx local and on card. */
5955 tw32_tx_mbox(tnapi->prodmbox, entry);
5957 tnapi->tx_prod = entry;
5958 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5959 netif_tx_stop_queue(txq);
5961 /* netif_tx_stop_queue() must be done before checking
5962 * tx index in tg3_tx_avail() below, because in
5963 * tg3_tx(), we update tx index before checking for
5964 * netif_tx_queue_stopped().
5967 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5968 netif_tx_wake_queue(txq);
5974 return NETDEV_TX_OK;
5978 entry = tnapi->tx_prod;
5979 tnapi->tx_buffers[entry].skb = NULL;
5980 pci_unmap_single(tp->pdev,
5981 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5984 for (i = 0; i <= last; i++) {
5985 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5986 entry = NEXT_TX(entry);
5988 pci_unmap_page(tp->pdev,
5989 dma_unmap_addr(&tnapi->tx_buffers[entry],
5991 frag->size, PCI_DMA_TODEVICE);
5995 return NETDEV_TX_OK;
5998 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6003 if (new_mtu > ETH_DATA_LEN) {
6004 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6005 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6006 ethtool_op_set_tso(dev, 0);
6008 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6011 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6012 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6013 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6017 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6019 struct tg3 *tp = netdev_priv(dev);
6022 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6025 if (!netif_running(dev)) {
6026 /* We'll just catch it later when the
6029 tg3_set_mtu(dev, tp, new_mtu);
6037 tg3_full_lock(tp, 1);
6039 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6041 tg3_set_mtu(dev, tp, new_mtu);
6043 err = tg3_restart_hw(tp, 0);
6046 tg3_netif_start(tp);
6048 tg3_full_unlock(tp);
6056 static void tg3_rx_prodring_free(struct tg3 *tp,
6057 struct tg3_rx_prodring_set *tpr)
6061 if (tpr != &tp->napi[0].prodring) {
6062 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6063 i = (i + 1) % TG3_RX_RING_SIZE)
6064 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6067 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6068 for (i = tpr->rx_jmb_cons_idx;
6069 i != tpr->rx_jmb_prod_idx;
6070 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
6071 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6079 for (i = 0; i < TG3_RX_RING_SIZE; i++)
6080 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6083 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6084 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
6085 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6090 /* Initialize rx rings for packet processing.
6092 * The chip has been shut down and the driver detached from
6093 * the networking, so no interrupts or new tx packets will
6094 * end up in the driver. tp->{tx,}lock are held and thus
6097 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6098 struct tg3_rx_prodring_set *tpr)
6100 u32 i, rx_pkt_dma_sz;
6102 tpr->rx_std_cons_idx = 0;
6103 tpr->rx_std_prod_idx = 0;
6104 tpr->rx_jmb_cons_idx = 0;
6105 tpr->rx_jmb_prod_idx = 0;
6107 if (tpr != &tp->napi[0].prodring) {
6108 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
6109 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6110 memset(&tpr->rx_jmb_buffers[0], 0,
6111 TG3_RX_JMB_BUFF_RING_SIZE);
6115 /* Zero out all descriptors. */
6116 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
6118 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6119 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6120 tp->dev->mtu > ETH_DATA_LEN)
6121 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6122 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6124 /* Initialize invariants of the rings; we only set this
6125 * stuff once. This works because the card does not
6126 * write into the rx buffer posting rings.
6128 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
6129 struct tg3_rx_buffer_desc *rxd;
6131 rxd = &tpr->rx_std[i];
6132 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6133 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6134 rxd->opaque = (RXD_OPAQUE_RING_STD |
6135 (i << RXD_OPAQUE_INDEX_SHIFT));
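/* Added note (illustrative, not in the original source): the opaque cookie
 * written above is what the chip later echoes back in the status ring, so
 * the receive path can recover both the ring and the slot without any
 * lookup, e.g. for slot 5 of the standard ring:
 *
 *	opaque = RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT);
 *	opaque & RXD_OPAQUE_RING_MASK	-> RXD_OPAQUE_RING_STD
 *	opaque & RXD_OPAQUE_INDEX_MASK	-> the slot, 5 here
 *
 * which is exactly how tg3_rx() decodes desc->opaque.
 */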
6138 /* Now allocate fresh SKBs for each rx ring. */
6139 for (i = 0; i < tp->rx_pending; i++) {
6140 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6141 netdev_warn(tp->dev,
6142 "Using a smaller RX standard ring. Only "
6143 "%d out of %d buffers were allocated "
6144 "successfully\n", i, tp->rx_pending);
6152 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6155 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6157 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6160 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6161 struct tg3_rx_buffer_desc *rxd;
6163 rxd = &tpr->rx_jmb[i].std;
6164 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6165 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6167 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6168 (i << RXD_OPAQUE_INDEX_SHIFT));
6171 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6172 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6173 netdev_warn(tp->dev,
6174 "Using a smaller RX jumbo ring. Only %d "
6175 "out of %d buffers were allocated "
6176 "successfully\n", i, tp->rx_jumbo_pending);
6179 tp->rx_jumbo_pending = i;
6188 tg3_rx_prodring_free(tp, tpr);
6192 static void tg3_rx_prodring_fini(struct tg3 *tp,
6193 struct tg3_rx_prodring_set *tpr)
6195 kfree(tpr->rx_std_buffers);
6196 tpr->rx_std_buffers = NULL;
6197 kfree(tpr->rx_jmb_buffers);
6198 tpr->rx_jmb_buffers = NULL;
6200 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6201 tpr->rx_std, tpr->rx_std_mapping);
6205 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6206 tpr->rx_jmb, tpr->rx_jmb_mapping);
6211 static int tg3_rx_prodring_init(struct tg3 *tp,
6212 struct tg3_rx_prodring_set *tpr)
6214 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6215 if (!tpr->rx_std_buffers)
6218 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6219 &tpr->rx_std_mapping);
6223 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6224 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6226 if (!tpr->rx_jmb_buffers)
6229 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6230 TG3_RX_JUMBO_RING_BYTES,
6231 &tpr->rx_jmb_mapping);
6239 tg3_rx_prodring_fini(tp, tpr);
6243 /* Free up pending packets in all rx/tx rings.
6245 * The chip has been shut down and the driver detached from
6246 * the networking, so no interrupts or new tx packets will
6247 * end up in the driver. tp->{tx,}lock is not held and we are not
6248 * in an interrupt context and thus may sleep.
6250 static void tg3_free_rings(struct tg3 *tp)
6254 for (j = 0; j < tp->irq_cnt; j++) {
6255 struct tg3_napi *tnapi = &tp->napi[j];
6257 tg3_rx_prodring_free(tp, &tnapi->prodring);
6259 if (!tnapi->tx_buffers)
6262 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6263 struct ring_info *txp;
6264 struct sk_buff *skb;
6267 txp = &tnapi->tx_buffers[i];
6275 pci_unmap_single(tp->pdev,
6276 dma_unmap_addr(txp, mapping),
6283 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6284 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6285 pci_unmap_page(tp->pdev,
6286 dma_unmap_addr(txp, mapping),
6287 skb_shinfo(skb)->frags[k].size,
6292 dev_kfree_skb_any(skb);
6297 /* Initialize tx/rx rings for packet processing.
6299 * The chip has been shut down and the driver detached from
6300 * the networking, so no interrupts or new tx packets will
6301 * end up in the driver. tp->{tx,}lock are held and thus
6304 static int tg3_init_rings(struct tg3 *tp)
6308 /* Free up all the SKBs. */
6311 for (i = 0; i < tp->irq_cnt; i++) {
6312 struct tg3_napi *tnapi = &tp->napi[i];
6314 tnapi->last_tag = 0;
6315 tnapi->last_irq_tag = 0;
6316 tnapi->hw_status->status = 0;
6317 tnapi->hw_status->status_tag = 0;
6318 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6323 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6325 tnapi->rx_rcb_ptr = 0;
6327 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6329 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6339 * Must not be invoked with interrupt sources disabled and
6340 * the hardware shut down.
6342 static void tg3_free_consistent(struct tg3 *tp)
6346 for (i = 0; i < tp->irq_cnt; i++) {
6347 struct tg3_napi *tnapi = &tp->napi[i];
6349 if (tnapi->tx_ring) {
6350 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6351 tnapi->tx_ring, tnapi->tx_desc_mapping);
6352 tnapi->tx_ring = NULL;
6355 kfree(tnapi->tx_buffers);
6356 tnapi->tx_buffers = NULL;
6358 if (tnapi->rx_rcb) {
6359 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6361 tnapi->rx_rcb_mapping);
6362 tnapi->rx_rcb = NULL;
6365 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6367 if (tnapi->hw_status) {
6368 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6370 tnapi->status_mapping);
6371 tnapi->hw_status = NULL;
6376 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6377 tp->hw_stats, tp->stats_mapping);
6378 tp->hw_stats = NULL;
6383 * Must not be invoked with interrupt sources disabled and
6384 * the hardware shut down. Can sleep.
6386 static int tg3_alloc_consistent(struct tg3 *tp)
6390 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6391 sizeof(struct tg3_hw_stats),
6392 &tp->stats_mapping);
6396 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6398 for (i = 0; i < tp->irq_cnt; i++) {
6399 struct tg3_napi *tnapi = &tp->napi[i];
6400 struct tg3_hw_status *sblk;
6402 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6404 &tnapi->status_mapping);
6405 if (!tnapi->hw_status)
6408 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6409 sblk = tnapi->hw_status;
6411 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6414 /* If multivector TSS is enabled, vector 0 does not handle
6415 * tx interrupts. Don't allocate any resources for it.
6417 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6418 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6419 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6422 if (!tnapi->tx_buffers)
6425 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6427 &tnapi->tx_desc_mapping);
6428 if (!tnapi->tx_ring)
6433 * When RSS is enabled, the status block format changes
6434 * slightly. The "rx_jumbo_consumer", "reserved",
6435 * and "rx_mini_consumer" members get mapped to the
6436 * other three rx return ring producer indexes.
6440 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6443 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6446 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6449 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6454 * If multivector RSS is enabled, vector 0 does not handle
6455 * rx or tx interrupts. Don't allocate any resources for it.
6457 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6460 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6461 TG3_RX_RCB_RING_BYTES(tp),
6462 &tnapi->rx_rcb_mapping);
6466 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6472 tg3_free_consistent(tp);
6476 #define MAX_WAIT_CNT 1000
6478 /* To stop a block, clear the enable bit and poll till it
6479 * clears. tp->lock is held.
6481 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6486 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6493 /* We can't enable/disable these bits of the
6494 * 5705/5750, just say success.
6507 for (i = 0; i < MAX_WAIT_CNT; i++) {
6510 if ((val & enable_bit) == 0)
6514 if (i == MAX_WAIT_CNT && !silent) {
6515 dev_err(&tp->pdev->dev,
6516 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6524 /* tp->lock is held. */
6525 static int tg3_abort_hw(struct tg3 *tp, int silent)
6529 tg3_disable_ints(tp);
6531 tp->rx_mode &= ~RX_MODE_ENABLE;
6532 tw32_f(MAC_RX_MODE, tp->rx_mode);
6535 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6536 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6537 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6538 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6539 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6540 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6542 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6543 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6544 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6545 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6546 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6547 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6548 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6550 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6551 tw32_f(MAC_MODE, tp->mac_mode);
6554 tp->tx_mode &= ~TX_MODE_ENABLE;
6555 tw32_f(MAC_TX_MODE, tp->tx_mode);
6557 for (i = 0; i < MAX_WAIT_CNT; i++) {
6559 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6562 if (i >= MAX_WAIT_CNT) {
6563 dev_err(&tp->pdev->dev,
6564 "%s timed out, TX_MODE_ENABLE will not clear "
6565 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6569 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6570 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6571 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6573 tw32(FTQ_RESET, 0xffffffff);
6574 tw32(FTQ_RESET, 0x00000000);
6576 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6577 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6579 for (i = 0; i < tp->irq_cnt; i++) {
6580 struct tg3_napi *tnapi = &tp->napi[i];
6581 if (tnapi->hw_status)
6582 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6585 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6590 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6595 /* NCSI does not support APE events */
6596 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6599 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6600 if (apedata != APE_SEG_SIG_MAGIC)
6603 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6604 if (!(apedata & APE_FW_STATUS_READY))
6607 /* Wait for up to 1 millisecond for APE to service previous event. */
6608 for (i = 0; i < 10; i++) {
6609 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6612 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6614 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6615 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6616 event | APE_EVENT_STATUS_EVENT_PENDING);
6618 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6620 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6626 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6627 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6630 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6635 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6639 case RESET_KIND_INIT:
6640 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6641 APE_HOST_SEG_SIG_MAGIC);
6642 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6643 APE_HOST_SEG_LEN_MAGIC);
6644 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6645 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6646 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6647 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6648 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6649 APE_HOST_BEHAV_NO_PHYLOCK);
6650 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6651 TG3_APE_HOST_DRVR_STATE_START);
6653 event = APE_EVENT_STATUS_STATE_START;
6655 case RESET_KIND_SHUTDOWN:
6656 /* With the interface we are currently using,
6657 * APE does not track driver state. Wiping
6658 * out the HOST SEGMENT SIGNATURE forces
6659 * the APE to assume OS absent status.
6661 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6663 if (device_may_wakeup(&tp->pdev->dev) &&
6664 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6665 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6666 TG3_APE_HOST_WOL_SPEED_AUTO);
6667 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6669 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6671 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6673 event = APE_EVENT_STATUS_STATE_UNLOAD;
6675 case RESET_KIND_SUSPEND:
6676 event = APE_EVENT_STATUS_STATE_SUSPEND;
6682 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6684 tg3_ape_send_event(tp, event);
6687 /* tp->lock is held. */
6688 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6690 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6691 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6693 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6695 case RESET_KIND_INIT:
6696 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6700 case RESET_KIND_SHUTDOWN:
6701 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6705 case RESET_KIND_SUSPEND:
6706 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6715 if (kind == RESET_KIND_INIT ||
6716 kind == RESET_KIND_SUSPEND)
6717 tg3_ape_driver_state_change(tp, kind);
6720 /* tp->lock is held. */
6721 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6723 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6725 case RESET_KIND_INIT:
6726 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6727 DRV_STATE_START_DONE);
6730 case RESET_KIND_SHUTDOWN:
6731 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6732 DRV_STATE_UNLOAD_DONE);
6740 if (kind == RESET_KIND_SHUTDOWN)
6741 tg3_ape_driver_state_change(tp, kind);
6744 /* tp->lock is held. */
6745 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6747 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6749 case RESET_KIND_INIT:
6750 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6754 case RESET_KIND_SHUTDOWN:
6755 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6759 case RESET_KIND_SUSPEND:
6760 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6770 static int tg3_poll_fw(struct tg3 *tp)
6775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6776 /* Wait up to 20ms for init done. */
6777 for (i = 0; i < 200; i++) {
6778 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6785 /* Wait for firmware initialization to complete. */
6786 for (i = 0; i < 100000; i++) {
6787 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6788 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6793 /* Chip might not be fitted with firmware. Some Sun onboard
6794 * parts are configured like that. So don't signal the timeout
6795 * of the above loop as an error, but do report the lack of
6796 * running firmware once.
6799 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6800 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6802 netdev_info(tp->dev, "No firmware running\n");
6805 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6806 /* The 57765 A0 needs a little more
6807 * time to do some important work.
6815 /* Save PCI command register before chip reset */
6816 static void tg3_save_pci_state(struct tg3 *tp)
6818 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6821 /* Restore PCI state after chip reset */
6822 static void tg3_restore_pci_state(struct tg3 *tp)
6826 /* Re-enable indirect register accesses. */
6827 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6828 tp->misc_host_ctrl);
6830 /* Set MAX PCI retry to zero. */
6831 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6832 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6833 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6834 val |= PCISTATE_RETRY_SAME_DMA;
6835 /* Allow reads and writes to the APE register and memory space. */
6836 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6837 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6838 PCISTATE_ALLOW_APE_SHMEM_WR |
6839 PCISTATE_ALLOW_APE_PSPACE_WR;
6840 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6842 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6844 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6845 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6846 pcie_set_readrq(tp->pdev, 4096);
6848 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6849 tp->pci_cacheline_sz);
6850 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6855 /* Make sure PCI-X relaxed ordering bit is clear. */
6856 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6859 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6861 pcix_cmd &= ~PCI_X_CMD_ERO;
6862 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6866 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6868 /* Chip reset on 5780 will reset the MSI enable bit,
6869 * so we need to restore it.
6871 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6874 pci_read_config_word(tp->pdev,
6875 tp->msi_cap + PCI_MSI_FLAGS,
6877 pci_write_config_word(tp->pdev,
6878 tp->msi_cap + PCI_MSI_FLAGS,
6879 ctrl | PCI_MSI_FLAGS_ENABLE);
6880 val = tr32(MSGINT_MODE);
6881 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6886 static void tg3_stop_fw(struct tg3 *);
6888 /* tp->lock is held. */
6889 static int tg3_chip_reset(struct tg3 *tp)
6892 void (*write_op)(struct tg3 *, u32, u32);
6897 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6899 /* No matching tg3_nvram_unlock() after this because
6900 * chip reset below will undo the nvram lock.
6902 tp->nvram_lock_cnt = 0;
6904 /* GRC_MISC_CFG core clock reset will clear the memory
6905 * enable bit in PCI register 4 and the MSI enable bit
6906 * on some chips, so we save relevant registers here.
6908 tg3_save_pci_state(tp);
6910 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6911 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6912 tw32(GRC_FASTBOOT_PC, 0);
6915 * We must avoid the readl() that normally takes place.
6916 * It locks machines, causes machine checks, and other
6917 * fun things. So, temporarily disable the 5701
6918 * hardware workaround, while we do the reset.
6920 write_op = tp->write32;
6921 if (write_op == tg3_write_flush_reg32)
6922 tp->write32 = tg3_write32;
6924 /* Prevent the irq handler from reading or writing PCI registers
6925 * during chip reset when the memory enable bit in the PCI command
6926 * register may be cleared. The chip does not generate an interrupt
6927 * at this time, but the irq handler may still be called due to irq
6928 * sharing or irqpoll.
6930 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6931 for (i = 0; i < tp->irq_cnt; i++) {
6932 struct tg3_napi *tnapi = &tp->napi[i];
6933 if (tnapi->hw_status) {
6934 tnapi->hw_status->status = 0;
6935 tnapi->hw_status->status_tag = 0;
6937 tnapi->last_tag = 0;
6938 tnapi->last_irq_tag = 0;
6942 for (i = 0; i < tp->irq_cnt; i++)
6943 synchronize_irq(tp->napi[i].irq_vec);
6945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6946 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6947 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6951 val = GRC_MISC_CFG_CORECLK_RESET;
6953 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6954 /* Force PCIe 1.0a mode */
6955 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6956 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
6957 tr32(TG3_PCIE_PHY_TSTCTL) ==
6958 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
6959 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
6961 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6962 tw32(GRC_MISC_CFG, (1 << 29));
6967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6968 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6969 tw32(GRC_VCPU_EXT_CTRL,
6970 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6973 /* Manage gphy power for all CPMU absent PCIe devices. */
6974 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6975 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
6976 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6978 tw32(GRC_MISC_CFG, val);
6980 /* restore 5701 hardware bug workaround write method */
6981 tp->write32 = write_op;
6983 /* Unfortunately, we have to delay before the PCI read back.
6984 * Some 575X chips even will not respond to a PCI cfg access
6985 * when the reset command is given to the chip.
6987 * How do these hardware designers expect things to work
6988 * properly if the PCI write is posted for a long period
6989 * of time? It is always necessary to have some method by
6990 * which a register read back can occur to push out the posted
6991 * write that performs the reset.
6993 * For most tg3 variants the trick below was working.
6998 /* Flush PCI posted writes. The normal MMIO registers
6999 * are inaccessible at this time so this is the only
7000 * way to do this reliably (actually, this is no longer
7001 * the case, see above). I tried to use indirect
7002 * register read/write but this upset some 5701 variants.
7004 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7008 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
7011 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7015 /* Wait for link training to complete. */
7016 for (i = 0; i < 5000; i++)
7019 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7020 pci_write_config_dword(tp->pdev, 0xc4,
7021 cfg_val | (1 << 15));
7024 /* Clear the "no snoop" and "relaxed ordering" bits. */
7025 pci_read_config_word(tp->pdev,
7026 tp->pcie_cap + PCI_EXP_DEVCTL,
7028 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7029 PCI_EXP_DEVCTL_NOSNOOP_EN);
7031 * Older PCIe devices only support the 128 byte
7032 * MPS setting. Enforce the restriction.
7034 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7035 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7036 pci_write_config_word(tp->pdev,
7037 tp->pcie_cap + PCI_EXP_DEVCTL,
7040 pcie_set_readrq(tp->pdev, 4096);
7042 /* Clear error status */
7043 pci_write_config_word(tp->pdev,
7044 tp->pcie_cap + PCI_EXP_DEVSTA,
7045 PCI_EXP_DEVSTA_CED |
7046 PCI_EXP_DEVSTA_NFED |
7047 PCI_EXP_DEVSTA_FED |
7048 PCI_EXP_DEVSTA_URD);
7051 tg3_restore_pci_state(tp);
7053 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
7056 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7057 val = tr32(MEMARB_MODE);
7058 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7060 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7062 tw32(0x5000, 0x400);
7065 tw32(GRC_MODE, tp->grc_mode);
7067 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7070 tw32(0xc4, val | (1 << 15));
7073 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7074 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7075 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7076 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7077 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7078 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7081 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7082 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7083 tw32_f(MAC_MODE, tp->mac_mode);
7084 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7085 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7086 tw32_f(MAC_MODE, tp->mac_mode);
7087 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7088 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7089 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7090 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7091 tw32_f(MAC_MODE, tp->mac_mode);
7093 tw32_f(MAC_MODE, 0);
7096 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7098 err = tg3_poll_fw(tp);
7104 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7105 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7106 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7107 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
7110 tw32(0x7c00, val | (1 << 25));
7113 /* Reprobe ASF enable state. */
7114 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7115 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7116 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7117 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7120 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7121 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7122 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7123 tp->last_event_jiffies = jiffies;
7124 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7125 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7132 /* tp->lock is held. */
7133 static void tg3_stop_fw(struct tg3 *tp)
7135 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7136 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7137 /* Wait for RX cpu to ACK the previous event. */
7138 tg3_wait_for_event_ack(tp);
7140 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7142 tg3_generate_fw_event(tp);
7144 /* Wait for RX cpu to ACK this event. */
7145 tg3_wait_for_event_ack(tp);
7149 /* tp->lock is held. */
7150 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7156 tg3_write_sig_pre_reset(tp, kind);
7158 tg3_abort_hw(tp, silent);
7159 err = tg3_chip_reset(tp);
7161 __tg3_set_mac_addr(tp, 0);
7163 tg3_write_sig_legacy(tp, kind);
7164 tg3_write_sig_post_reset(tp, kind);
7172 #define RX_CPU_SCRATCH_BASE 0x30000
7173 #define RX_CPU_SCRATCH_SIZE 0x04000
7174 #define TX_CPU_SCRATCH_BASE 0x34000
7175 #define TX_CPU_SCRATCH_SIZE 0x04000
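/* Halt one of the on-chip RISC processors (RX_CPU_BASE or TX_CPU_BASE)
 * by repeatedly writing CPU_MODE_HALT until the halt bit reads back set.
 * The 5906 has no conventional RX CPU, so it is halted through
 * GRC_VCPU_EXT_CTRL instead.
 */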
7177 /* tp->lock is held. */
7178 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7182 BUG_ON(offset == TX_CPU_BASE &&
7183 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7186 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7188 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7191 if (offset == RX_CPU_BASE) {
7192 for (i = 0; i < 10000; i++) {
7193 tw32(offset + CPU_STATE, 0xffffffff);
7194 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7195 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7199 tw32(offset + CPU_STATE, 0xffffffff);
7200 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7203 for (i = 0; i < 10000; i++) {
7204 tw32(offset + CPU_STATE, 0xffffffff);
7205 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7206 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7212 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7213 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7217 /* Clear firmware's nvram arbitration. */
7218 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7219 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
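/* Descriptor for a firmware image to be loaded into NIC scratch memory:
 * fw_base is the load/start address taken from the blob header, fw_len is
 * the byte length of the loadable payload, and fw_data points at the
 * big-endian payload words.
 */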
7224 unsigned int fw_base;
7225 unsigned int fw_len;
7226 const __be32 *fw_data;
7229 /* tp->lock is held. */
7230 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7231 int cpu_scratch_size, struct fw_info *info)
7233 int err, lock_err, i;
7234 void (*write_op)(struct tg3 *, u32, u32);
7236 if (cpu_base == TX_CPU_BASE &&
7237 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7239 "%s: Trying to load TX cpu firmware which is 5705\n",
7244 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7245 write_op = tg3_write_mem;
7247 write_op = tg3_write_indirect_reg32;
7249 /* It is possible that bootcode is still loading at this point.
7250 * Get the nvram lock first before halting the cpu.
7252 lock_err = tg3_nvram_lock(tp);
7253 err = tg3_halt_cpu(tp, cpu_base);
7255 tg3_nvram_unlock(tp);
7259 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7260 write_op(tp, cpu_scratch_base + i, 0);
7261 tw32(cpu_base + CPU_STATE, 0xffffffff);
7262 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7263 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7264 write_op(tp, (cpu_scratch_base +
7265 (info->fw_base & 0xffff) +
7267 be32_to_cpu(info->fw_data[i]));
7275 /* tp->lock is held. */
7276 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7278 struct fw_info info;
7279 const __be32 *fw_data;
7282 fw_data = (void *)tp->fw->data;
7284 /* Firmware blob starts with version numbers, followed by
7285 start address and length. We are setting complete length.
7286 length = end_address_of_bss - start_address_of_text.
7287 Remainder is the blob to be loaded contiguously
7288 from start address. */
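/* Concretely, as used below: fw_data[0] carries the version word,
 * fw_data[1] the start address, fw_data[2] the full length (text + bss),
 * and the loadable payload begins at fw_data[3]; hence the "size - 12"
 * used when computing the payload length.
 */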
7290 info.fw_base = be32_to_cpu(fw_data[1]);
7291 info.fw_len = tp->fw->size - 12;
7292 info.fw_data = &fw_data[3];
7294 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7295 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7300 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7301 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7306 /* Now startup only the RX cpu. */
7307 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7308 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7310 for (i = 0; i < 5; i++) {
7311 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7313 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7314 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7315 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7319 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7320 "should be %08x\n", __func__,
7321 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7324 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7325 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7330 /* 5705 needs a special version of the TSO firmware. */
7332 /* tp->lock is held. */
7333 static int tg3_load_tso_firmware(struct tg3 *tp)
7335 struct fw_info info;
7336 const __be32 *fw_data;
7337 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7340 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7343 fw_data = (void *)tp->fw->data;
7345 /* Firmware blob starts with version numbers, followed by
7346 start address and length. We are setting complete length.
7347 length = end_address_of_bss - start_address_of_text.
7348 Remainder is the blob to be loaded contiguously
7349 from start address. */
7351 info.fw_base = be32_to_cpu(fw_data[1]);
7352 cpu_scratch_size = tp->fw_len;
7353 info.fw_len = tp->fw->size - 12;
7354 info.fw_data = &fw_data[3];
7356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7357 cpu_base = RX_CPU_BASE;
7358 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7360 cpu_base = TX_CPU_BASE;
7361 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7362 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7365 err = tg3_load_firmware_cpu(tp, cpu_base,
7366 cpu_scratch_base, cpu_scratch_size,
7371 /* Now startup the cpu. */
7372 tw32(cpu_base + CPU_STATE, 0xffffffff);
7373 tw32_f(cpu_base + CPU_PC, info.fw_base);
7375 for (i = 0; i < 5; i++) {
7376 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7378 tw32(cpu_base + CPU_STATE, 0xffffffff);
7379 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7380 tw32_f(cpu_base + CPU_PC, info.fw_base);
7385 "%s fails to set CPU PC, is %08x should be %08x\n",
7386 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7389 tw32(cpu_base + CPU_STATE, 0xffffffff);
7390 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7395 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7397 struct tg3 *tp = netdev_priv(dev);
7398 struct sockaddr *addr = p;
7399 int err = 0, skip_mac_1 = 0;
7401 if (!is_valid_ether_addr(addr->sa_data))
7404 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7406 if (!netif_running(dev))
7409 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7410 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7412 addr0_high = tr32(MAC_ADDR_0_HIGH);
7413 addr0_low = tr32(MAC_ADDR_0_LOW);
7414 addr1_high = tr32(MAC_ADDR_1_HIGH);
7415 addr1_low = tr32(MAC_ADDR_1_LOW);
7417 /* Skip MAC addr 1 if ASF is using it. */
7418 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7419 !(addr1_high == 0 && addr1_low == 0))
7422 spin_lock_bh(&tp->lock);
7423 __tg3_set_mac_addr(tp, skip_mac_1);
7424 spin_unlock_bh(&tp->lock);
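/* A TG3_BDINFO block in NIC SRAM describes one buffer-descriptor ring:
 * the 64-bit host DMA address of the ring (high and low halves), a
 * maxlen/flags word (the ring length shifted into the MAXLEN field plus
 * attribute flags), and, on chips without the 5705_PLUS flag, the ring's
 * location in NIC on-board memory.
 */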
7429 /* tp->lock is held. */
7430 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7431 dma_addr_t mapping, u32 maxlen_flags,
7435 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7436 ((u64) mapping >> 32));
7438 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7439 ((u64) mapping & 0xffffffff));
7441 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7444 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7446 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7450 static void __tg3_set_rx_mode(struct net_device *);
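/* Program the host coalescing engine from an ethtool_coalesce structure.
 * Vector 0 uses the base HOSTCC_* registers; each additional MSI-X vector
 * has its own register block starting at the *_VEC1 offsets, spaced
 * 0x18 bytes apart, which is why the loops below index by i * 0x18.
 */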
7451 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7455 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7456 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7457 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7458 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7460 tw32(HOSTCC_TXCOL_TICKS, 0);
7461 tw32(HOSTCC_TXMAX_FRAMES, 0);
7462 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7465 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7466 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7467 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7468 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7470 tw32(HOSTCC_RXCOL_TICKS, 0);
7471 tw32(HOSTCC_RXMAX_FRAMES, 0);
7472 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7475 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7476 u32 val = ec->stats_block_coalesce_usecs;
7478 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7479 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7481 if (!netif_carrier_ok(tp->dev))
7484 tw32(HOSTCC_STAT_COAL_TICKS, val);
7487 for (i = 0; i < tp->irq_cnt - 1; i++) {
7490 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7491 tw32(reg, ec->rx_coalesce_usecs);
7492 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7493 tw32(reg, ec->rx_max_coalesced_frames);
7494 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7495 tw32(reg, ec->rx_max_coalesced_frames_irq);
7497 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7498 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7499 tw32(reg, ec->tx_coalesce_usecs);
7500 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7501 tw32(reg, ec->tx_max_coalesced_frames);
7502 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7503 tw32(reg, ec->tx_max_coalesced_frames_irq);
7507 for (; i < tp->irq_max - 1; i++) {
7508 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7509 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7510 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7512 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7513 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7514 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7515 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
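/* Quiesce and reinitialize the ring control blocks: disable every send
 * and receive-return ring beyond the first, clear the status blocks and
 * mailbox registers, and then repopulate the BDINFO entries for the rings
 * each NAPI context actually owns.
 */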
7520 /* tp->lock is held. */
7521 static void tg3_rings_reset(struct tg3 *tp)
7524 u32 stblk, txrcb, rxrcb, limit;
7525 struct tg3_napi *tnapi = &tp->napi[0];
7527 /* Disable all transmit rings but the first. */
7528 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7529 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7530 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7531 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7533 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7535 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7536 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7537 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7538 BDINFO_FLAGS_DISABLED);
7541 /* Disable all receive return rings but the first. */
7542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7543 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7544 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7545 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7546 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7547 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7549 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7551 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7553 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7554 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7555 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7556 BDINFO_FLAGS_DISABLED);
7558 /* Disable interrupts */
7559 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7561 /* Zero mailbox registers. */
7562 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7563 for (i = 1; i < tp->irq_max; i++) {
7564 tp->napi[i].tx_prod = 0;
7565 tp->napi[i].tx_cons = 0;
7566 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7567 tw32_mailbox(tp->napi[i].prodmbox, 0);
7568 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7569 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7571 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7572 tw32_mailbox(tp->napi[0].prodmbox, 0);
7574 tp->napi[0].tx_prod = 0;
7575 tp->napi[0].tx_cons = 0;
7576 tw32_mailbox(tp->napi[0].prodmbox, 0);
7577 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7580 /* Make sure the NIC-based send BD rings are disabled. */
7581 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7582 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7583 for (i = 0; i < 16; i++)
7584 tw32_tx_mbox(mbox + i * 8, 0);
7587 txrcb = NIC_SRAM_SEND_RCB;
7588 rxrcb = NIC_SRAM_RCV_RET_RCB;
7590 /* Clear status block in ram. */
7591 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7593 /* Set status block DMA address */
7594 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7595 ((u64) tnapi->status_mapping >> 32));
7596 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7597 ((u64) tnapi->status_mapping & 0xffffffff));
7599 if (tnapi->tx_ring) {
7600 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7601 (TG3_TX_RING_SIZE <<
7602 BDINFO_FLAGS_MAXLEN_SHIFT),
7603 NIC_SRAM_TX_BUFFER_DESC);
7604 txrcb += TG3_BDINFO_SIZE;
7607 if (tnapi->rx_rcb) {
7608 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7609 (TG3_RX_RCB_RING_SIZE(tp) <<
7610 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7611 rxrcb += TG3_BDINFO_SIZE;
7614 stblk = HOSTCC_STATBLCK_RING1;
7616 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7617 u64 mapping = (u64)tnapi->status_mapping;
7618 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7619 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7621 /* Clear status block in ram. */
7622 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7624 if (tnapi->tx_ring) {
7625 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7626 (TG3_TX_RING_SIZE <<
7627 BDINFO_FLAGS_MAXLEN_SHIFT),
7628 NIC_SRAM_TX_BUFFER_DESC);
7629 txrcb += TG3_BDINFO_SIZE;
7632 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7633 (TG3_RX_RCB_RING_SIZE(tp) <<
7634 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7637 rxrcb += TG3_BDINFO_SIZE;
7641 /* tp->lock is held. */
7642 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7644 u32 val, rdmac_mode;
7646 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7648 tg3_disable_ints(tp);
7652 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7654 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7655 tg3_abort_hw(tp, 1);
7660 err = tg3_chip_reset(tp);
7664 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7666 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7667 val = tr32(TG3_CPMU_CTRL);
7668 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7669 tw32(TG3_CPMU_CTRL, val);
7671 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7672 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7673 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7674 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7676 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7677 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7678 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7679 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7681 val = tr32(TG3_CPMU_HST_ACC);
7682 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7683 val |= CPMU_HST_ACC_MACCLK_6_25;
7684 tw32(TG3_CPMU_HST_ACC, val);
7687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7688 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7689 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7690 PCIE_PWR_MGMT_L1_THRESH_4MS;
7691 tw32(PCIE_PWR_MGMT_THRESH, val);
7693 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7694 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7696 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7698 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7699 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7702 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7703 u32 grc_mode = tr32(GRC_MODE);
7705 /* Access the lower 1K of PL PCIE block registers. */
7706 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7707 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7709 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7710 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7711 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7713 tw32(GRC_MODE, grc_mode);
7716 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7717 u32 grc_mode = tr32(GRC_MODE);
7719 /* Access the lower 1K of PL PCIE block registers. */
7720 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7721 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7723 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
7724 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7725 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7727 tw32(GRC_MODE, grc_mode);
7729 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7730 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7731 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7732 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7735 /* This works around an issue with Athlon chipsets on
7736 * B3 tigon3 silicon. This bit has no effect on any
7737 * other revision. But do not set this on PCI Express
7738 * chips and don't even touch the clocks if the CPMU is present.
7740 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7741 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7742 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7743 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7746 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7747 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7748 val = tr32(TG3PCI_PCISTATE);
7749 val |= PCISTATE_RETRY_SAME_DMA;
7750 tw32(TG3PCI_PCISTATE, val);
7753 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7754 /* Allow reads and writes to the
7755 * APE register and memory space.
7757 val = tr32(TG3PCI_PCISTATE);
7758 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7759 PCISTATE_ALLOW_APE_SHMEM_WR |
7760 PCISTATE_ALLOW_APE_PSPACE_WR;
7761 tw32(TG3PCI_PCISTATE, val);
7764 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7765 /* Enable some hw fixes. */
7766 val = tr32(TG3PCI_MSI_DATA);
7767 val |= (1 << 26) | (1 << 28) | (1 << 29);
7768 tw32(TG3PCI_MSI_DATA, val);
7771 /* Descriptor ring init may make accesses to the
7772 * NIC SRAM area to setup the TX descriptors, so we
7773 * can only do this after the hardware has been
7774 * successfully reset.
7776 err = tg3_init_rings(tp);
7780 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7781 val = tr32(TG3PCI_DMA_RW_CTRL) &
7782 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7783 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7784 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7785 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7786 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7787 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7788 /* This value is determined during the probe time DMA
7789 * engine test, tg3_test_dma.
7791 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7794 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7795 GRC_MODE_4X_NIC_SEND_RINGS |
7796 GRC_MODE_NO_TX_PHDR_CSUM |
7797 GRC_MODE_NO_RX_PHDR_CSUM);
7798 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7800 /* Pseudo-header checksum is done by hardware logic and not
7801 * the offload processors, so make the chip do the pseudo-
7802 * header checksums on receive. For transmit it is more
7803 * convenient to do the pseudo-header checksum in software
7804 * as Linux does that on transmit for us in all cases.
7806 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7810 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7812 /* Set up the timer prescaler register. The clock is always 66 MHz. */
7813 val = tr32(GRC_MISC_CFG);
7815 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7816 tw32(GRC_MISC_CFG, val);
7818 /* Initialize MBUF/DESC pool. */
7819 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7821 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7822 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7824 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7826 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7827 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7828 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7829 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7832 fw_len = tp->fw_len;
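/* Round the firmware footprint up to the next 128-byte boundary so the
 * MBUF pool can be placed immediately after the firmware image in NIC
 * SRAM (NIC_SRAM_MBUF_POOL_BASE5705 + fw_len below).
 */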
7833 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7834 tw32(BUFMGR_MB_POOL_ADDR,
7835 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7836 tw32(BUFMGR_MB_POOL_SIZE,
7837 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7840 if (tp->dev->mtu <= ETH_DATA_LEN) {
7841 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7842 tp->bufmgr_config.mbuf_read_dma_low_water);
7843 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7844 tp->bufmgr_config.mbuf_mac_rx_low_water);
7845 tw32(BUFMGR_MB_HIGH_WATER,
7846 tp->bufmgr_config.mbuf_high_water);
7848 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7849 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7850 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7851 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7852 tw32(BUFMGR_MB_HIGH_WATER,
7853 tp->bufmgr_config.mbuf_high_water_jumbo);
7855 tw32(BUFMGR_DMA_LOW_WATER,
7856 tp->bufmgr_config.dma_low_water);
7857 tw32(BUFMGR_DMA_HIGH_WATER,
7858 tp->bufmgr_config.dma_high_water);
7860 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
7861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7862 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
7863 tw32(BUFMGR_MODE, val);
7864 for (i = 0; i < 2000; i++) {
7865 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7870 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
7874 /* Setup replenish threshold. */
7875 val = tp->rx_pending / 8;
7876 if (val == 0)
7877 val = 1;
7878 else if (val > tp->rx_std_max_post)
7879 val = tp->rx_std_max_post;
7880 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7881 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7882 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7884 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7885 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7888 tw32(RCVBDI_STD_THRESH, val);
7890 /* Initialize TG3_BDINFO's at:
7891 * RCVDBDI_STD_BD: standard eth size rx ring
7892 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7893 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7896 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7897 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7898 * ring attribute flags
7899 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7901 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7902 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7904 * The size of each ring is fixed in the firmware, but the location is configurable. */
7907 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7908 ((u64) tpr->rx_std_mapping >> 32));
7909 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7910 ((u64) tpr->rx_std_mapping & 0xffffffff));
7911 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7912 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
7913 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7914 NIC_SRAM_RX_BUFFER_DESC);
7916 /* Disable the mini ring */
7917 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7918 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7919 BDINFO_FLAGS_DISABLED);
7921 /* Program the jumbo buffer descriptor ring control
7922 * blocks on those devices that have them.
7924 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7925 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7926 /* Setup replenish threshold. */
7927 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7929 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7930 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7931 ((u64) tpr->rx_jmb_mapping >> 32));
7932 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7933 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7934 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7935 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7936 BDINFO_FLAGS_USE_EXT_RECV);
7937 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
7938 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7939 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7940 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7942 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7943 BDINFO_FLAGS_DISABLED);
7946 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7947 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7948 (TG3_RX_STD_DMA_SZ << 2);
7950 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
7952 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7954 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7956 tpr->rx_std_prod_idx = tp->rx_pending;
7957 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7959 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7960 tp->rx_jumbo_pending : 0;
7961 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7963 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7964 tw32(STD_REPLENISH_LWM, 32);
7965 tw32(JMB_REPLENISH_LWM, 16);
7968 tg3_rings_reset(tp);
7970 /* Initialize MAC address and backoff seed. */
7971 __tg3_set_mac_addr(tp, 0);
7973 /* MTU + ethernet header + FCS + optional VLAN tag */
7974 tw32(MAC_RX_MTU_SIZE,
7975 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
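/* For the default 1500-byte MTU this evaluates to 1500 + 14 (ETH_HLEN) +
 * 4 (FCS) + 4 (VLAN tag) = 1522 bytes, the largest VLAN-tagged frame the
 * MAC will accept.
 */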
7977 /* The slot time is changed by tg3_setup_phy if we
7978 * run at gigabit with half duplex.
7980 tw32(MAC_TX_LENGTHS,
7981 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7982 (6 << TX_LENGTHS_IPG_SHIFT) |
7983 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7985 /* Receive rules. */
7986 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7987 tw32(RCVLPC_CONFIG, 0x0181);
7989 /* Calculate RDMAC_MODE setting early, we need it to determine
7990 * the RCVLPC_STATE_ENABLE mask.
7992 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7993 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7994 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7995 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7996 RDMAC_MODE_LNGREAD_ENAB);
7998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8000 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8003 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8005 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8006 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8007 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8009 /* If statement applies to 5705 and 5750 PCI devices only */
8010 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8011 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8012 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
8013 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8015 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8016 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8017 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8018 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8022 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8023 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8025 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8026 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8028 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8031 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8033 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8037 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8038 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8039 tw32(TG3_RDMA_RSRVCTRL_REG,
8040 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8044 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8045 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8046 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8047 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8050 /* Receive/send statistics. */
8051 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8052 val = tr32(RCVLPC_STATS_ENABLE);
8053 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8054 tw32(RCVLPC_STATS_ENABLE, val);
8055 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8056 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8057 val = tr32(RCVLPC_STATS_ENABLE);
8058 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8059 tw32(RCVLPC_STATS_ENABLE, val);
8061 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8063 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8064 tw32(SNDDATAI_STATSENAB, 0xffffff);
8065 tw32(SNDDATAI_STATSCTRL,
8066 (SNDDATAI_SCTRL_ENABLE |
8067 SNDDATAI_SCTRL_FASTUPD));
8069 /* Setup host coalescing engine. */
8070 tw32(HOSTCC_MODE, 0);
8071 for (i = 0; i < 2000; i++) {
8072 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8077 __tg3_set_coalesce(tp, &tp->coal);
8079 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8080 /* Status/statistics block address. See tg3_timer,
8081 * the tg3_periodic_fetch_stats call there, and
8082 * tg3_get_stats to see how this works for 5705/5750 chips.
8084 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8085 ((u64) tp->stats_mapping >> 32));
8086 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8087 ((u64) tp->stats_mapping & 0xffffffff));
8088 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8090 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8092 /* Clear statistics and status block memory areas */
8093 for (i = NIC_SRAM_STATS_BLK;
8094 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8096 tg3_write_mem(tp, i, 0);
8101 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8103 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8104 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8105 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8106 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8108 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8109 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8110 /* reset to prevent losing 1st rx packet intermittently */
8111 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8115 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8116 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8119 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8120 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8121 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8122 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8123 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8124 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8125 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8128 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8129 * If TG3_FLG2_IS_NIC is zero, we should read the
8130 * register to preserve the GPIO settings for LOMs. The GPIOs,
8131 * whether used as inputs or outputs, are set by boot code after reset. */
8134 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8137 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8138 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8139 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8142 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8143 GRC_LCLCTRL_GPIO_OUTPUT3;
8145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8146 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8148 tp->grc_local_ctrl &= ~gpio_mask;
8149 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8151 /* GPIO1 must be driven high for eeprom write protect */
8152 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8153 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8154 GRC_LCLCTRL_GPIO_OUTPUT1);
8156 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8159 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8160 val = tr32(MSGINT_MODE);
8161 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8162 tw32(MSGINT_MODE, val);
8165 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8166 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8170 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8171 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8172 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8173 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8174 WDMAC_MODE_LNGREAD_ENAB);
8176 /* If statement applies to 5705 and 5750 PCI devices only */
8177 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8178 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8180 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8181 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8182 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8184 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8185 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8186 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8187 val |= WDMAC_MODE_RX_ACCEL;
8191 /* Enable host coalescing bug fix */
8192 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8193 val |= WDMAC_MODE_STATUS_TAG_FIX;
8195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8196 val |= WDMAC_MODE_BURST_ALL_DATA;
8198 tw32_f(WDMAC_MODE, val);
8201 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8204 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8206 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8207 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8208 pcix_cmd |= PCI_X_CMD_READ_2K;
8209 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8210 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8211 pcix_cmd |= PCI_X_CMD_READ_2K;
8213 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8217 tw32_f(RDMAC_MODE, rdmac_mode);
8220 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8221 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8222 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8226 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8228 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8230 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8231 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8232 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
8233 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8234 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8235 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8236 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8237 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8238 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8239 tw32(SNDBDI_MODE, val);
8240 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8242 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8243 err = tg3_load_5701_a0_firmware_fix(tp);
8248 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8249 err = tg3_load_tso_firmware(tp);
8254 tp->tx_mode = TX_MODE_ENABLE;
8255 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8257 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8258 tw32_f(MAC_TX_MODE, tp->tx_mode);
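/* When RSS is enabled, fill the 128-entry indirection table so that hash
 * buckets are spread round-robin across the rx queues (IRQ vector 0 is
 * reserved for link and other events, hence the "irq_cnt - 1" queues),
 * then program the fixed hash key.
 */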
8261 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8262 u32 reg = MAC_RSS_INDIR_TBL_0;
8263 u8 *ent = (u8 *)&val;
8265 /* Setup the indirection table */
8266 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8267 int idx = i % sizeof(val);
8269 ent[idx] = i % (tp->irq_cnt - 1);
8270 if (idx == sizeof(val) - 1) {
8276 /* Setup the "secret" hash key. */
8277 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8278 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8279 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8280 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8281 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8282 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8283 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8284 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8285 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8286 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8289 tp->rx_mode = RX_MODE_ENABLE;
8290 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8291 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8293 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8294 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8295 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8296 RX_MODE_RSS_IPV6_HASH_EN |
8297 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8298 RX_MODE_RSS_IPV4_HASH_EN |
8299 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8301 tw32_f(MAC_RX_MODE, tp->rx_mode);
8304 tw32(MAC_LED_CTRL, tp->led_ctrl);
8306 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8307 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8308 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8311 tw32_f(MAC_RX_MODE, tp->rx_mode);
8314 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8315 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8316 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8317 /* Set drive transmission level to 1.2V */
8318 /* only if the signal pre-emphasis bit is not set */
8319 val = tr32(MAC_SERDES_CFG);
8322 tw32(MAC_SERDES_CFG, val);
8324 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8325 tw32(MAC_SERDES_CFG, 0x616000);
8328 /* Prevent chip from dropping frames when flow control is enabled. */
8331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8335 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8338 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8339 /* Use hardware link auto-negotiation */
8340 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8343 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8344 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8347 tmp = tr32(SERDES_RX_CTRL);
8348 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8349 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8350 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8351 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8354 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8355 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8356 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8357 tp->link_config.speed = tp->link_config.orig_speed;
8358 tp->link_config.duplex = tp->link_config.orig_duplex;
8359 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8362 err = tg3_setup_phy(tp, 0);
8366 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8367 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8370 /* Clear CRC stats. */
8371 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8372 tg3_writephy(tp, MII_TG3_TEST1,
8373 tmp | MII_TG3_TEST1_CRC_EN);
8374 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8379 __tg3_set_rx_mode(tp->dev);
8381 /* Initialize receive rules. */
8382 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8383 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8384 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8385 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8387 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8388 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8392 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8396 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8398 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8400 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8402 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8404 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8406 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8408 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8410 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8412 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8414 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8416 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8418 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8420 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8422 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8430 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8431 /* Write our heartbeat update interval to APE. */
8432 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8433 APE_HOST_HEARTBEAT_INT_DISABLE);
8435 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8440 /* Called at device open time to get the chip ready for
8441 * packet processing. Invoked with tp->lock held.
8443 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8445 tg3_switch_clocks(tp);
8447 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8449 return tg3_reset_hw(tp, reset_phy);
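/* Fold a 32-bit hardware counter read into a 64-bit software statistic:
 * __val is added to the low word, and if the addition wrapped (the new
 * low word is smaller than the value just added) a carry is propagated
 * into the high word.
 */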
8452 #define TG3_STAT_ADD32(PSTAT, REG) \
8453 do { u32 __val = tr32(REG); \
8454 (PSTAT)->low += __val; \
8455 if ((PSTAT)->low < __val) \
8456 (PSTAT)->high += 1; \
8459 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8461 struct tg3_hw_stats *sp = tp->hw_stats;
8463 if (!netif_carrier_ok(tp->dev))
8466 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8467 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8468 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8469 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8470 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8471 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8472 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8473 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8474 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8475 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8476 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8477 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8478 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8480 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8481 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8482 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8483 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8484 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8485 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8486 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8487 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8488 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8489 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8490 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8491 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8492 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8493 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8495 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8496 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8497 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
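/* Periodic driver timer. It re-arms itself every tp->timer_offset
 * jiffies; timer_counter gates the once-per-second work (stats harvesting
 * and link polling) and asf_counter gates the ASF heartbeat, which runs
 * half as often.
 */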
8500 static void tg3_timer(unsigned long __opaque)
8502 struct tg3 *tp = (struct tg3 *) __opaque;
8507 spin_lock(&tp->lock);
8509 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8510 /* All of this garbage is because, when using non-tagged
8511 * IRQ status, the mailbox/status_block protocol the chip
8512 * uses with the cpu is race prone. */
8514 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8515 tw32(GRC_LOCAL_CTRL,
8516 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8518 tw32(HOSTCC_MODE, tp->coalesce_mode |
8519 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8522 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8523 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8524 spin_unlock(&tp->lock);
8525 schedule_work(&tp->reset_task);
8530 /* This part only runs once per second. */
8531 if (!--tp->timer_counter) {
8532 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8533 tg3_periodic_fetch_stats(tp);
8535 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8539 mac_stat = tr32(MAC_STATUS);
8542 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8543 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8545 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8549 tg3_setup_phy(tp, 0);
8550 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8551 u32 mac_stat = tr32(MAC_STATUS);
8554 if (netif_carrier_ok(tp->dev) &&
8555 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8558 if (!netif_carrier_ok(tp->dev) &&
8559 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8560 MAC_STATUS_SIGNAL_DET))) {
8564 if (!tp->serdes_counter) {
8567 ~MAC_MODE_PORT_MODE_MASK));
8569 tw32_f(MAC_MODE, tp->mac_mode);
8572 tg3_setup_phy(tp, 0);
8574 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8575 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8576 tg3_serdes_parallel_detect(tp);
8579 tp->timer_counter = tp->timer_multiplier;
8582 /* Heartbeat is only sent once every 2 seconds.
8584 * The heartbeat is to tell the ASF firmware that the host
8585 * driver is still alive. In the event that the OS crashes,
8586 * ASF needs to reset the hardware to free up the FIFO space
8587 * that may be filled with rx packets destined for the host.
8588 * If the FIFO is full, ASF will no longer function properly.
8590 * Unintended resets have been reported on real time kernels
8591 * where the timer doesn't run on time. Netpoll will also have the same problem.
8594 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8595 * to check the ring condition when the heartbeat is expiring
8596 * before doing the reset. This will prevent most unintended
8599 if (!--tp->asf_counter) {
8600 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8601 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8602 tg3_wait_for_event_ack(tp);
8604 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8605 FWCMD_NICDRV_ALIVE3);
8606 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8607 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8608 TG3_FW_UPDATE_TIMEOUT_SEC);
8610 tg3_generate_fw_event(tp);
8612 tp->asf_counter = tp->asf_multiplier;
8615 spin_unlock(&tp->lock);
8618 tp->timer.expires = jiffies + tp->timer_offset;
8619 add_timer(&tp->timer);
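/* Request the IRQ for one NAPI context. With multiple vectors each gets
 * its own "<dev>-<n>" name; MSI/MSI-X handlers are registered without
 * IRQF_SHARED, while legacy INTx falls back to a shared handler, tagged
 * or untagged depending on the chip's status-block mode.
 */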
8622 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8625 unsigned long flags;
8627 struct tg3_napi *tnapi = &tp->napi[irq_num];
8629 if (tp->irq_cnt == 1)
8630 name = tp->dev->name;
8632 name = &tnapi->irq_lbl[0];
8633 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8634 name[IFNAMSIZ-1] = 0;
8637 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8639 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8641 flags = IRQF_SAMPLE_RANDOM;
8644 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8645 fn = tg3_interrupt_tagged;
8646 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8649 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
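/* Verify that interrupt delivery actually works: temporarily install a
 * test ISR, kick the host coalescing engine so it raises an interrupt
 * right away, and poll the interrupt mailbox to see whether it arrived,
 * before restoring the normal handler.
 */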
8652 static int tg3_test_interrupt(struct tg3 *tp)
8654 struct tg3_napi *tnapi = &tp->napi[0];
8655 struct net_device *dev = tp->dev;
8656 int err, i, intr_ok = 0;
8659 if (!netif_running(dev))
8662 tg3_disable_ints(tp);
8664 free_irq(tnapi->irq_vec, tnapi);
8667 * Turn off MSI one shot mode. Otherwise this test has no
8668 * observable way to know whether the interrupt was delivered.
8670 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8671 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8672 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8673 tw32(MSGINT_MODE, val);
8676 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8677 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8681 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8682 tg3_enable_ints(tp);
8684 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8687 for (i = 0; i < 5; i++) {
8688 u32 int_mbox, misc_host_ctrl;
8690 int_mbox = tr32_mailbox(tnapi->int_mbox);
8691 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8693 if ((int_mbox != 0) ||
8694 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8702 tg3_disable_ints(tp);
8704 free_irq(tnapi->irq_vec, tnapi);
8706 err = tg3_request_irq(tp, 0);
8712 /* Reenable MSI one shot mode. */
8713 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8714 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8715 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8716 tw32(MSGINT_MODE, val);
8724 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
8725 * successfully restored
8727 static int tg3_test_msi(struct tg3 *tp)
8732 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8735 /* Turn off SERR reporting in case MSI terminates with Master
8738 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8739 pci_write_config_word(tp->pdev, PCI_COMMAND,
8740 pci_cmd & ~PCI_COMMAND_SERR);
8742 err = tg3_test_interrupt(tp);
8744 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8749 /* other failures */
8753 /* MSI test failed, go back to INTx mode */
8754 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8755 "to INTx mode. Please report this failure to the PCI "
8756 "maintainer and include system chipset information\n");
8758 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8760 pci_disable_msi(tp->pdev);
8762 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8763 tp->napi[0].irq_vec = tp->pdev->irq;
8765 err = tg3_request_irq(tp, 0);
8769 /* Need to reset the chip because the MSI cycle may have terminated
8770 * with Master Abort.
8772 tg3_full_lock(tp, 1);
8774 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8775 err = tg3_init_hw(tp, 1);
8777 tg3_full_unlock(tp);
8780 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8785 static int tg3_request_firmware(struct tg3 *tp)
8787 const __be32 *fw_data;
8789 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8790 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8795 fw_data = (void *)tp->fw->data;
8797 /* Firmware blob starts with version numbers, followed by
8798 * start address and _full_ length including BSS sections
8799 * (which must be longer than the actual data, of course). */
8802 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8803 if (tp->fw_len < (tp->fw->size - 12)) {
8804 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8805 tp->fw_len, tp->fw_needed);
8806 release_firmware(tp->fw);
8811 /* We no longer need firmware; we have it. */
8812 tp->fw_needed = NULL;
8816 static bool tg3_enable_msix(struct tg3 *tp)
8818 int i, rc, cpus = num_online_cpus();
8819 struct msix_entry msix_ent[tp->irq_max];
8822 /* Just fall back to the simpler MSI mode. */
8826 * We want as many rx rings enabled as there are cpus.
8827 * The first MSIX vector only deals with link interrupts, etc,
8828 * so we add one to the number of vectors we are requesting.
8830 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8832 for (i = 0; i < tp->irq_max; i++) {
8833 msix_ent[i].entry = i;
8834 msix_ent[i].vector = 0;
8837 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8840 } else if (rc != 0) {
8841 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8843 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
8848 for (i = 0; i < tp->irq_max; i++)
8849 tp->napi[i].irq_vec = msix_ent[i].vector;
8851 netif_set_real_num_tx_queues(tp->dev, 1);
8852 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
8853 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
8854 pci_disable_msix(tp->pdev);
8857 if (tp->irq_cnt > 1)
8858 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8863 static void tg3_ints_init(struct tg3 *tp)
8865 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8866 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8867 /* All MSI supporting chips should support tagged
8868 * status. Assert that this is the case.
8870 netdev_warn(tp->dev,
8871 "MSI without TAGGED_STATUS? Not using MSI\n");
8875 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8876 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8877 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8878 pci_enable_msi(tp->pdev) == 0)
8879 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8881 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8882 u32 msi_mode = tr32(MSGINT_MODE);
8883 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8884 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8885 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8888 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8890 tp->napi[0].irq_vec = tp->pdev->irq;
8891 netif_set_real_num_tx_queues(tp->dev, 1);
8895 static void tg3_ints_fini(struct tg3 *tp)
8897 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8898 pci_disable_msix(tp->pdev);
8899 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8900 pci_disable_msi(tp->pdev);
8901 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8902 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
8905 static int tg3_open(struct net_device *dev)
8907 struct tg3 *tp = netdev_priv(dev);
8910 if (tp->fw_needed) {
8911 err = tg3_request_firmware(tp);
8912 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8916 netdev_warn(tp->dev, "TSO capability disabled\n");
8917 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8918 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8919 netdev_notice(tp->dev, "TSO capability restored\n");
8920 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8924 netif_carrier_off(tp->dev);
8926 err = tg3_set_power_state(tp, PCI_D0);
8930 tg3_full_lock(tp, 0);
8932 tg3_disable_ints(tp);
8933 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8935 tg3_full_unlock(tp);
8938 * Set up interrupts first so we know how
8939 * many NAPI resources to allocate
8943 /* The placement of this call is tied
8944 * to the setup and use of Host TX descriptors.
8946 err = tg3_alloc_consistent(tp);
8952 tg3_napi_enable(tp);
8954 for (i = 0; i < tp->irq_cnt; i++) {
8955 struct tg3_napi *tnapi = &tp->napi[i];
8956 err = tg3_request_irq(tp, i);
8958 for (i--; i >= 0; i--)
8959 free_irq(tnapi->irq_vec, tnapi);
8967 tg3_full_lock(tp, 0);
8969 err = tg3_init_hw(tp, 1);
8971 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8974 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8975 tp->timer_offset = HZ;
8977 tp->timer_offset = HZ / 10;
8979 BUG_ON(tp->timer_offset > HZ);
8980 tp->timer_counter = tp->timer_multiplier =
8981 (HZ / tp->timer_offset);
8982 tp->asf_counter = tp->asf_multiplier =
8983 ((HZ / tp->timer_offset) * 2);
8985 init_timer(&tp->timer);
8986 tp->timer.expires = jiffies + tp->timer_offset;
8987 tp->timer.data = (unsigned long) tp;
8988 tp->timer.function = tg3_timer;
8991 tg3_full_unlock(tp);
8996 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8997 err = tg3_test_msi(tp);
9000 tg3_full_lock(tp, 0);
9001 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9003 tg3_full_unlock(tp);
9008 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
9009 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9010 u32 val = tr32(PCIE_TRANSACTION_CFG);
9012 tw32(PCIE_TRANSACTION_CFG,
9013 val | PCIE_TRANS_CFG_1SHOT_MSI);
9019 tg3_full_lock(tp, 0);
9021 add_timer(&tp->timer);
9022 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9023 tg3_enable_ints(tp);
9025 tg3_full_unlock(tp);
9027 netif_tx_start_all_queues(dev);
9032 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9033 struct tg3_napi *tnapi = &tp->napi[i];
9034 free_irq(tnapi->irq_vec, tnapi);
9038 tg3_napi_disable(tp);
9040 tg3_free_consistent(tp);
9047 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9048 struct rtnl_link_stats64 *);
9049 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
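/* ndo_stop: quiesce NAPI and the reset task, stop the timer, halt the
 * chip under the full lock, free the IRQs and ring memory, snapshot the
 * statistics so they persist across the next open, and drop to D3hot.
 */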
9051 static int tg3_close(struct net_device *dev)
9054 struct tg3 *tp = netdev_priv(dev);
9056 tg3_napi_disable(tp);
9057 cancel_work_sync(&tp->reset_task);
9059 netif_tx_stop_all_queues(dev);
9061 del_timer_sync(&tp->timer);
9065 tg3_full_lock(tp, 1);
9067 tg3_disable_ints(tp);
9069 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9071 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9073 tg3_full_unlock(tp);
9075 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9076 struct tg3_napi *tnapi = &tp->napi[i];
9077 free_irq(tnapi->irq_vec, tnapi);
9082 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9084 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9085 sizeof(tp->estats_prev));
9089 tg3_free_consistent(tp);
9091 tg3_set_power_state(tp, PCI_D3hot);
9093 netif_carrier_off(tp->dev);
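/* Hardware statistics are kept as {high,low} 32-bit pairs in the DMA'd
 * statistics block; fold one counter into a u64.
 */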
9098 static inline u64 get_stat64(tg3_stat64_t *val)
9100 return ((u64)val->high << 32) | ((u64)val->low);
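/* On 5700/5701 copper parts the CRC error count is read from the PHY
 * (MII_TG3_TEST1 / MII_TG3_RXR_COUNTERS) and accumulated in
 * tp->phy_crc_errors; all other devices use the rx_fcs_errors counter
 * from the hardware statistics block.
 */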
9103 static u64 calc_crc_errors(struct tg3 *tp)
9105 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9107 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9108 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9112 spin_lock_bh(&tp->lock);
9113 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9114 tg3_writephy(tp, MII_TG3_TEST1,
9115 val | MII_TG3_TEST1_CRC_EN);
9116 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9119 spin_unlock_bh(&tp->lock);
9121 tp->phy_crc_errors += val;
9123 return tp->phy_crc_errors;
9126 return get_stat64(&hw_stats->rx_fcs_errors);
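/* Ethtool statistics are cumulative: each counter is the live hardware
 * value added to the snapshot saved in tp->estats_prev at the last
 * close, so the totals survive close/open cycles.
 */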
9129 #define ESTAT_ADD(member) \
9130 estats->member = old_estats->member + \
9131 get_stat64(&hw_stats->member)
9133 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9135 struct tg3_ethtool_stats *estats = &tp->estats;
9136 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9137 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9142 ESTAT_ADD(rx_octets);
9143 ESTAT_ADD(rx_fragments);
9144 ESTAT_ADD(rx_ucast_packets);
9145 ESTAT_ADD(rx_mcast_packets);
9146 ESTAT_ADD(rx_bcast_packets);
9147 ESTAT_ADD(rx_fcs_errors);
9148 ESTAT_ADD(rx_align_errors);
9149 ESTAT_ADD(rx_xon_pause_rcvd);
9150 ESTAT_ADD(rx_xoff_pause_rcvd);
9151 ESTAT_ADD(rx_mac_ctrl_rcvd);
9152 ESTAT_ADD(rx_xoff_entered);
9153 ESTAT_ADD(rx_frame_too_long_errors);
9154 ESTAT_ADD(rx_jabbers);
9155 ESTAT_ADD(rx_undersize_packets);
9156 ESTAT_ADD(rx_in_length_errors);
9157 ESTAT_ADD(rx_out_length_errors);
9158 ESTAT_ADD(rx_64_or_less_octet_packets);
9159 ESTAT_ADD(rx_65_to_127_octet_packets);
9160 ESTAT_ADD(rx_128_to_255_octet_packets);
9161 ESTAT_ADD(rx_256_to_511_octet_packets);
9162 ESTAT_ADD(rx_512_to_1023_octet_packets);
9163 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9164 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9165 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9166 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9167 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9169 ESTAT_ADD(tx_octets);
9170 ESTAT_ADD(tx_collisions);
9171 ESTAT_ADD(tx_xon_sent);
9172 ESTAT_ADD(tx_xoff_sent);
9173 ESTAT_ADD(tx_flow_control);
9174 ESTAT_ADD(tx_mac_errors);
9175 ESTAT_ADD(tx_single_collisions);
9176 ESTAT_ADD(tx_mult_collisions);
9177 ESTAT_ADD(tx_deferred);
9178 ESTAT_ADD(tx_excessive_collisions);
9179 ESTAT_ADD(tx_late_collisions);
9180 ESTAT_ADD(tx_collide_2times);
9181 ESTAT_ADD(tx_collide_3times);
9182 ESTAT_ADD(tx_collide_4times);
9183 ESTAT_ADD(tx_collide_5times);
9184 ESTAT_ADD(tx_collide_6times);
9185 ESTAT_ADD(tx_collide_7times);
9186 ESTAT_ADD(tx_collide_8times);
9187 ESTAT_ADD(tx_collide_9times);
9188 ESTAT_ADD(tx_collide_10times);
9189 ESTAT_ADD(tx_collide_11times);
9190 ESTAT_ADD(tx_collide_12times);
9191 ESTAT_ADD(tx_collide_13times);
9192 ESTAT_ADD(tx_collide_14times);
9193 ESTAT_ADD(tx_collide_15times);
9194 ESTAT_ADD(tx_ucast_packets);
9195 ESTAT_ADD(tx_mcast_packets);
9196 ESTAT_ADD(tx_bcast_packets);
9197 ESTAT_ADD(tx_carrier_sense_errors);
9198 ESTAT_ADD(tx_discards);
9199 ESTAT_ADD(tx_errors);
9201 ESTAT_ADD(dma_writeq_full);
9202 ESTAT_ADD(dma_write_prioq_full);
9203 ESTAT_ADD(rxbds_empty);
9204 ESTAT_ADD(rx_discards);
9205 ESTAT_ADD(rx_errors);
9206 ESTAT_ADD(rx_threshold_hit);
9208 ESTAT_ADD(dma_readq_full);
9209 ESTAT_ADD(dma_read_prioq_full);
9210 ESTAT_ADD(tx_comp_queue_full);
9212 ESTAT_ADD(ring_set_send_prod_index);
9213 ESTAT_ADD(ring_status_update);
9214 ESTAT_ADD(nic_irqs);
9215 ESTAT_ADD(nic_avoided_irqs);
9216 ESTAT_ADD(nic_tx_threshold_hit);
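/* ndo_get_stats64: fold the live hardware counters into the standard
 * rtnl_link_stats64 layout, again on top of the snapshot taken at the
 * last close (tp->net_stats_prev).
 */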
9221 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9222 struct rtnl_link_stats64 *stats)
9224 struct tg3 *tp = netdev_priv(dev);
9225 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9226 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9231 stats->rx_packets = old_stats->rx_packets +
9232 get_stat64(&hw_stats->rx_ucast_packets) +
9233 get_stat64(&hw_stats->rx_mcast_packets) +
9234 get_stat64(&hw_stats->rx_bcast_packets);
9236 stats->tx_packets = old_stats->tx_packets +
9237 get_stat64(&hw_stats->tx_ucast_packets) +
9238 get_stat64(&hw_stats->tx_mcast_packets) +
9239 get_stat64(&hw_stats->tx_bcast_packets);
9241 stats->rx_bytes = old_stats->rx_bytes +
9242 get_stat64(&hw_stats->rx_octets);
9243 stats->tx_bytes = old_stats->tx_bytes +
9244 get_stat64(&hw_stats->tx_octets);
9246 stats->rx_errors = old_stats->rx_errors +
9247 get_stat64(&hw_stats->rx_errors);
9248 stats->tx_errors = old_stats->tx_errors +
9249 get_stat64(&hw_stats->tx_errors) +
9250 get_stat64(&hw_stats->tx_mac_errors) +
9251 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9252 get_stat64(&hw_stats->tx_discards);
9254 stats->multicast = old_stats->multicast +
9255 get_stat64(&hw_stats->rx_mcast_packets);
9256 stats->collisions = old_stats->collisions +
9257 get_stat64(&hw_stats->tx_collisions);
9259 stats->rx_length_errors = old_stats->rx_length_errors +
9260 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9261 get_stat64(&hw_stats->rx_undersize_packets);
9263 stats->rx_over_errors = old_stats->rx_over_errors +
9264 get_stat64(&hw_stats->rxbds_empty);
9265 stats->rx_frame_errors = old_stats->rx_frame_errors +
9266 get_stat64(&hw_stats->rx_align_errors);
9267 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9268 get_stat64(&hw_stats->tx_discards);
9269 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9270 get_stat64(&hw_stats->tx_carrier_sense_errors);
9272 stats->rx_crc_errors = old_stats->rx_crc_errors +
9273 calc_crc_errors(tp);
9275 stats->rx_missed_errors = old_stats->rx_missed_errors +
9276 get_stat64(&hw_stats->rx_discards);
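/* Compute an Ethernet CRC-32 over @buf.  __tg3_set_rx_mode() below
 * hashes each multicast address with this and uses the result to pick
 * one bit in the 4 x 32-bit MAC_HASH_REG_x filter: bits 6:5 of the
 * derived bit index select the register, the low bits the position
 * within it.
 */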
9281 static inline u32 calc_crc(unsigned char *buf, int len)
9289 for (j = 0; j < len; j++) {
9292 for (k = 0; k < 8; k++) {
9305 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9307 /* accept or reject all multicast frames */
9308 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9309 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9310 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9311 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
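/* Rebuild MAC_RX_MODE and the multicast hash filter from dev->flags and
 * the device's multicast list.  Callers must hold the full lock; see
 * tg3_set_rx_mode() below.
 */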
9314 static void __tg3_set_rx_mode(struct net_device *dev)
9316 struct tg3 *tp = netdev_priv(dev);
9319 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9320 RX_MODE_KEEP_VLAN_TAG);
9322 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG flag clear. */
9325 #if TG3_VLAN_TAG_USED
9327 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9328 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9330 /* By definition, VLAN is disabled always in this case. */
9333 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9334 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9337 if (dev->flags & IFF_PROMISC) {
9338 /* Promiscuous mode. */
9339 rx_mode |= RX_MODE_PROMISC;
9340 } else if (dev->flags & IFF_ALLMULTI) {
9341 /* Accept all multicast. */
9342 tg3_set_multi(tp, 1);
9343 } else if (netdev_mc_empty(dev)) {
9344 /* Reject all multicast. */
9345 tg3_set_multi(tp, 0);
9347 /* Accept one or more multicast(s). */
9348 struct netdev_hw_addr *ha;
9349 u32 mc_filter[4] = { 0, };
9354 netdev_for_each_mc_addr(ha, dev) {
9355 crc = calc_crc(ha->addr, ETH_ALEN);
9357 regidx = (bit & 0x60) >> 5;
9359 mc_filter[regidx] |= (1 << bit);
9362 tw32(MAC_HASH_REG_0, mc_filter[0]);
9363 tw32(MAC_HASH_REG_1, mc_filter[1]);
9364 tw32(MAC_HASH_REG_2, mc_filter[2]);
9365 tw32(MAC_HASH_REG_3, mc_filter[3]);
9368 if (rx_mode != tp->rx_mode) {
9369 tp->rx_mode = rx_mode;
9370 tw32_f(MAC_RX_MODE, rx_mode);
9375 static void tg3_set_rx_mode(struct net_device *dev)
9377 struct tg3 *tp = netdev_priv(dev);
9379 if (!netif_running(dev))
9382 tg3_full_lock(tp, 0);
9383 __tg3_set_rx_mode(dev);
9384 tg3_full_unlock(tp);
9387 #define TG3_REGDUMP_LEN (32 * 1024)
9389 static int tg3_get_regs_len(struct net_device *dev)
9391 return TG3_REGDUMP_LEN;
9394 static void tg3_get_regs(struct net_device *dev,
9395 struct ethtool_regs *regs, void *_p)
9398 struct tg3 *tp = netdev_priv(dev);
9404 memset(p, 0, TG3_REGDUMP_LEN);
9406 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9409 tg3_full_lock(tp, 0);
9411 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9412 #define GET_REG32_LOOP(base, len) \
9413 do { p = (u32 *)(orig_p + (base)); \
9414 for (i = 0; i < len; i += 4) \
9415 __GET_REG32((base) + i); \
9417 #define GET_REG32_1(reg) \
9418 do { p = (u32 *)(orig_p + (reg)); \
9419 __GET_REG32((reg)); \
9422 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9423 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9424 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9425 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9426 GET_REG32_1(SNDDATAC_MODE);
9427 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9428 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9429 GET_REG32_1(SNDBDC_MODE);
9430 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9431 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9432 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9433 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9434 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9435 GET_REG32_1(RCVDCC_MODE);
9436 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9437 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9438 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9439 GET_REG32_1(MBFREE_MODE);
9440 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9441 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9442 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9443 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9444 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9445 GET_REG32_1(RX_CPU_MODE);
9446 GET_REG32_1(RX_CPU_STATE);
9447 GET_REG32_1(RX_CPU_PGMCTR);
9448 GET_REG32_1(RX_CPU_HWBKPT);
9449 GET_REG32_1(TX_CPU_MODE);
9450 GET_REG32_1(TX_CPU_STATE);
9451 GET_REG32_1(TX_CPU_PGMCTR);
9452 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9453 GET_REG32_LOOP(FTQ_RESET, 0x120);
9454 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9455 GET_REG32_1(DMAC_MODE);
9456 GET_REG32_LOOP(GRC_MODE, 0x4c);
9457 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9458 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9461 #undef GET_REG32_LOOP
9464 tg3_full_unlock(tp);
9467 static int tg3_get_eeprom_len(struct net_device *dev)
9469 struct tg3 *tp = netdev_priv(dev);
9471 return tp->nvram_size;
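/* ethtool get_eeprom: NVRAM is only readable 32 bits at a time, so the
 * transfer is split into an unaligned head, whole 4-byte words, and an
 * unaligned tail, each copied through a bounce word.
 */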
9474 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9476 struct tg3 *tp = netdev_priv(dev);
9479 u32 i, offset, len, b_offset, b_count;
9482 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9485 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9488 offset = eeprom->offset;
9492 eeprom->magic = TG3_EEPROM_MAGIC;
9495 /* adjustments to start on required 4 byte boundary */
9496 b_offset = offset & 3;
9497 b_count = 4 - b_offset;
9498 if (b_count > len) {
9499 /* i.e. offset=1 len=2 */
9502 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9505 memcpy(data, ((char *)&val) + b_offset, b_count);
9508 eeprom->len += b_count;
9511 /* read bytes up to the last 4 byte boundary */
9512 pd = &data[eeprom->len];
9513 for (i = 0; i < (len - (len & 3)); i += 4) {
9514 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9519 memcpy(pd + i, &val, 4);
9524 /* read last bytes not ending on 4 byte boundary */
9525 pd = &data[eeprom->len];
9527 b_offset = offset + len - b_count;
9528 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9531 memcpy(pd, &val, b_count);
9532 eeprom->len += b_count;
9537 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
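/* ethtool set_eeprom: writes must also be 4-byte aligned, so the words
 * surrounding an unaligned start or end are read back first and merged
 * into a temporary buffer before tg3_nvram_write_block() is called.
 */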
9539 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9541 struct tg3 *tp = netdev_priv(dev);
9543 u32 offset, len, b_offset, odd_len;
9547 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9550 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9551 eeprom->magic != TG3_EEPROM_MAGIC)
9554 offset = eeprom->offset;
9557 if ((b_offset = (offset & 3))) {
9558 /* adjustments to start on required 4 byte boundary */
9559 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9570 /* adjustments to end on required 4 byte boundary */
9572 len = (len + 3) & ~3;
9573 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9579 if (b_offset || odd_len) {
9580 buf = kmalloc(len, GFP_KERNEL);
9584 memcpy(buf, &start, 4);
9586 memcpy(buf+len-4, &end, 4);
9587 memcpy(buf + b_offset, data, eeprom->len);
9590 ret = tg3_nvram_write_block(tp, offset, len, buf);
9598 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9600 struct tg3 *tp = netdev_priv(dev);
9602 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9603 struct phy_device *phydev;
9604 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9606 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9607 return phy_ethtool_gset(phydev, cmd);
9610 cmd->supported = (SUPPORTED_Autoneg);
9612 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9613 cmd->supported |= (SUPPORTED_1000baseT_Half |
9614 SUPPORTED_1000baseT_Full);
9616 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9617 cmd->supported |= (SUPPORTED_100baseT_Half |
9618 SUPPORTED_100baseT_Full |
9619 SUPPORTED_10baseT_Half |
9620 SUPPORTED_10baseT_Full |
9622 cmd->port = PORT_TP;
9624 cmd->supported |= SUPPORTED_FIBRE;
9625 cmd->port = PORT_FIBRE;
9628 cmd->advertising = tp->link_config.advertising;
9629 if (netif_running(dev)) {
9630 cmd->speed = tp->link_config.active_speed;
9631 cmd->duplex = tp->link_config.active_duplex;
9633 cmd->phy_address = tp->phy_addr;
9634 cmd->transceiver = XCVR_INTERNAL;
9635 cmd->autoneg = tp->link_config.autoneg;
9641 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9643 struct tg3 *tp = netdev_priv(dev);
9645 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9646 struct phy_device *phydev;
9647 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9649 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9650 return phy_ethtool_sset(phydev, cmd);
9653 if (cmd->autoneg != AUTONEG_ENABLE &&
9654 cmd->autoneg != AUTONEG_DISABLE)
9657 if (cmd->autoneg == AUTONEG_DISABLE &&
9658 cmd->duplex != DUPLEX_FULL &&
9659 cmd->duplex != DUPLEX_HALF)
9662 if (cmd->autoneg == AUTONEG_ENABLE) {
9663 u32 mask = ADVERTISED_Autoneg |
9665 ADVERTISED_Asym_Pause;
9667 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9668 mask |= ADVERTISED_1000baseT_Half |
9669 ADVERTISED_1000baseT_Full;
9671 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9672 mask |= ADVERTISED_100baseT_Half |
9673 ADVERTISED_100baseT_Full |
9674 ADVERTISED_10baseT_Half |
9675 ADVERTISED_10baseT_Full |
9678 mask |= ADVERTISED_FIBRE;
9680 if (cmd->advertising & ~mask)
9683 mask &= (ADVERTISED_1000baseT_Half |
9684 ADVERTISED_1000baseT_Full |
9685 ADVERTISED_100baseT_Half |
9686 ADVERTISED_100baseT_Full |
9687 ADVERTISED_10baseT_Half |
9688 ADVERTISED_10baseT_Full);
9690 cmd->advertising &= mask;
9692 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9693 if (cmd->speed != SPEED_1000)
9696 if (cmd->duplex != DUPLEX_FULL)
9699 if (cmd->speed != SPEED_100 &&
9700 cmd->speed != SPEED_10)
9705 tg3_full_lock(tp, 0);
9707 tp->link_config.autoneg = cmd->autoneg;
9708 if (cmd->autoneg == AUTONEG_ENABLE) {
9709 tp->link_config.advertising = (cmd->advertising |
9710 ADVERTISED_Autoneg);
9711 tp->link_config.speed = SPEED_INVALID;
9712 tp->link_config.duplex = DUPLEX_INVALID;
9714 tp->link_config.advertising = 0;
9715 tp->link_config.speed = cmd->speed;
9716 tp->link_config.duplex = cmd->duplex;
9719 tp->link_config.orig_speed = tp->link_config.speed;
9720 tp->link_config.orig_duplex = tp->link_config.duplex;
9721 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9723 if (netif_running(dev))
9724 tg3_setup_phy(tp, 1);
9726 tg3_full_unlock(tp);
9731 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9733 struct tg3 *tp = netdev_priv(dev);
9735 strcpy(info->driver, DRV_MODULE_NAME);
9736 strcpy(info->version, DRV_MODULE_VERSION);
9737 strcpy(info->fw_version, tp->fw_ver);
9738 strcpy(info->bus_info, pci_name(tp->pdev));
9741 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9743 struct tg3 *tp = netdev_priv(dev);
9745 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9746 device_can_wakeup(&tp->pdev->dev))
9747 wol->supported = WAKE_MAGIC;
9751 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9752 device_can_wakeup(&tp->pdev->dev))
9753 wol->wolopts = WAKE_MAGIC;
9754 memset(&wol->sopass, 0, sizeof(wol->sopass));
9757 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9759 struct tg3 *tp = netdev_priv(dev);
9760 struct device *dp = &tp->pdev->dev;
9762 if (wol->wolopts & ~WAKE_MAGIC)
9764 if ((wol->wolopts & WAKE_MAGIC) &&
9765 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9768 spin_lock_bh(&tp->lock);
9769 if (wol->wolopts & WAKE_MAGIC) {
9770 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9771 device_set_wakeup_enable(dp, true);
9773 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9774 device_set_wakeup_enable(dp, false);
9776 spin_unlock_bh(&tp->lock);
9781 static u32 tg3_get_msglevel(struct net_device *dev)
9783 struct tg3 *tp = netdev_priv(dev);
9784 return tp->msg_enable;
9787 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9789 struct tg3 *tp = netdev_priv(dev);
9790 tp->msg_enable = value;
9793 static int tg3_set_tso(struct net_device *dev, u32 value)
9795 struct tg3 *tp = netdev_priv(dev);
9797 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9802 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9803 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9804 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9806 dev->features |= NETIF_F_TSO6;
9807 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9808 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9809 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9810 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9813 dev->features |= NETIF_F_TSO_ECN;
9815 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9817 return ethtool_op_set_tso(dev, value);
9820 static int tg3_nway_reset(struct net_device *dev)
9822 struct tg3 *tp = netdev_priv(dev);
9825 if (!netif_running(dev))
9828 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
9831 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9832 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9834 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9838 spin_lock_bh(&tp->lock);
9840 tg3_readphy(tp, MII_BMCR, &bmcr);
9841 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9842 ((bmcr & BMCR_ANENABLE) ||
9843 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
9844 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9848 spin_unlock_bh(&tp->lock);
9854 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9856 struct tg3 *tp = netdev_priv(dev);
9858 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9859 ering->rx_mini_max_pending = 0;
9860 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9861 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9863 ering->rx_jumbo_max_pending = 0;
9865 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9867 ering->rx_pending = tp->rx_pending;
9868 ering->rx_mini_pending = 0;
9869 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9870 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9872 ering->rx_jumbo_pending = 0;
9874 ering->tx_pending = tp->napi[0].tx_pending;
9877 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9879 struct tg3 *tp = netdev_priv(dev);
9880 int i, irq_sync = 0, err = 0;
9882 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9883 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9884 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9885 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9886 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9887 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9890 if (netif_running(dev)) {
9896 tg3_full_lock(tp, irq_sync);
9898 tp->rx_pending = ering->rx_pending;
9900 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9901 tp->rx_pending > 63)
9902 tp->rx_pending = 63;
9903 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9905 for (i = 0; i < tp->irq_max; i++)
9906 tp->napi[i].tx_pending = ering->tx_pending;
9908 if (netif_running(dev)) {
9909 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9910 err = tg3_restart_hw(tp, 1);
9912 tg3_netif_start(tp);
9915 tg3_full_unlock(tp);
9917 if (irq_sync && !err)
9923 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9925 struct tg3 *tp = netdev_priv(dev);
9927 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9929 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9930 epause->rx_pause = 1;
9932 epause->rx_pause = 0;
9934 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9935 epause->tx_pause = 1;
9937 epause->tx_pause = 0;
9940 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9942 struct tg3 *tp = netdev_priv(dev);
9945 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9947 struct phy_device *phydev;
9949 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9951 if (!(phydev->supported & SUPPORTED_Pause) ||
9952 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
9953 ((epause->rx_pause && !epause->tx_pause) ||
9954 (!epause->rx_pause && epause->tx_pause))))
9957 tp->link_config.flowctrl = 0;
9958 if (epause->rx_pause) {
9959 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9961 if (epause->tx_pause) {
9962 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9963 newadv = ADVERTISED_Pause;
9965 newadv = ADVERTISED_Pause |
9966 ADVERTISED_Asym_Pause;
9967 } else if (epause->tx_pause) {
9968 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9969 newadv = ADVERTISED_Asym_Pause;
9973 if (epause->autoneg)
9974 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9976 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9978 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
9979 u32 oldadv = phydev->advertising &
9980 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
9981 if (oldadv != newadv) {
9982 phydev->advertising &=
9983 ~(ADVERTISED_Pause |
9984 ADVERTISED_Asym_Pause);
9985 phydev->advertising |= newadv;
9986 if (phydev->autoneg) {
9988 * Always renegotiate the link to
9989 * inform our link partner of our
9990 * flow control settings, even if the
9991 * flow control is forced. Let
9992 * tg3_adjust_link() do the final
9993 * flow control setup.
9995 return phy_start_aneg(phydev);
9999 if (!epause->autoneg)
10000 tg3_setup_flow_control(tp, 0, 0);
10002 tp->link_config.orig_advertising &=
10003 ~(ADVERTISED_Pause |
10004 ADVERTISED_Asym_Pause);
10005 tp->link_config.orig_advertising |= newadv;
10010 if (netif_running(dev)) {
10011 tg3_netif_stop(tp);
10015 tg3_full_lock(tp, irq_sync);
10017 if (epause->autoneg)
10018 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10020 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10021 if (epause->rx_pause)
10022 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10024 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10025 if (epause->tx_pause)
10026 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10028 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10030 if (netif_running(dev)) {
10031 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10032 err = tg3_restart_hw(tp, 1);
10034 tg3_netif_start(tp);
10037 tg3_full_unlock(tp);
10043 static u32 tg3_get_rx_csum(struct net_device *dev)
10045 struct tg3 *tp = netdev_priv(dev);
10046 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10049 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10051 struct tg3 *tp = netdev_priv(dev);
10053 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10059 spin_lock_bh(&tp->lock);
10061 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10063 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10064 spin_unlock_bh(&tp->lock);
10069 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10071 struct tg3 *tp = netdev_priv(dev);
10073 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10079 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10080 ethtool_op_set_tx_ipv6_csum(dev, data);
10082 ethtool_op_set_tx_csum(dev, data);
10087 static int tg3_get_sset_count(struct net_device *dev, int sset)
10091 return TG3_NUM_TEST;
10093 return TG3_NUM_STATS;
10095 return -EOPNOTSUPP;
10099 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10101 switch (stringset) {
10103 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10106 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10109 WARN_ON(1); /* we need a WARN() */
10114 static int tg3_phys_id(struct net_device *dev, u32 data)
10116 struct tg3 *tp = netdev_priv(dev);
10119 if (!netif_running(tp->dev))
10123 data = UINT_MAX / 2;
10125 for (i = 0; i < (data * 2); i++) {
10127 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10128 LED_CTRL_1000MBPS_ON |
10129 LED_CTRL_100MBPS_ON |
10130 LED_CTRL_10MBPS_ON |
10131 LED_CTRL_TRAFFIC_OVERRIDE |
10132 LED_CTRL_TRAFFIC_BLINK |
10133 LED_CTRL_TRAFFIC_LED);
10136 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10137 LED_CTRL_TRAFFIC_OVERRIDE);
10139 if (msleep_interruptible(500))
10142 tw32(MAC_LED_CTRL, tp->led_ctrl);
10146 static void tg3_get_ethtool_stats(struct net_device *dev,
10147 struct ethtool_stats *estats, u64 *tmp_stats)
10149 struct tg3 *tp = netdev_priv(dev);
10150 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10153 #define NVRAM_TEST_SIZE 0x100
10154 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10155 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10156 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10157 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10158 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
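/* Ethtool NVRAM self test.  The magic word at offset 0 identifies the
 * image format and therefore how much to read and how to verify it:
 * selfboot format 1 images use a simple byte checksum, selfboot HW
 * images carry per-byte parity bits, and legacy images are checked with
 * CRCs over the bootstrap and manufacturing blocks.
 */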
10160 static int tg3_test_nvram(struct tg3 *tp)
10164 int i, j, k, err = 0, size;
10166 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10169 if (tg3_nvram_read(tp, 0, &magic) != 0)
10172 if (magic == TG3_EEPROM_MAGIC)
10173 size = NVRAM_TEST_SIZE;
10174 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10175 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10176 TG3_EEPROM_SB_FORMAT_1) {
10177 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10178 case TG3_EEPROM_SB_REVISION_0:
10179 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10181 case TG3_EEPROM_SB_REVISION_2:
10182 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10184 case TG3_EEPROM_SB_REVISION_3:
10185 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10192 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10193 size = NVRAM_SELFBOOT_HW_SIZE;
10197 buf = kmalloc(size, GFP_KERNEL);
10202 for (i = 0, j = 0; i < size; i += 4, j++) {
10203 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10210 /* Selfboot format */
10211 magic = be32_to_cpu(buf[0]);
10212 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10213 TG3_EEPROM_MAGIC_FW) {
10214 u8 *buf8 = (u8 *) buf, csum8 = 0;
10216 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10217 TG3_EEPROM_SB_REVISION_2) {
10218 /* For rev 2, the csum doesn't include the MBA. */
10219 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10221 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10224 for (i = 0; i < size; i++)
10237 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10238 TG3_EEPROM_MAGIC_HW) {
10239 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10240 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10241 u8 *buf8 = (u8 *) buf;
10243 /* Separate the parity bits and the data bytes. */
10244 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10245 if ((i == 0) || (i == 8)) {
10249 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10250 parity[k++] = buf8[i] & msk;
10252 } else if (i == 16) {
10256 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10257 parity[k++] = buf8[i] & msk;
10260 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10261 parity[k++] = buf8[i] & msk;
10264 data[j++] = buf8[i];
10268 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10269 u8 hw8 = hweight8(data[i]);
10271 if ((hw8 & 0x1) && parity[i])
10273 else if (!(hw8 & 0x1) && !parity[i])
10280 /* Bootstrap checksum at offset 0x10 */
10281 csum = calc_crc((unsigned char *) buf, 0x10);
10282 if (csum != be32_to_cpu(buf[0x10/4]))
10285 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10286 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10287 if (csum != be32_to_cpu(buf[0xfc/4]))
10297 #define TG3_SERDES_TIMEOUT_SEC 2
10298 #define TG3_COPPER_TIMEOUT_SEC 6
10300 static int tg3_test_link(struct tg3 *tp)
10304 if (!netif_running(tp->dev))
10307 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10308 max = TG3_SERDES_TIMEOUT_SEC;
10310 max = TG3_COPPER_TIMEOUT_SEC;
10312 for (i = 0; i < max; i++) {
10313 if (netif_carrier_ok(tp->dev))
10316 if (msleep_interruptible(1000))
10323 /* Only test the commonly used registers */
10324 static int tg3_test_registers(struct tg3 *tp)
10326 int i, is_5705, is_5750;
10327 u32 offset, read_mask, write_mask, val, save_val, read_val;
10331 #define TG3_FL_5705 0x1
10332 #define TG3_FL_NOT_5705 0x2
10333 #define TG3_FL_NOT_5788 0x4
10334 #define TG3_FL_NOT_5750 0x8
10338 /* MAC Control Registers */
10339 { MAC_MODE, TG3_FL_NOT_5705,
10340 0x00000000, 0x00ef6f8c },
10341 { MAC_MODE, TG3_FL_5705,
10342 0x00000000, 0x01ef6b8c },
10343 { MAC_STATUS, TG3_FL_NOT_5705,
10344 0x03800107, 0x00000000 },
10345 { MAC_STATUS, TG3_FL_5705,
10346 0x03800100, 0x00000000 },
10347 { MAC_ADDR_0_HIGH, 0x0000,
10348 0x00000000, 0x0000ffff },
10349 { MAC_ADDR_0_LOW, 0x0000,
10350 0x00000000, 0xffffffff },
10351 { MAC_RX_MTU_SIZE, 0x0000,
10352 0x00000000, 0x0000ffff },
10353 { MAC_TX_MODE, 0x0000,
10354 0x00000000, 0x00000070 },
10355 { MAC_TX_LENGTHS, 0x0000,
10356 0x00000000, 0x00003fff },
10357 { MAC_RX_MODE, TG3_FL_NOT_5705,
10358 0x00000000, 0x000007fc },
10359 { MAC_RX_MODE, TG3_FL_5705,
10360 0x00000000, 0x000007dc },
10361 { MAC_HASH_REG_0, 0x0000,
10362 0x00000000, 0xffffffff },
10363 { MAC_HASH_REG_1, 0x0000,
10364 0x00000000, 0xffffffff },
10365 { MAC_HASH_REG_2, 0x0000,
10366 0x00000000, 0xffffffff },
10367 { MAC_HASH_REG_3, 0x0000,
10368 0x00000000, 0xffffffff },
10370 /* Receive Data and Receive BD Initiator Control Registers. */
10371 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10372 0x00000000, 0xffffffff },
10373 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10374 0x00000000, 0xffffffff },
10375 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10376 0x00000000, 0x00000003 },
10377 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10378 0x00000000, 0xffffffff },
10379 { RCVDBDI_STD_BD+0, 0x0000,
10380 0x00000000, 0xffffffff },
10381 { RCVDBDI_STD_BD+4, 0x0000,
10382 0x00000000, 0xffffffff },
10383 { RCVDBDI_STD_BD+8, 0x0000,
10384 0x00000000, 0xffff0002 },
10385 { RCVDBDI_STD_BD+0xc, 0x0000,
10386 0x00000000, 0xffffffff },
10388 /* Receive BD Initiator Control Registers. */
10389 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10390 0x00000000, 0xffffffff },
10391 { RCVBDI_STD_THRESH, TG3_FL_5705,
10392 0x00000000, 0x000003ff },
10393 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10394 0x00000000, 0xffffffff },
10396 /* Host Coalescing Control Registers. */
10397 { HOSTCC_MODE, TG3_FL_NOT_5705,
10398 0x00000000, 0x00000004 },
10399 { HOSTCC_MODE, TG3_FL_5705,
10400 0x00000000, 0x000000f6 },
10401 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10402 0x00000000, 0xffffffff },
10403 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10404 0x00000000, 0x000003ff },
10405 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10406 0x00000000, 0xffffffff },
10407 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10408 0x00000000, 0x000003ff },
10409 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10410 0x00000000, 0xffffffff },
10411 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10412 0x00000000, 0x000000ff },
10413 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10414 0x00000000, 0xffffffff },
10415 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10416 0x00000000, 0x000000ff },
10417 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10418 0x00000000, 0xffffffff },
10419 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10420 0x00000000, 0xffffffff },
10421 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10422 0x00000000, 0xffffffff },
10423 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10424 0x00000000, 0x000000ff },
10425 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10426 0x00000000, 0xffffffff },
10427 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10428 0x00000000, 0x000000ff },
10429 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10430 0x00000000, 0xffffffff },
10431 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10432 0x00000000, 0xffffffff },
10433 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10434 0x00000000, 0xffffffff },
10435 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10436 0x00000000, 0xffffffff },
10437 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10438 0x00000000, 0xffffffff },
10439 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10440 0xffffffff, 0x00000000 },
10441 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10442 0xffffffff, 0x00000000 },
10444 /* Buffer Manager Control Registers. */
10445 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10446 0x00000000, 0x007fff80 },
10447 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10448 0x00000000, 0x007fffff },
10449 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10450 0x00000000, 0x0000003f },
10451 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10452 0x00000000, 0x000001ff },
10453 { BUFMGR_MB_HIGH_WATER, 0x0000,
10454 0x00000000, 0x000001ff },
10455 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10456 0xffffffff, 0x00000000 },
10457 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10458 0xffffffff, 0x00000000 },
10460 /* Mailbox Registers */
10461 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10462 0x00000000, 0x000001ff },
10463 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10464 0x00000000, 0x000001ff },
10465 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10466 0x00000000, 0x000007ff },
10467 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10468 0x00000000, 0x000001ff },
10470 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10473 is_5705 = is_5750 = 0;
10474 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10476 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10480 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10481 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10484 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10487 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10488 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10491 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10494 offset = (u32) reg_tbl[i].offset;
10495 read_mask = reg_tbl[i].read_mask;
10496 write_mask = reg_tbl[i].write_mask;
10498 /* Save the original register content */
10499 save_val = tr32(offset);
10501 /* Determine the read-only value. */
10502 read_val = save_val & read_mask;
10504 /* Write zero to the register, then make sure the read-only bits
10505 * are not changed and the read/write bits are all zeros.
10509 val = tr32(offset);
10511 /* Test the read-only and read/write bits. */
10512 if (((val & read_mask) != read_val) || (val & write_mask))
10515 /* Write ones to all the bits defined by RdMask and WrMask, then
10516 * make sure the read-only bits are not changed and the
10517 * read/write bits are all ones.
10519 tw32(offset, read_mask | write_mask);
10521 val = tr32(offset);
10523 /* Test the read-only bits. */
10524 if ((val & read_mask) != read_val)
10527 /* Test the read/write bits. */
10528 if ((val & write_mask) != write_mask)
10531 tw32(offset, save_val);
10537 if (netif_msg_hw(tp))
10538 netdev_err(tp->dev,
10539 "Register test failed at offset %x\n", offset);
10540 tw32(offset, save_val);
10544 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10546 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10550 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10551 for (j = 0; j < len; j += 4) {
10554 tg3_write_mem(tp, offset + j, test_pattern[i]);
10555 tg3_read_mem(tp, offset + j, &val);
10556 if (val != test_pattern[i])
10563 static int tg3_test_memory(struct tg3 *tp)
10565 static struct mem_entry {
10568 } mem_tbl_570x[] = {
10569 { 0x00000000, 0x00b50},
10570 { 0x00002000, 0x1c000},
10571 { 0xffffffff, 0x00000}
10572 }, mem_tbl_5705[] = {
10573 { 0x00000100, 0x0000c},
10574 { 0x00000200, 0x00008},
10575 { 0x00004000, 0x00800},
10576 { 0x00006000, 0x01000},
10577 { 0x00008000, 0x02000},
10578 { 0x00010000, 0x0e000},
10579 { 0xffffffff, 0x00000}
10580 }, mem_tbl_5755[] = {
10581 { 0x00000200, 0x00008},
10582 { 0x00004000, 0x00800},
10583 { 0x00006000, 0x00800},
10584 { 0x00008000, 0x02000},
10585 { 0x00010000, 0x0c000},
10586 { 0xffffffff, 0x00000}
10587 }, mem_tbl_5906[] = {
10588 { 0x00000200, 0x00008},
10589 { 0x00004000, 0x00400},
10590 { 0x00006000, 0x00400},
10591 { 0x00008000, 0x01000},
10592 { 0x00010000, 0x01000},
10593 { 0xffffffff, 0x00000}
10594 }, mem_tbl_5717[] = {
10595 { 0x00000200, 0x00008},
10596 { 0x00010000, 0x0a000},
10597 { 0x00020000, 0x13c00},
10598 { 0xffffffff, 0x00000}
10599 }, mem_tbl_57765[] = {
10600 { 0x00000200, 0x00008},
10601 { 0x00004000, 0x00800},
10602 { 0x00006000, 0x09800},
10603 { 0x00010000, 0x0a000},
10604 { 0xffffffff, 0x00000}
10606 struct mem_entry *mem_tbl;
10610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10612 mem_tbl = mem_tbl_5717;
10613 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10614 mem_tbl = mem_tbl_57765;
10615 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10616 mem_tbl = mem_tbl_5755;
10617 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10618 mem_tbl = mem_tbl_5906;
10619 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10620 mem_tbl = mem_tbl_5705;
10622 mem_tbl = mem_tbl_570x;
10624 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10625 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
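/* Single-packet loopback test.  A frame addressed to the NIC's own MAC
 * is queued on the first TX ring with either the MAC or the PHY placed
 * in loopback, the coalescer is kicked, and the status block is polled
 * until the RX return ring producer advances; the received buffer is
 * then compared byte-for-byte against what was sent.
 */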
10633 #define TG3_MAC_LOOPBACK 0
10634 #define TG3_PHY_LOOPBACK 1
10636 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10638 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10639 u32 desc_idx, coal_now;
10640 struct sk_buff *skb, *rx_skb;
10643 int num_pkts, tx_len, rx_len, i, err;
10644 struct tg3_rx_buffer_desc *desc;
10645 struct tg3_napi *tnapi, *rnapi;
10646 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10648 tnapi = &tp->napi[0];
10649 rnapi = &tp->napi[0];
10650 if (tp->irq_cnt > 1) {
10651 rnapi = &tp->napi[1];
10652 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10653 tnapi = &tp->napi[1];
10655 coal_now = tnapi->coal_now | rnapi->coal_now;
10657 if (loopback_mode == TG3_MAC_LOOPBACK) {
10658 /* HW errata - mac loopback fails in some cases on 5780.
10659 * Normal traffic and PHY loopback are not affected by the errata. */
10662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10665 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10666 MAC_MODE_PORT_INT_LPBACK;
10667 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10668 mac_mode |= MAC_MODE_LINK_POLARITY;
10669 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10670 mac_mode |= MAC_MODE_PORT_MODE_MII;
10672 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10673 tw32(MAC_MODE, mac_mode);
10674 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10677 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10678 tg3_phy_fet_toggle_apd(tp, false);
10679 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10681 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10683 tg3_phy_toggle_automdix(tp, 0);
10685 tg3_writephy(tp, MII_BMCR, val);
10688 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10689 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10690 tg3_writephy(tp, MII_TG3_FET_PTEST,
10691 MII_TG3_FET_PTEST_FRC_TX_LINK |
10692 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10693 /* The write needs to be flushed for the AC131 */
10694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10695 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10696 mac_mode |= MAC_MODE_PORT_MODE_MII;
10698 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10700 /* reset to prevent losing 1st rx packet intermittently */
10701 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10702 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10704 tw32_f(MAC_RX_MODE, tp->rx_mode);
10706 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10707 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10708 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10709 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10710 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10711 mac_mode |= MAC_MODE_LINK_POLARITY;
10712 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10713 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10715 tw32(MAC_MODE, mac_mode);
10723 skb = netdev_alloc_skb(tp->dev, tx_len);
10727 tx_data = skb_put(skb, tx_len);
10728 memcpy(tx_data, tp->dev->dev_addr, 6);
10729 memset(tx_data + 6, 0x0, 8);
10731 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10733 for (i = 14; i < tx_len; i++)
10734 tx_data[i] = (u8) (i & 0xff);
10736 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10737 if (pci_dma_mapping_error(tp->pdev, map)) {
10738 dev_kfree_skb(skb);
10742 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10747 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10751 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10756 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10757 tr32_mailbox(tnapi->prodmbox);
10761 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10762 for (i = 0; i < 35; i++) {
10763 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10768 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10769 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10770 if ((tx_idx == tnapi->tx_prod) &&
10771 (rx_idx == (rx_start_idx + num_pkts)))
10775 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10776 dev_kfree_skb(skb);
10778 if (tx_idx != tnapi->tx_prod)
10781 if (rx_idx != rx_start_idx + num_pkts)
10784 desc = &rnapi->rx_rcb[rx_start_idx];
10785 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10786 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10787 if (opaque_key != RXD_OPAQUE_RING_STD)
10790 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10791 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10794 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10795 if (rx_len != tx_len)
10798 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10800 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10801 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10803 for (i = 14; i < tx_len; i++) {
10804 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10809 /* tg3_free_rings will unmap and free the rx_skb */
10814 #define TG3_MAC_LOOPBACK_FAILED 1
10815 #define TG3_PHY_LOOPBACK_FAILED 2
10816 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10817 TG3_PHY_LOOPBACK_FAILED)
10819 static int tg3_test_loopback(struct tg3 *tp)
10824 if (!netif_running(tp->dev))
10825 return TG3_LOOPBACK_FAILED;
10827 err = tg3_reset_hw(tp, 1);
10829 return TG3_LOOPBACK_FAILED;
10831 /* Turn off gphy autopowerdown. */
10832 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10833 tg3_phy_toggle_apd(tp, false);
10835 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10839 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10841 /* Wait for up to 40 microseconds to acquire lock. */
10842 for (i = 0; i < 4; i++) {
10843 status = tr32(TG3_CPMU_MUTEX_GNT);
10844 if (status == CPMU_MUTEX_GNT_DRIVER)
10849 if (status != CPMU_MUTEX_GNT_DRIVER)
10850 return TG3_LOOPBACK_FAILED;
10852 /* Turn off link-based power management. */
10853 cpmuctrl = tr32(TG3_CPMU_CTRL);
10854 tw32(TG3_CPMU_CTRL,
10855 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10856 CPMU_CTRL_LINK_AWARE_MODE));
10859 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10860 err |= TG3_MAC_LOOPBACK_FAILED;
10862 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10863 tw32(TG3_CPMU_CTRL, cpmuctrl);
10865 /* Release the mutex */
10866 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10869 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10870 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10871 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10872 err |= TG3_PHY_LOOPBACK_FAILED;
10875 /* Re-enable gphy autopowerdown. */
10876 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10877 tg3_phy_toggle_apd(tp, true);
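/* ethtool self_test entry point.  The NVRAM and link tests always run;
 * offline tests additionally halt the chip (and its RX/TX CPUs) to run
 * the register, memory, loopback and interrupt tests, then restart the
 * hardware and restore the power state.
 */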
10882 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10885 struct tg3 *tp = netdev_priv(dev);
10887 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10888 tg3_set_power_state(tp, PCI_D0);
10890 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10892 if (tg3_test_nvram(tp) != 0) {
10893 etest->flags |= ETH_TEST_FL_FAILED;
10896 if (tg3_test_link(tp) != 0) {
10897 etest->flags |= ETH_TEST_FL_FAILED;
10900 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10901 int err, err2 = 0, irq_sync = 0;
10903 if (netif_running(dev)) {
10905 tg3_netif_stop(tp);
10909 tg3_full_lock(tp, irq_sync);
10911 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10912 err = tg3_nvram_lock(tp);
10913 tg3_halt_cpu(tp, RX_CPU_BASE);
10914 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10915 tg3_halt_cpu(tp, TX_CPU_BASE);
10917 tg3_nvram_unlock(tp);
10919 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
10922 if (tg3_test_registers(tp) != 0) {
10923 etest->flags |= ETH_TEST_FL_FAILED;
10926 if (tg3_test_memory(tp) != 0) {
10927 etest->flags |= ETH_TEST_FL_FAILED;
10930 if ((data[4] = tg3_test_loopback(tp)) != 0)
10931 etest->flags |= ETH_TEST_FL_FAILED;
10933 tg3_full_unlock(tp);
10935 if (tg3_test_interrupt(tp) != 0) {
10936 etest->flags |= ETH_TEST_FL_FAILED;
10940 tg3_full_lock(tp, 0);
10942 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10943 if (netif_running(dev)) {
10944 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10945 err2 = tg3_restart_hw(tp, 1);
10947 tg3_netif_start(tp);
10950 tg3_full_unlock(tp);
10952 if (irq_sync && !err2)
10955 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10956 tg3_set_power_state(tp, PCI_D3hot);
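/* MII ioctls.  When phylib manages the PHY these are forwarded to
 * phy_mii_ioctl(); otherwise the register access is done directly with
 * tg3_readphy()/tg3_writephy() under tp->lock.
 */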
10960 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10962 struct mii_ioctl_data *data = if_mii(ifr);
10963 struct tg3 *tp = netdev_priv(dev);
10966 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10967 struct phy_device *phydev;
10968 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10970 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10971 return phy_mii_ioctl(phydev, ifr, cmd);
10976 data->phy_id = tp->phy_addr;
10979 case SIOCGMIIREG: {
10982 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10983 break; /* We have no PHY */
10985 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10988 spin_lock_bh(&tp->lock);
10989 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10990 spin_unlock_bh(&tp->lock);
10992 data->val_out = mii_regval;
10998 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10999 break; /* We have no PHY */
11001 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11004 spin_lock_bh(&tp->lock);
11005 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11006 spin_unlock_bh(&tp->lock);
11014 return -EOPNOTSUPP;
11017 #if TG3_VLAN_TAG_USED
11018 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11020 struct tg3 *tp = netdev_priv(dev);
11022 if (!netif_running(dev)) {
11027 tg3_netif_stop(tp);
11029 tg3_full_lock(tp, 0);
11033 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11034 __tg3_set_rx_mode(dev);
11036 tg3_netif_start(tp);
11038 tg3_full_unlock(tp);
11042 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11044 struct tg3 *tp = netdev_priv(dev);
11046 memcpy(ec, &tp->coal, sizeof(*ec));
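/* ethtool set_coalesce: validate the request against the chip limits
 * (the per-interrupt and statistics-block variants are forced to zero
 * on 5705-and-later parts), copy the supported fields into tp->coal,
 * and push them to the hardware with __tg3_set_coalesce() if the
 * device is up.
 */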
11050 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11052 struct tg3 *tp = netdev_priv(dev);
11053 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11054 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11056 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11057 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11058 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11059 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11060 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11063 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11064 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11065 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11066 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11067 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11068 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11069 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11070 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11071 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11072 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11075 /* No rx interrupts will be generated if both are zero */
11076 if ((ec->rx_coalesce_usecs == 0) &&
11077 (ec->rx_max_coalesced_frames == 0))
11080 /* No tx interrupts will be generated if both are zero */
11081 if ((ec->tx_coalesce_usecs == 0) &&
11082 (ec->tx_max_coalesced_frames == 0))
11085 /* Only copy relevant parameters, ignore all others. */
11086 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11087 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11088 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11089 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11090 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11091 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11092 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11093 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11094 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11096 if (netif_running(dev)) {
11097 tg3_full_lock(tp, 0);
11098 __tg3_set_coalesce(tp, &tp->coal);
11099 tg3_full_unlock(tp);
11104 static const struct ethtool_ops tg3_ethtool_ops = {
11105 .get_settings = tg3_get_settings,
11106 .set_settings = tg3_set_settings,
11107 .get_drvinfo = tg3_get_drvinfo,
11108 .get_regs_len = tg3_get_regs_len,
11109 .get_regs = tg3_get_regs,
11110 .get_wol = tg3_get_wol,
11111 .set_wol = tg3_set_wol,
11112 .get_msglevel = tg3_get_msglevel,
11113 .set_msglevel = tg3_set_msglevel,
11114 .nway_reset = tg3_nway_reset,
11115 .get_link = ethtool_op_get_link,
11116 .get_eeprom_len = tg3_get_eeprom_len,
11117 .get_eeprom = tg3_get_eeprom,
11118 .set_eeprom = tg3_set_eeprom,
11119 .get_ringparam = tg3_get_ringparam,
11120 .set_ringparam = tg3_set_ringparam,
11121 .get_pauseparam = tg3_get_pauseparam,
11122 .set_pauseparam = tg3_set_pauseparam,
11123 .get_rx_csum = tg3_get_rx_csum,
11124 .set_rx_csum = tg3_set_rx_csum,
11125 .set_tx_csum = tg3_set_tx_csum,
11126 .set_sg = ethtool_op_set_sg,
11127 .set_tso = tg3_set_tso,
11128 .self_test = tg3_self_test,
11129 .get_strings = tg3_get_strings,
11130 .phys_id = tg3_phys_id,
11131 .get_ethtool_stats = tg3_get_ethtool_stats,
11132 .get_coalesce = tg3_get_coalesce,
11133 .set_coalesce = tg3_set_coalesce,
11134 .get_sset_count = tg3_get_sset_count,
11137 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11139 u32 cursize, val, magic;
11141 tp->nvram_size = EEPROM_CHIP_SIZE;
11143 if (tg3_nvram_read(tp, 0, &magic) != 0)
11146 if ((magic != TG3_EEPROM_MAGIC) &&
11147 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11148 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11152 * Size the chip by reading offsets at increasing powers of two.
11153 * When we encounter our validation signature, we know the addressing
11154 * has wrapped around, and thus have our chip size.
11158 while (cursize < tp->nvram_size) {
11159 if (tg3_nvram_read(tp, cursize, &val) != 0)
11168 tp->nvram_size = cursize;
11171 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11175 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11176 tg3_nvram_read(tp, 0, &val) != 0)
11179 /* Selfboot format */
11180 if (val != TG3_EEPROM_MAGIC) {
11181 tg3_get_eeprom_size(tp);
11185 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11187 /* This is confusing. We want to operate on the
11188 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11189 * call will read from NVRAM and byteswap the data
11190 * according to the byteswapping settings for all
11191 * other register accesses. This ensures the data we
11192 * want will always reside in the lower 16-bits.
11193 * However, the data in NVRAM is in LE format, which
11194 * means the data from the NVRAM read will always be
11195 * opposite the endianness of the CPU. The 16-bit
11196 * byteswap then brings the data to CPU endianness.
11198 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11202 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
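/* Decode NVRAM_CFG1 to work out which flash or EEPROM part is attached
 * (vendor, page size, buffered vs. unbuffered) on 5750 and 5780-class
 * devices; anything else falls back to the buffered Atmel AT45DB0x1B
 * settings.
 */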
11205 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11209 nvcfg1 = tr32(NVRAM_CFG1);
11210 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11211 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11213 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11214 tw32(NVRAM_CFG1, nvcfg1);
11217 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11218 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11219 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11220 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11221 tp->nvram_jedecnum = JEDEC_ATMEL;
11222 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11223 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11225 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11226 tp->nvram_jedecnum = JEDEC_ATMEL;
11227 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11229 case FLASH_VENDOR_ATMEL_EEPROM:
11230 tp->nvram_jedecnum = JEDEC_ATMEL;
11231 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11232 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11234 case FLASH_VENDOR_ST:
11235 tp->nvram_jedecnum = JEDEC_ST;
11236 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11237 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11239 case FLASH_VENDOR_SAIFUN:
11240 tp->nvram_jedecnum = JEDEC_SAIFUN;
11241 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11243 case FLASH_VENDOR_SST_SMALL:
11244 case FLASH_VENDOR_SST_LARGE:
11245 tp->nvram_jedecnum = JEDEC_SST;
11246 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11250 tp->nvram_jedecnum = JEDEC_ATMEL;
11251 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11252 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11256 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11258 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11259 case FLASH_5752PAGE_SIZE_256:
11260 tp->nvram_pagesize = 256;
11262 case FLASH_5752PAGE_SIZE_512:
11263 tp->nvram_pagesize = 512;
11265 case FLASH_5752PAGE_SIZE_1K:
11266 tp->nvram_pagesize = 1024;
11268 case FLASH_5752PAGE_SIZE_2K:
11269 tp->nvram_pagesize = 2048;
11271 case FLASH_5752PAGE_SIZE_4K:
11272 tp->nvram_pagesize = 4096;
11274 case FLASH_5752PAGE_SIZE_264:
11275 tp->nvram_pagesize = 264;
11277 case FLASH_5752PAGE_SIZE_528:
11278 tp->nvram_pagesize = 528;
11283 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11287 nvcfg1 = tr32(NVRAM_CFG1);
11289 /* NVRAM protection for TPM */
11290 if (nvcfg1 & (1 << 27))
11291 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11293 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11294 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11295 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11296 tp->nvram_jedecnum = JEDEC_ATMEL;
11297 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11299 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11300 tp->nvram_jedecnum = JEDEC_ATMEL;
11301 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11302 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11304 case FLASH_5752VENDOR_ST_M45PE10:
11305 case FLASH_5752VENDOR_ST_M45PE20:
11306 case FLASH_5752VENDOR_ST_M45PE40:
11307 tp->nvram_jedecnum = JEDEC_ST;
11308 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11309 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11313 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11314 tg3_nvram_get_pagesize(tp, nvcfg1);
11316 /* For eeprom, set pagesize to maximum eeprom size */
11317 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11319 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11320 tw32(NVRAM_CFG1, nvcfg1);
11324 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11326 u32 nvcfg1, protect = 0;
11328 nvcfg1 = tr32(NVRAM_CFG1);
11330 /* NVRAM protection for TPM */
11331 if (nvcfg1 & (1 << 27)) {
11332 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11336 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11338 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11339 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11340 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11341 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11342 tp->nvram_jedecnum = JEDEC_ATMEL;
11343 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11344 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11345 tp->nvram_pagesize = 264;
11346 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11347 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11348 tp->nvram_size = (protect ? 0x3e200 :
11349 TG3_NVRAM_SIZE_512KB);
11350 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11351 tp->nvram_size = (protect ? 0x1f200 :
11352 TG3_NVRAM_SIZE_256KB);
11354 tp->nvram_size = (protect ? 0x1f200 :
11355 TG3_NVRAM_SIZE_128KB);
11357 case FLASH_5752VENDOR_ST_M45PE10:
11358 case FLASH_5752VENDOR_ST_M45PE20:
11359 case FLASH_5752VENDOR_ST_M45PE40:
11360 tp->nvram_jedecnum = JEDEC_ST;
11361 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11362 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11363 tp->nvram_pagesize = 256;
11364 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11365 tp->nvram_size = (protect ?
11366 TG3_NVRAM_SIZE_64KB :
11367 TG3_NVRAM_SIZE_128KB);
11368 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11369 tp->nvram_size = (protect ?
11370 TG3_NVRAM_SIZE_64KB :
11371 TG3_NVRAM_SIZE_256KB);
11373 tp->nvram_size = (protect ?
11374 TG3_NVRAM_SIZE_128KB :
11375 TG3_NVRAM_SIZE_512KB);
11380 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11384 nvcfg1 = tr32(NVRAM_CFG1);
11386 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11387 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11388 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11389 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11390 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11391 tp->nvram_jedecnum = JEDEC_ATMEL;
11392 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11393 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11395 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11396 tw32(NVRAM_CFG1, nvcfg1);
11398 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11399 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11400 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11401 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11402 tp->nvram_jedecnum = JEDEC_ATMEL;
11403 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11404 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11405 tp->nvram_pagesize = 264;
11407 case FLASH_5752VENDOR_ST_M45PE10:
11408 case FLASH_5752VENDOR_ST_M45PE20:
11409 case FLASH_5752VENDOR_ST_M45PE40:
11410 tp->nvram_jedecnum = JEDEC_ST;
11411 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11412 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11413 tp->nvram_pagesize = 256;
11418 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11420 u32 nvcfg1, protect = 0;
11422 nvcfg1 = tr32(NVRAM_CFG1);
11424 /* NVRAM protection for TPM */
11425 if (nvcfg1 & (1 << 27)) {
11426 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11430 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11432 case FLASH_5761VENDOR_ATMEL_ADB021D:
11433 case FLASH_5761VENDOR_ATMEL_ADB041D:
11434 case FLASH_5761VENDOR_ATMEL_ADB081D:
11435 case FLASH_5761VENDOR_ATMEL_ADB161D:
11436 case FLASH_5761VENDOR_ATMEL_MDB021D:
11437 case FLASH_5761VENDOR_ATMEL_MDB041D:
11438 case FLASH_5761VENDOR_ATMEL_MDB081D:
11439 case FLASH_5761VENDOR_ATMEL_MDB161D:
11440 tp->nvram_jedecnum = JEDEC_ATMEL;
11441 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11442 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11443 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11444 tp->nvram_pagesize = 256;
11446 case FLASH_5761VENDOR_ST_A_M45PE20:
11447 case FLASH_5761VENDOR_ST_A_M45PE40:
11448 case FLASH_5761VENDOR_ST_A_M45PE80:
11449 case FLASH_5761VENDOR_ST_A_M45PE16:
11450 case FLASH_5761VENDOR_ST_M_M45PE20:
11451 case FLASH_5761VENDOR_ST_M_M45PE40:
11452 case FLASH_5761VENDOR_ST_M_M45PE80:
11453 case FLASH_5761VENDOR_ST_M_M45PE16:
11454 tp->nvram_jedecnum = JEDEC_ST;
11455 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11456 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11457 tp->nvram_pagesize = 256;
11462 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11465 case FLASH_5761VENDOR_ATMEL_ADB161D:
11466 case FLASH_5761VENDOR_ATMEL_MDB161D:
11467 case FLASH_5761VENDOR_ST_A_M45PE16:
11468 case FLASH_5761VENDOR_ST_M_M45PE16:
11469 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11471 case FLASH_5761VENDOR_ATMEL_ADB081D:
11472 case FLASH_5761VENDOR_ATMEL_MDB081D:
11473 case FLASH_5761VENDOR_ST_A_M45PE80:
11474 case FLASH_5761VENDOR_ST_M_M45PE80:
11475 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11477 case FLASH_5761VENDOR_ATMEL_ADB041D:
11478 case FLASH_5761VENDOR_ATMEL_MDB041D:
11479 case FLASH_5761VENDOR_ST_A_M45PE40:
11480 case FLASH_5761VENDOR_ST_M_M45PE40:
11481 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11483 case FLASH_5761VENDOR_ATMEL_ADB021D:
11484 case FLASH_5761VENDOR_ATMEL_MDB021D:
11485 case FLASH_5761VENDOR_ST_A_M45PE20:
11486 case FLASH_5761VENDOR_ST_M_M45PE20:
11487 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11493 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11495 tp->nvram_jedecnum = JEDEC_ATMEL;
11496 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11497 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11500 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11504 nvcfg1 = tr32(NVRAM_CFG1);
11506 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11507 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11508 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11509 tp->nvram_jedecnum = JEDEC_ATMEL;
11510 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11511 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11513 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11514 tw32(NVRAM_CFG1, nvcfg1);
11516 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11517 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11518 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11519 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11520 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11521 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11522 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11523 tp->nvram_jedecnum = JEDEC_ATMEL;
11524 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11525 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11527 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11528 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11529 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11530 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11531 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11533 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11534 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11535 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11537 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11538 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11539 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11543 case FLASH_5752VENDOR_ST_M45PE10:
11544 case FLASH_5752VENDOR_ST_M45PE20:
11545 case FLASH_5752VENDOR_ST_M45PE40:
11546 tp->nvram_jedecnum = JEDEC_ST;
11547 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11548 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11550 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11551 case FLASH_5752VENDOR_ST_M45PE10:
11552 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11554 case FLASH_5752VENDOR_ST_M45PE20:
11555 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11557 case FLASH_5752VENDOR_ST_M45PE40:
11558 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11563 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11567 tg3_nvram_get_pagesize(tp, nvcfg1);
11568 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11569 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11573 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11577 nvcfg1 = tr32(NVRAM_CFG1);
11579 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11580 case FLASH_5717VENDOR_ATMEL_EEPROM:
11581 case FLASH_5717VENDOR_MICRO_EEPROM:
11582 tp->nvram_jedecnum = JEDEC_ATMEL;
11583 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11584 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11586 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11587 tw32(NVRAM_CFG1, nvcfg1);
11589 case FLASH_5717VENDOR_ATMEL_MDB011D:
11590 case FLASH_5717VENDOR_ATMEL_ADB011B:
11591 case FLASH_5717VENDOR_ATMEL_ADB011D:
11592 case FLASH_5717VENDOR_ATMEL_MDB021D:
11593 case FLASH_5717VENDOR_ATMEL_ADB021B:
11594 case FLASH_5717VENDOR_ATMEL_ADB021D:
11595 case FLASH_5717VENDOR_ATMEL_45USPT:
11596 tp->nvram_jedecnum = JEDEC_ATMEL;
11597 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11598 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11600 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11601 case FLASH_5717VENDOR_ATMEL_MDB021D:
11602 case FLASH_5717VENDOR_ATMEL_ADB021B:
11603 case FLASH_5717VENDOR_ATMEL_ADB021D:
11604 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11607 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11611 case FLASH_5717VENDOR_ST_M_M25PE10:
11612 case FLASH_5717VENDOR_ST_A_M25PE10:
11613 case FLASH_5717VENDOR_ST_M_M45PE10:
11614 case FLASH_5717VENDOR_ST_A_M45PE10:
11615 case FLASH_5717VENDOR_ST_M_M25PE20:
11616 case FLASH_5717VENDOR_ST_A_M25PE20:
11617 case FLASH_5717VENDOR_ST_M_M45PE20:
11618 case FLASH_5717VENDOR_ST_A_M45PE20:
11619 case FLASH_5717VENDOR_ST_25USPT:
11620 case FLASH_5717VENDOR_ST_45USPT:
11621 tp->nvram_jedecnum = JEDEC_ST;
11622 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11623 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11625 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11626 case FLASH_5717VENDOR_ST_M_M25PE20:
11627 case FLASH_5717VENDOR_ST_A_M25PE20:
11628 case FLASH_5717VENDOR_ST_M_M45PE20:
11629 case FLASH_5717VENDOR_ST_A_M45PE20:
11630 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11633 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11638 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11642 tg3_nvram_get_pagesize(tp, nvcfg1);
11643 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11644 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11647 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11648 static void __devinit tg3_nvram_init(struct tg3 *tp)
11650 tw32_f(GRC_EEPROM_ADDR,
11651 (EEPROM_ADDR_FSM_RESET |
11652 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11653 EEPROM_ADDR_CLKPERD_SHIFT)));
11657 /* Enable seeprom accesses. */
11658 tw32_f(GRC_LOCAL_CTRL,
11659 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11662 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11663 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11664 tp->tg3_flags |= TG3_FLAG_NVRAM;
11666 if (tg3_nvram_lock(tp)) {
11667 netdev_warn(tp->dev,
11668 "Cannot get nvram lock, %s failed\n",
11672 tg3_enable_nvram_access(tp);
11674 tp->nvram_size = 0;
11676 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11677 tg3_get_5752_nvram_info(tp);
11678 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11679 tg3_get_5755_nvram_info(tp);
11680 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11682 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11683 tg3_get_5787_nvram_info(tp);
11684 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11685 tg3_get_5761_nvram_info(tp);
11686 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11687 tg3_get_5906_nvram_info(tp);
11688 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11690 tg3_get_57780_nvram_info(tp);
11691 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11692 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11693 tg3_get_5717_nvram_info(tp);
11695 tg3_get_nvram_info(tp);
11697 if (tp->nvram_size == 0)
11698 tg3_get_nvram_size(tp);
11700 tg3_disable_nvram_access(tp);
11701 tg3_nvram_unlock(tp);
11704 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11706 tg3_get_eeprom_size(tp);
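/* Write a dword-aligned block through the legacy SEEPROM interface, one
 * 32-bit word at a time, polling EEPROM_ADDR_COMPLETE after each word.
 */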
11710 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11711 u32 offset, u32 len, u8 *buf)
11716 for (i = 0; i < len; i += 4) {
11722 memcpy(&data, buf + i, 4);
11725 * The SEEPROM interface expects the data to always be opposite
11726 * the native endian format. We accomplish this by reversing
11727 * all the operations that would have been performed on the
11728 * data from a call to tg3_nvram_read_be32().
11730 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11732 val = tr32(GRC_EEPROM_ADDR);
11733 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11735 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11737 tw32(GRC_EEPROM_ADDR, val |
11738 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11739 (addr & EEPROM_ADDR_ADDR_MASK) |
11740 EEPROM_ADDR_START |
11741 EEPROM_ADDR_WRITE);
11743 for (j = 0; j < 1000; j++) {
11744 val = tr32(GRC_EEPROM_ADDR);
11746 if (val & EEPROM_ADDR_COMPLETE)
11750 if (!(val & EEPROM_ADDR_COMPLETE)) {
11759 /* offset and length are dword aligned */
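/* Unbuffered flash parts can only be erased a page at a time, so each
 * affected page is read into a temporary buffer, merged with the new
 * data, erased, and rewritten word by word.
 */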
11760 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11764 u32 pagesize = tp->nvram_pagesize;
11765 u32 pagemask = pagesize - 1;
11769 tmp = kmalloc(pagesize, GFP_KERNEL);
11775 u32 phy_addr, page_off, size;
11777 phy_addr = offset & ~pagemask;
11779 for (j = 0; j < pagesize; j += 4) {
11780 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11781 (__be32 *) (tmp + j));
11788 page_off = offset & pagemask;
11795 memcpy(tmp + page_off, buf, size);
11797 offset = offset + (pagesize - page_off);
11799 tg3_enable_nvram_access(tp);
11802 * Before we can erase the flash page, we need
11803 * to issue a special "write enable" command.
11805 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11807 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11810 /* Erase the target page */
11811 tw32(NVRAM_ADDR, phy_addr);
11813 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11814 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11816 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11819 /* Issue another write enable to start the write. */
11820 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11822 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11825 for (j = 0; j < pagesize; j += 4) {
11828 data = *((__be32 *) (tmp + j));
11830 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11832 tw32(NVRAM_ADDR, phy_addr + j);
11834 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11838 nvram_cmd |= NVRAM_CMD_FIRST;
11839 else if (j == (pagesize - 4))
11840 nvram_cmd |= NVRAM_CMD_LAST;
11842 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11849 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11850 tg3_nvram_exec_cmd(tp, nvram_cmd);
11857 /* offset and length are dword aligned */
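/* Buffered flash and EEPROM parts accept writes directly: issue one
 * NVRAM write command per dword, flagging page boundaries with
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST as needed.
 */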
11858 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11863 for (i = 0; i < len; i += 4, offset += 4) {
11864 u32 page_off, phy_addr, nvram_cmd;
11867 memcpy(&data, buf + i, 4);
11868 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11870 page_off = offset % tp->nvram_pagesize;
11872 phy_addr = tg3_nvram_phys_addr(tp, offset);
11874 tw32(NVRAM_ADDR, phy_addr);
11876 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11878 if (page_off == 0 || i == 0)
11879 nvram_cmd |= NVRAM_CMD_FIRST;
11880 if (page_off == (tp->nvram_pagesize - 4))
11881 nvram_cmd |= NVRAM_CMD_LAST;
11883 if (i == (len - 4))
11884 nvram_cmd |= NVRAM_CMD_LAST;
11886 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11887 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11888 (tp->nvram_jedecnum == JEDEC_ST) &&
11889 (nvram_cmd & NVRAM_CMD_FIRST)) {
11891 if ((ret = tg3_nvram_exec_cmd(tp,
11892 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11897 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11898 /* We always do complete word writes to eeprom. */
11899 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11902 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11908 /* offset and length are dword aligned */
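/* Top-level NVRAM write path: temporarily lift the eeprom write-protect
 * GPIO if necessary, take the NVRAM lock, enable write access, and
 * dispatch to the buffered or unbuffered helper.
 */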
11909 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11913 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11914 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11915 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11919 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11920 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11924 ret = tg3_nvram_lock(tp);
11928 tg3_enable_nvram_access(tp);
11929 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11930 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11931 tw32(NVRAM_WRITE1, 0x406);
11933 grc_mode = tr32(GRC_MODE);
11934 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11936 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11937 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11939 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11942 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11946 grc_mode = tr32(GRC_MODE);
11947 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11949 tg3_disable_nvram_access(tp);
11950 tg3_nvram_unlock(tp);
11953 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11954 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
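/* Some boards do not record a usable PHY ID in NVRAM; this table maps
 * their PCI subsystem vendor/device IDs to the correct PHY ID instead.
 */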
11961 struct subsys_tbl_ent {
11962 u16 subsys_vendor, subsys_devid;
11966 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
11967 /* Broadcom boards. */
11968 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11969 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
11970 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11971 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
11972 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11973 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
11974 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11975 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
11976 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11977 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
11978 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11979 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
11980 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11981 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
11982 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11983 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
11984 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11985 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
11986 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11987 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
11988 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11989 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
11992 { TG3PCI_SUBVENDOR_ID_3COM,
11993 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
11994 { TG3PCI_SUBVENDOR_ID_3COM,
11995 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
11996 { TG3PCI_SUBVENDOR_ID_3COM,
11997 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
11998 { TG3PCI_SUBVENDOR_ID_3COM,
11999 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12000 { TG3PCI_SUBVENDOR_ID_3COM,
12001 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12004 { TG3PCI_SUBVENDOR_ID_DELL,
12005 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12006 { TG3PCI_SUBVENDOR_ID_DELL,
12007 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12008 { TG3PCI_SUBVENDOR_ID_DELL,
12009 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12010 { TG3PCI_SUBVENDOR_ID_DELL,
12011 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12013 /* Compaq boards. */
12014 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12015 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12016 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12017 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12018 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12019 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12020 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12021 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12022 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12023 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12026 { TG3PCI_SUBVENDOR_ID_IBM,
12027 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12030 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12034 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12035 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12036 tp->pdev->subsystem_vendor) &&
12037 (subsys_id_to_phy_id[i].subsys_devid ==
12038 tp->pdev->subsystem_device))
12039 return &subsys_id_to_phy_id[i];
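/* Read the hardware configuration that bootcode leaves in NIC SRAM
 * (shared memory): PHY ID, LED mode, WOL capability, and the
 * ASF/APE-related feature flags.
 */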
12044 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12049 /* On some early chips the SRAM cannot be accessed in D3hot state,
12050 * so we need to make sure we're in D0.
12052 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12053 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12054 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12057 /* Make sure register accesses (indirect or otherwise)
12058 * will function correctly.
12060 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12061 tp->misc_host_ctrl);
12063 /* The memory arbiter has to be enabled in order for SRAM accesses
12064 * to succeed. Normally on powerup the tg3 chip firmware will make
12065 * sure it is enabled, but other entities such as system netboot
12066 * code might disable it.
12068 val = tr32(MEMARB_MODE);
12069 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12071 tp->phy_id = TG3_PHY_ID_INVALID;
12072 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12074 /* Assume an onboard device and WOL capable by default. */
12075 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12078 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12079 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12080 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12082 val = tr32(VCPU_CFGSHDW);
12083 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12084 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12085 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12086 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12087 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12091 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12092 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12093 u32 nic_cfg, led_cfg;
12094 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12095 int eeprom_phy_serdes = 0;
12097 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12098 tp->nic_sram_data_cfg = nic_cfg;
12100 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12101 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12102 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12103 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12104 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12105 (ver > 0) && (ver < 0x100))
12106 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12109 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12111 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12112 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12113 eeprom_phy_serdes = 1;
12115 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12116 if (nic_phy_id != 0) {
12117 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12118 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12120 eeprom_phy_id = (id1 >> 16) << 10;
12121 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12122 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12126 tp->phy_id = eeprom_phy_id;
12127 if (eeprom_phy_serdes) {
12128 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12129 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12131 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12134 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12135 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12136 SHASTA_EXT_LED_MODE_MASK);
12138 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12142 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12143 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12146 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12147 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12150 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12151 tp->led_ctrl = LED_CTRL_MODE_MAC;
12153 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12154 * read on some older 5700/5701 bootcode.
12156 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12158 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12160 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12164 case SHASTA_EXT_LED_SHARED:
12165 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12166 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12167 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12168 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12169 LED_CTRL_MODE_PHY_2);
12172 case SHASTA_EXT_LED_MAC:
12173 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12176 case SHASTA_EXT_LED_COMBO:
12177 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12178 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12179 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12180 LED_CTRL_MODE_PHY_2);
12185 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12186 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12187 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12188 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12190 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12191 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12193 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12194 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12195 if ((tp->pdev->subsystem_vendor ==
12196 PCI_VENDOR_ID_ARIMA) &&
12197 (tp->pdev->subsystem_device == 0x205a ||
12198 tp->pdev->subsystem_device == 0x2063))
12199 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12201 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12202 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12205 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12206 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12207 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12208 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12211 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12212 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12213 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12215 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12216 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12217 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12219 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12220 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12221 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12223 if (cfg2 & (1 << 17))
12224 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12226 /* serdes signal pre-emphasis in register 0x590 is set by the
12227 * bootcode if bit 18 is set. */
12228 if (cfg2 & (1 << 18))
12229 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12231 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12232 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12233 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12234 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12236 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12237 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12238 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
12241 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12242 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12243 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12246 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12247 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12248 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12249 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12250 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12251 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12254 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12255 device_set_wakeup_enable(&tp->pdev->dev,
12256 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
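/* Issue a single OTP controller command and poll for up to ~1 ms for
 * OTP_STATUS_CMD_DONE.
 */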
12259 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12264 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12265 tw32(OTP_CTRL, cmd);
12267 /* Wait for up to 1 ms for command to execute. */
12268 for (i = 0; i < 100; i++) {
12269 val = tr32(OTP_STATUS);
12270 if (val & OTP_STATUS_CMD_DONE)
12275 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12278 /* Read the gphy configuration from the OTP region of the chip. The gphy
12279 * configuration is a 32-bit value that straddles the alignment boundary.
12280 * We do two 32-bit reads and then shift and merge the results.
12282 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12284 u32 bhalf_otp, thalf_otp;
12286 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12288 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12291 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12293 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12296 thalf_otp = tr32(OTP_READ_DATA);
12298 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12300 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12303 bhalf_otp = tr32(OTP_READ_DATA);
12305 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
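/* Identify the attached PHY: read MII_PHYSID1/2 when it is safe to do
 * so, otherwise fall back to the ID recorded in SRAM or the subsystem-ID
 * table, then set up autonegotiation advertisements for copper parts.
 */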
12308 static int __devinit tg3_phy_probe(struct tg3 *tp)
12310 u32 hw_phy_id_1, hw_phy_id_2;
12311 u32 hw_phy_id, hw_phy_id_masked;
12314 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12315 return tg3_phy_init(tp);
12317 /* Reading the PHY ID register can conflict with ASF
12318 * firmware access to the PHY hardware.
12321 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12322 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12323 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12325 /* Now read the physical PHY_ID from the chip and verify
12326 * that it is sane. If it doesn't look good, we fall back
12327 * to either the hard-coded table based PHY_ID and failing
12328 * that the value found in the eeprom area.
12330 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12331 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12333 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12334 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12335 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12337 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12340 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12341 tp->phy_id = hw_phy_id;
12342 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12343 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12345 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12347 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12348 /* Do nothing, phy ID already set up in
12349 * tg3_get_eeprom_hw_cfg().
12352 struct subsys_tbl_ent *p;
12354 /* No eeprom signature? Try the hardcoded
12355 * subsys device table.
12357 p = tg3_lookup_by_subsys(tp);
12361 tp->phy_id = p->phy_id;
12363 tp->phy_id == TG3_PHY_ID_BCM8002)
12364 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12368 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12369 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12370 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12371 u32 bmsr, adv_reg, tg3_ctrl, mask;
12373 tg3_readphy(tp, MII_BMSR, &bmsr);
12374 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12375 (bmsr & BMSR_LSTATUS))
12376 goto skip_phy_reset;
12378 err = tg3_phy_reset(tp);
12382 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12383 ADVERTISE_100HALF | ADVERTISE_100FULL |
12384 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12386 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12387 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12388 MII_TG3_CTRL_ADV_1000_FULL);
12389 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12390 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12391 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12392 MII_TG3_CTRL_ENABLE_AS_MASTER);
12395 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12396 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12397 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12398 if (!tg3_copper_is_advertising_all(tp, mask)) {
12399 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12401 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12402 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12404 tg3_writephy(tp, MII_BMCR,
12405 BMCR_ANENABLE | BMCR_ANRESTART);
12407 tg3_phy_set_wirespeed(tp);
12409 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12410 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12411 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12415 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12416 err = tg3_init_5401phy_dsp(tp);
12420 err = tg3_init_5401phy_dsp(tp);
12423 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12424 tp->link_config.advertising =
12425 (ADVERTISED_1000baseT_Half |
12426 ADVERTISED_1000baseT_Full |
12427 ADVERTISED_Autoneg |
12429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
12430 tp->link_config.advertising &=
12431 ~(ADVERTISED_1000baseT_Half |
12432 ADVERTISED_1000baseT_Full);
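/* Parse the PCI VPD area (from NVRAM, or via config space when NVRAM is
 * absent) to recover the board part number and, on some OEM boards, a
 * firmware version string.
 */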
12437 static void __devinit tg3_read_vpd(struct tg3 *tp)
12440 unsigned int block_end, rosize, len;
12444 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12445 tg3_nvram_read(tp, 0x0, &magic))
12448 vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
12452 if (magic == TG3_EEPROM_MAGIC) {
12453 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12456 /* The data is in little-endian format in NVRAM.
12457 * Use the big-endian read routines to preserve
12458 * the byte order as it exists in NVRAM.
12460 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12461 goto out_not_found;
12463 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12467 unsigned int pos = 0;
12469 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12470 cnt = pci_read_vpd(tp->pdev, pos,
12471 TG3_NVM_VPD_LEN - pos,
12473 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12476 goto out_not_found;
12478 if (pos != TG3_NVM_VPD_LEN)
12479 goto out_not_found;
12482 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12483 PCI_VPD_LRDT_RO_DATA);
12485 goto out_not_found;
12487 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12488 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12489 i += PCI_VPD_LRDT_TAG_SIZE;
12491 if (block_end > TG3_NVM_VPD_LEN)
12492 goto out_not_found;
12494 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12495 PCI_VPD_RO_KEYWORD_MFR_ID);
12497 len = pci_vpd_info_field_size(&vpd_data[j]);
12499 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12500 if (j + len > block_end || len != 4 ||
12501 memcmp(&vpd_data[j], "1028", 4))
12504 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12505 PCI_VPD_RO_KEYWORD_VENDOR0);
12509 len = pci_vpd_info_field_size(&vpd_data[j]);
12511 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12512 if (j + len > block_end)
12515 memcpy(tp->fw_ver, &vpd_data[j], len);
12516 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
12520 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12521 PCI_VPD_RO_KEYWORD_PARTNO);
12523 goto out_not_found;
12525 len = pci_vpd_info_field_size(&vpd_data[i]);
12527 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12528 if (len > TG3_BPN_SIZE ||
12529 (len + i) > TG3_NVM_VPD_LEN)
12530 goto out_not_found;
12532 memcpy(tp->board_part_number, &vpd_data[i], len);
12536 if (!tp->board_part_number[0])
12540 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12541 strcpy(tp->board_part_number, "BCM95906");
12542 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12543 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12544 strcpy(tp->board_part_number, "BCM57780");
12545 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12546 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12547 strcpy(tp->board_part_number, "BCM57760");
12548 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12549 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12550 strcpy(tp->board_part_number, "BCM57790");
12551 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12552 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12553 strcpy(tp->board_part_number, "BCM57788");
12554 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12555 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12556 strcpy(tp->board_part_number, "BCM57761");
12557 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12558 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12559 strcpy(tp->board_part_number, "BCM57765");
12560 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12561 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12562 strcpy(tp->board_part_number, "BCM57781");
12563 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12564 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12565 strcpy(tp->board_part_number, "BCM57785");
12566 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12567 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12568 strcpy(tp->board_part_number, "BCM57791");
12569 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12570 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12571 strcpy(tp->board_part_number, "BCM57795");
12573 strcpy(tp->board_part_number, "none");
12576 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12580 if (tg3_nvram_read(tp, offset, &val) ||
12581 (val & 0xfc000000) != 0x0c000000 ||
12582 tg3_nvram_read(tp, offset + 4, &val) ||
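/* Extract the bootcode version from NVRAM, either as an embedded string
 * on newer images or as a major.minor pair, and append it to tp->fw_ver.
 */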
12589 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12591 u32 val, offset, start, ver_offset;
12593 bool newver = false;
12595 if (tg3_nvram_read(tp, 0xc, &offset) ||
12596 tg3_nvram_read(tp, 0x4, &start))
12599 offset = tg3_nvram_logical_addr(tp, offset);
12601 if (tg3_nvram_read(tp, offset, &val))
12604 if ((val & 0xfc000000) == 0x0c000000) {
12605 if (tg3_nvram_read(tp, offset + 4, &val))
12612 dst_off = strlen(tp->fw_ver);
12615 if (TG3_VER_SIZE - dst_off < 16 ||
12616 tg3_nvram_read(tp, offset + 8, &ver_offset))
12619 offset = offset + ver_offset - start;
12620 for (i = 0; i < 16; i += 4) {
12622 if (tg3_nvram_read_be32(tp, offset + i, &v))
12625 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12630 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12633 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12634 TG3_NVM_BCVER_MAJSFT;
12635 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12636 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12637 "v%d.%02d", major, minor);
12641 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12643 u32 val, major, minor;
12645 /* Use native endian representation */
12646 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12649 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12650 TG3_NVM_HWSB_CFG1_MAJSFT;
12651 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12652 TG3_NVM_HWSB_CFG1_MINSFT;
12654 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
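/* Decode the selfboot image revision and build fields and append them,
 * as "sb vX.YY" plus an optional build letter, to tp->fw_ver.
 */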
12657 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12659 u32 offset, major, minor, build;
12661 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12663 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12666 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12667 case TG3_EEPROM_SB_REVISION_0:
12668 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12670 case TG3_EEPROM_SB_REVISION_2:
12671 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12673 case TG3_EEPROM_SB_REVISION_3:
12674 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12676 case TG3_EEPROM_SB_REVISION_4:
12677 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12679 case TG3_EEPROM_SB_REVISION_5:
12680 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12686 if (tg3_nvram_read(tp, offset, &val))
12689 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12690 TG3_EEPROM_SB_EDH_BLD_SHFT;
12691 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12692 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12693 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12695 if (minor > 99 || build > 26)
12698 offset = strlen(tp->fw_ver);
12699 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12700 " v%d.%02d", major, minor);
12703 offset = strlen(tp->fw_ver);
12704 if (offset < TG3_VER_SIZE - 1)
12705 tp->fw_ver[offset] = 'a' + build - 1;
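/* Scan the NVRAM directory for the ASF/management firmware image and
 * append its version string to tp->fw_ver.
 */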
12709 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12711 u32 val, offset, start;
12714 for (offset = TG3_NVM_DIR_START;
12715 offset < TG3_NVM_DIR_END;
12716 offset += TG3_NVM_DIRENT_SIZE) {
12717 if (tg3_nvram_read(tp, offset, &val))
12720 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12724 if (offset == TG3_NVM_DIR_END)
12727 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12728 start = 0x08000000;
12729 else if (tg3_nvram_read(tp, offset - 4, &start))
12732 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12733 !tg3_fw_img_is_valid(tp, offset) ||
12734 tg3_nvram_read(tp, offset + 8, &val))
12737 offset += val - start;
12739 vlen = strlen(tp->fw_ver);
12741 tp->fw_ver[vlen++] = ',';
12742 tp->fw_ver[vlen++] = ' ';
12744 for (i = 0; i < 4; i++) {
12746 if (tg3_nvram_read_be32(tp, offset, &v))
12749 offset += sizeof(v);
12751 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12752 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12756 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
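/* Append the APE (DASH or NCSI) firmware version, read from the APE
 * shared memory registers, to tp->fw_ver.
 */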
12761 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12767 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12768 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12771 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12772 if (apedata != APE_SEG_SIG_MAGIC)
12775 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12776 if (!(apedata & APE_FW_STATUS_READY))
12779 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12781 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
12782 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
12788 vlen = strlen(tp->fw_ver);
12790 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
12792 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12793 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12794 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12795 (apedata & APE_FW_VERSION_BLDMSK));
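/* Assemble the complete firmware version string: bootcode, selfboot or
 * hardware-selfboot version first, then the management firmware version
 * when ASF is enabled without APE.
 */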
12798 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12801 bool vpd_vers = false;
12803 if (tp->fw_ver[0] != 0)
12806 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12807 strcat(tp->fw_ver, "sb");
12811 if (tg3_nvram_read(tp, 0, &val))
12814 if (val == TG3_EEPROM_MAGIC)
12815 tg3_read_bc_ver(tp);
12816 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12817 tg3_read_sb_ver(tp, val);
12818 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12819 tg3_read_hwsb_ver(tp);
12823 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12824 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
12827 tg3_read_mgmtfw_ver(tp);
12830 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12833 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12835 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
12837 #if TG3_VLAN_TAG_USED
12838 dev->vlan_features |= flags;
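/* tg3_get_invariants() discovers everything about the device that does
 * not change at runtime: chip revision, bus type (PCI, PCI-X, PCI Express),
 * register access methods, and the long list of per-chip workaround and
 * feature flags.
 */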
12842 static int __devinit tg3_get_invariants(struct tg3 *tp)
12844 static struct pci_device_id write_reorder_chipsets[] = {
12845 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12846 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12847 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12848 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12849 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12850 PCI_DEVICE_ID_VIA_8385_0) },
12854 u32 pci_state_reg, grc_misc_cfg;
12859 /* Force memory write invalidate off. If we leave it on,
12860 * then on 5700_BX chips we have to enable a workaround.
12861 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12862 * to match the cacheline size. The Broadcom driver has this
12863 * workaround but turns MWI off all the time, so it never uses
12864 * it. This seems to suggest that the workaround is insufficient.
12866 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12867 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12868 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12870 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12871 * has the register indirect write enable bit set before
12872 * we try to access any of the MMIO registers. It is also
12873 * critical that the PCI-X hw workaround situation is decided
12874 * before that as well.
12876 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12879 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12880 MISC_HOST_CTRL_CHIPREV_SHIFT);
12881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12882 u32 prod_id_asic_rev;
12884 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12885 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12886 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
12887 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
12888 pci_read_config_dword(tp->pdev,
12889 TG3PCI_GEN2_PRODID_ASICREV,
12890 &prod_id_asic_rev);
12891 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12892 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12893 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12894 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12895 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12896 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12897 pci_read_config_dword(tp->pdev,
12898 TG3PCI_GEN15_PRODID_ASICREV,
12899 &prod_id_asic_rev);
12901 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12902 &prod_id_asic_rev);
12904 tp->pci_chip_rev_id = prod_id_asic_rev;
12907 /* Wrong chip ID in 5752 A0. This code can be removed later
12908 * as A0 is not in production.
12910 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12911 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12913 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12914 * we need to disable memory and use config. cycles
12915 * only to access all registers. The 5702/03 chips
12916 * can mistakenly decode the special cycles from the
12917 * ICH chipsets as memory write cycles, causing corruption
12918 * of register and memory space. Only certain ICH bridges
12919 * will drive special cycles with non-zero data during the
12920 * address phase which can fall within the 5703's address
12921 * range. This is not an ICH bug as the PCI spec allows
12922 * non-zero address during special cycles. However, only
12923 * these ICH bridges are known to drive non-zero addresses
12924 * during special cycles.
12926 * Since special cycles do not cross PCI bridges, we only
12927 * enable this workaround if the 5703 is on the secondary
12928 * bus of these ICH bridges.
12930 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12931 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12932 static struct tg3_dev_id {
12936 } ich_chipsets[] = {
12937 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12939 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12941 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12943 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12947 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12948 struct pci_dev *bridge = NULL;
12950 while (pci_id->vendor != 0) {
12951 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12957 if (pci_id->rev != PCI_ANY_ID) {
12958 if (bridge->revision > pci_id->rev)
12961 if (bridge->subordinate &&
12962 (bridge->subordinate->number ==
12963 tp->pdev->bus->number)) {
12965 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12966 pci_dev_put(bridge);
12972 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12973 static struct tg3_dev_id {
12976 } bridge_chipsets[] = {
12977 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12978 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12981 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12982 struct pci_dev *bridge = NULL;
12984 while (pci_id->vendor != 0) {
12985 bridge = pci_get_device(pci_id->vendor,
12992 if (bridge->subordinate &&
12993 (bridge->subordinate->number <=
12994 tp->pdev->bus->number) &&
12995 (bridge->subordinate->subordinate >=
12996 tp->pdev->bus->number)) {
12997 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12998 pci_dev_put(bridge);
13004 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13005 * DMA addresses > 40-bit. This bridge may have other additional
13006 * 57xx devices behind it in some 4-port NIC designs for example.
13007 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround.
13010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13012 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13013 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13014 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13016 struct pci_dev *bridge = NULL;
13019 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13020 PCI_DEVICE_ID_SERVERWORKS_EPB,
13022 if (bridge && bridge->subordinate &&
13023 (bridge->subordinate->number <=
13024 tp->pdev->bus->number) &&
13025 (bridge->subordinate->subordinate >=
13026 tp->pdev->bus->number)) {
13027 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13028 pci_dev_put(bridge);
13034 /* Initialize misc host control in PCI block. */
13035 tp->misc_host_ctrl |= (misc_ctrl_reg &
13036 MISC_HOST_CTRL_CHIPREV);
13037 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13038 tp->misc_host_ctrl);
13040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13041 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13042 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13043 tp->pdev_peer = tg3_find_peer(tp);
13045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13048 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13050 /* Intentionally exclude ASIC_REV_5906 */
13051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13053 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13057 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13058 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13063 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13064 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13065 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13067 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13068 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13069 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13071 /* 5700 B0 chips do not support checksumming correctly due
13072 * to hardware bugs.
13074 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13075 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13077 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13079 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13080 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13081 features |= NETIF_F_IPV6_CSUM;
13082 tp->dev->features |= features;
13083 vlan_features_add(tp->dev, features);
13086 /* Determine TSO capabilities */
13087 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13088 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13089 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13091 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13092 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13093 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13095 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13096 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13097 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13098 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13099 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13100 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13102 tp->fw_needed = FIRMWARE_TG3TSO5;
13104 tp->fw_needed = FIRMWARE_TG3TSO;
13109 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13110 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13111 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13112 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13113 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13114 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13115 tp->pdev_peer == tp->pdev))
13116 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13118 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13119 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13120 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13123 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13124 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13125 tp->irq_max = TG3_IRQ_MAX_VECS;
13129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13132 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13133 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13134 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13135 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13138 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13139 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13141 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13142 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13143 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13144 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13146 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13149 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13150 if (tp->pcie_cap != 0) {
13153 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13155 pcie_set_readrq(tp->pdev, 4096);
13157 pci_read_config_word(tp->pdev,
13158 tp->pcie_cap + PCI_EXP_LNKCTL,
13160 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13162 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13163 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13164 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13165 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13166 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13167 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13168 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13169 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13171 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13172 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13173 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13174 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13175 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13176 if (!tp->pcix_cap) {
13177 dev_err(&tp->pdev->dev,
13178 "Cannot find PCI-X capability, aborting\n");
13182 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13183 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13186 /* If we have an AMD 762 or VIA K8T800 chipset, write
13187 * reordering to the mailbox registers done by the host
13188 * controller can cause major troubles. We read back from
13189 * every mailbox register write to force the writes to be
13190 * posted to the chip in order.
13192 if (pci_dev_present(write_reorder_chipsets) &&
13193 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13194 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13196 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13197 &tp->pci_cacheline_sz);
13198 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13199 &tp->pci_lat_timer);
13200 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13201 tp->pci_lat_timer < 64) {
13202 tp->pci_lat_timer = 64;
13203 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13204 tp->pci_lat_timer);
13207 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13208 /* 5700 BX chips need to have their TX producer index
13209 * mailboxes written twice to workaround a bug.
13211 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13213 /* If we are in PCI-X mode, enable register write workaround.
13215 * The workaround is to use indirect register accesses
13216 * for all chip writes except those to mailbox registers.
13218 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13221 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13223 /* The chip can have its power management PCI config
13224 * space registers clobbered due to this bug.
13225 * So explicitly force the chip into D0 here.
13227 pci_read_config_dword(tp->pdev,
13228 tp->pm_cap + PCI_PM_CTRL,
13230 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13231 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13232 pci_write_config_dword(tp->pdev,
13233 tp->pm_cap + PCI_PM_CTRL,
13236 /* Also, force SERR#/PERR# in PCI command. */
13237 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13238 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13239 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13243 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13244 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13245 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13246 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13248 /* Chip-specific fixup from Broadcom driver */
13249 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13250 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13251 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13252 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13255 /* Default fast path register access methods */
13256 tp->read32 = tg3_read32;
13257 tp->write32 = tg3_write32;
13258 tp->read32_mbox = tg3_read32;
13259 tp->write32_mbox = tg3_write32;
13260 tp->write32_tx_mbox = tg3_write32;
13261 tp->write32_rx_mbox = tg3_write32;
13263 /* Various workaround register access methods */
13264 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13265 tp->write32 = tg3_write_indirect_reg32;
13266 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13267 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13268 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13270 * Back-to-back register writes can cause problems on these
13271 * chips; the workaround is to read back all reg writes
13272 * except those to mailbox regs.
13274 * See tg3_write_indirect_reg32(). */
13276 tp->write32 = tg3_write_flush_reg32;
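/* In essence the flush variant just issues a dummy read of the same
 * register after the write, forcing any posted write buffering in the
 * host bridge to drain before the next access.  A minimal sketch (the
 * real helper is defined earlier in this file):
 *
 *	static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
 *	{
 *		writel(val, tp->regs + off);
 *		readl(tp->regs + off);
 *	}
 */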
13279 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13280 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13281 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13282 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13283 tp->write32_rx_mbox = tg3_write_flush_reg32;
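/* For the 5700 BX TXD mailbox bug the producer index is simply written
 * twice, and when host write reordering is possible the write is also
 * read back.  Roughly, as a sketch (the actual helper lives near the
 * top of this file):
 *
 *	static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
 *	{
 *		void __iomem *mbox = tp->regs + off;
 *
 *		writel(val, mbox);
 *		if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
 *			writel(val, mbox);
 *		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
 *			readl(mbox);
 *	}
 */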
13286 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13287 tp->read32 = tg3_read_indirect_reg32;
13288 tp->write32 = tg3_write_indirect_reg32;
13289 tp->read32_mbox = tg3_read_indirect_mbox;
13290 tp->write32_mbox = tg3_write_indirect_mbox;
13291 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13292 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13297 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13298 pci_cmd &= ~PCI_COMMAND_MEMORY;
13299 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13302 tp->read32_mbox = tg3_read32_mbox_5906;
13303 tp->write32_mbox = tg3_write32_mbox_5906;
13304 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13305 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13308 if (tp->write32 == tg3_write_indirect_reg32 ||
13309 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13310 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13312 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13314 /* Get eeprom hw config before calling tg3_set_power_state().
13315 * In particular, the TG3_FLG2_IS_NIC flag must be
13316 * determined before calling tg3_set_power_state() so that
13317 * we know whether or not to switch out of Vaux power.
13318 * When the flag is set, it means that GPIO1 is used for eeprom
13319 * write protect and also implies that it is a LOM where GPIOs
13320 * are not used to switch power.
13322 tg3_get_eeprom_hw_cfg(tp);
13324 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13325 /* Allow reads and writes to the
13326 * APE register and memory space.
13328 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13329 PCISTATE_ALLOW_APE_SHMEM_WR |
13330 PCISTATE_ALLOW_APE_PSPACE_WR;
13331 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13335 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13336 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13339 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13340 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13342 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13343 * GPIO1 driven high will bring 5700's external PHY out of reset.
13344 * It is also used as eeprom write protect on LOMs.
13346 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13347 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13348 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13349 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13350 GRC_LCLCTRL_GPIO_OUTPUT1);
13351 /* Unused GPIO3 must be driven as output on 5752 because there
13352 * are no pull-up resistors on unused GPIO pins.
13354 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13355 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13359 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13360 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13362 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13363 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13364 /* Turn off the debug UART. */
13365 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13366 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13367 /* Keep VMain power. */
13368 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13369 GRC_LCLCTRL_GPIO_OUTPUT0;
13372 /* Force the chip into D0. */
13373 err = tg3_set_power_state(tp, PCI_D0);
13375 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13379 /* Derive initial jumbo mode from MTU assigned in
13380 * ether_setup() via the alloc_etherdev() call
13382 if (tp->dev->mtu > ETH_DATA_LEN &&
13383 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13384 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13386 /* Determine WakeOnLan speed to use. */
13387 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13388 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13389 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13390 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13391 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13393 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13397 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13399 /* A few boards don't want Ethernet@WireSpeed phy feature */
13400 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13401 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13402 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13403 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13404 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13405 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13406 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13408 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13409 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13410 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13411 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13412 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13414 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13415 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13416 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13417 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13418 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
13419 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13422 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13423 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13424 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13425 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13426 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13427 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13429 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13433 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13434 tp->phy_otp = tg3_read_otp_phycfg(tp);
13435 if (tp->phy_otp == 0)
13436 tp->phy_otp = TG3_OTP_DEFAULT;
13439 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13440 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13442 tp->mi_mode = MAC_MI_MODE_BASE;
13444 tp->coalesce_mode = 0;
13445 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13446 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13447 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13451 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13453 err = tg3_mdio_init(tp);
13457 /* Initialize data/descriptor byte/word swapping. */
13458 val = tr32(GRC_MODE);
13459 val &= GRC_MODE_HOST_STACKUP;
13460 tw32(GRC_MODE, val | tp->grc_mode);
13462 tg3_switch_clocks(tp);
13464 /* Clear this out for sanity. */
13465 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13467 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13469 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13470 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13471 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13473 if (chiprevid == CHIPREV_ID_5701_A0 ||
13474 chiprevid == CHIPREV_ID_5701_B0 ||
13475 chiprevid == CHIPREV_ID_5701_B2 ||
13476 chiprevid == CHIPREV_ID_5701_B5) {
13477 void __iomem *sram_base;
13479 /* Write some dummy words into the SRAM status block
13480 * area and see if they read back correctly. If the return
13481 * value is bad, force-enable the PCIX workaround. */
13483 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13485 writel(0x00000000, sram_base);
13486 writel(0x00000000, sram_base + 4);
13487 writel(0xffffffff, sram_base + 4);
13488 if (readl(sram_base) != 0x00000000)
13489 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
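/* The probe above exploits the visible symptom of the bug: after
 * zeroing two adjacent status-block words, writing 0xffffffff to the
 * second word apparently corrupts the first on affected
 * configurations, so a non-zero readback of the first word
 * force-enables the indirect register access workaround.
 */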
13494 tg3_nvram_init(tp);
13496 grc_misc_cfg = tr32(GRC_MISC_CFG);
13497 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13500 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13501 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13502 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13504 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13505 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13506 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13507 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13508 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13509 HOSTCC_MODE_CLRTICK_TXBD);
13511 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13512 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13513 tp->misc_host_ctrl);
13516 /* Preserve the APE MAC_MODE bits */
13517 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13518 tp->mac_mode = tr32(MAC_MODE) |
13519 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13521 tp->mac_mode = TG3_DEF_MAC_MODE;
13523 /* these are limited to 10/100 only */
13524 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13525 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13526 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13527 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13528 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13529 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13530 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13531 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13532 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13533 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13534 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13535 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13536 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13537 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13538 (tp->phy_flags & TG3_PHYFLG_IS_FET))
13539 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
13541 err = tg3_phy_probe(tp);
13543 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13544 /* ... but do not return immediately ... */
13549 tg3_read_fw_ver(tp);
13551 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
13552 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13554 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13555 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13557 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13560 /* 5700 {AX,BX} chips have a broken status block link
13561 * change bit implementation, so we must use the
13562 * status register in those cases.
13564 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13565 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13567 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13569 /* The led_ctrl is set during tg3_phy_probe; here we might
13570 * have to force the link status polling mechanism based
13571 * upon subsystem IDs. */
13573 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13575 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
13576 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13577 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13580 /* For all SERDES we poll the MAC status register. */
13581 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13582 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13584 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13586 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
13587 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13588 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13589 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13590 tp->rx_offset -= NET_IP_ALIGN;
13591 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13592 tp->rx_copy_thresh = ~(u16)0;
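/* NET_IP_ALIGN is normally 2, so DMAing the frame 2 bytes into the
 * buffer leaves the IP header 4-byte aligned behind the 14-byte
 * Ethernet header (14 + 2 = 16).  On the 5701 in PCI-X mode the pad
 * is dropped (presumably a DMA start-address erratum), and where
 * unaligned loads are expensive the copy threshold is raised to ~0 so
 * every frame is copied into a properly aligned skb instead.
 */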
13596 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13598 /* Increment the rx prod index on the rx std ring by at most
13599 * 8 for these chips to work around hw errata.
13601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13602 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13603 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13604 tp->rx_std_max_post = 8;
13606 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13607 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13608 PCIE_PWR_MGMT_L1_THRESH_MSK;
13613 #ifdef CONFIG_SPARC
13614 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13616 struct net_device *dev = tp->dev;
13617 struct pci_dev *pdev = tp->pdev;
13618 struct device_node *dp = pci_device_to_OF_node(pdev);
13619 const unsigned char *addr;
13622 addr = of_get_property(dp, "local-mac-address", &len);
13623 if (addr && len == 6) {
13624 memcpy(dev->dev_addr, addr, 6);
13625 memcpy(dev->perm_addr, dev->dev_addr, 6);
13631 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13633 struct net_device *dev = tp->dev;
13635 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13636 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13641 static int __devinit tg3_get_device_address(struct tg3 *tp)
13643 struct net_device *dev = tp->dev;
13644 u32 hi, lo, mac_offset;
13647 #ifdef CONFIG_SPARC
13648 if (!tg3_get_macaddr_sparc(tp))
13653 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13654 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13655 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13657 if (tg3_nvram_lock(tp))
13658 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13660 tg3_nvram_unlock(tp);
13661 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13663 if (PCI_FUNC(tp->pdev->devfn) & 1)
13665 if (PCI_FUNC(tp->pdev->devfn) > 1)
13666 mac_offset += 0x18c;
13667 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13670 /* First try to get it from MAC address mailbox. */
13671 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
13672 if ((hi >> 16) == 0x484b) {
13673 dev->dev_addr[0] = (hi >> 8) & 0xff;
13674 dev->dev_addr[1] = (hi >> 0) & 0xff;
13676 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13677 dev->dev_addr[2] = (lo >> 24) & 0xff;
13678 dev->dev_addr[3] = (lo >> 16) & 0xff;
13679 dev->dev_addr[4] = (lo >> 8) & 0xff;
13680 dev->dev_addr[5] = (lo >> 0) & 0xff;
13682 /* Some old bootcode may report a 0 MAC address in SRAM */
13683 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
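/* The bootcode apparently packs the station address into the two
 * mailbox words with a 0x484b signature in the top half of the high
 * word, i.e. roughly the inverse of the unpacking above:
 *
 *	hi = 0x484b0000 | (addr[0] << 8) | addr[1];
 *	lo = (addr[2] << 24) | (addr[3] << 16) |
 *	     (addr[4] << 8)  |  addr[5];
 */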
13686 /* Next, try NVRAM. */
13687 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13688 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13689 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13690 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13691 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13693 /* Finally just fetch it out of the MAC control regs. */
13695 hi = tr32(MAC_ADDR_0_HIGH);
13696 lo = tr32(MAC_ADDR_0_LOW);
13698 dev->dev_addr[5] = lo & 0xff;
13699 dev->dev_addr[4] = (lo >> 8) & 0xff;
13700 dev->dev_addr[3] = (lo >> 16) & 0xff;
13701 dev->dev_addr[2] = (lo >> 24) & 0xff;
13702 dev->dev_addr[1] = hi & 0xff;
13703 dev->dev_addr[0] = (hi >> 8) & 0xff;
13707 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13708 #ifdef CONFIG_SPARC
13709 if (!tg3_get_default_macaddr_sparc(tp))
13714 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13718 #define BOUNDARY_SINGLE_CACHELINE 1
13719 #define BOUNDARY_MULTI_CACHELINE 2
13721 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13723 int cacheline_size;
13727 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13729 cacheline_size = 1024;
13731 cacheline_size = (int) byte * 4;
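/* PCI_CACHE_LINE_SIZE is specified in 32-bit dwords, hence the * 4 to
 * get bytes -- e.g. a register value of 0x10 means a 64-byte cache
 * line.  A value of 0 (register not programmed) falls back to 1024
 * bytes above.
 */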
13733 /* On 5703 and later chips, the boundary bits have no effect. */
13736 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13737 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13738 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13741 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13742 goal = BOUNDARY_MULTI_CACHELINE;
13744 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13745 goal = BOUNDARY_SINGLE_CACHELINE;
13751 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13752 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13759 /* PCI controllers on most RISC systems tend to disconnect
13760 * when a device tries to burst across a cache-line boundary.
13761 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13763 * Unfortunately, for PCI-E there are only limited
13764 * write-side controls for this, and thus for reads
13765 * we will still get the disconnects. We'll also waste
13766 * these PCI cycles for both read and write for chips
13767 * other than 5700 and 5701, which do not implement the boundary bits. */
13770 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13771 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13772 switch (cacheline_size) {
13777 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13778 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13779 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13781 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13782 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13787 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13788 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13792 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13793 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13796 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13797 switch (cacheline_size) {
13801 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13802 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13803 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13809 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13810 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13814 switch (cacheline_size) {
13816 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13817 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13818 DMA_RWCTRL_WRITE_BNDRY_16);
13823 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13824 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13825 DMA_RWCTRL_WRITE_BNDRY_32);
13830 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13831 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13832 DMA_RWCTRL_WRITE_BNDRY_64);
13837 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13838 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13839 DMA_RWCTRL_WRITE_BNDRY_128);
13844 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13845 DMA_RWCTRL_WRITE_BNDRY_256);
13848 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13849 DMA_RWCTRL_WRITE_BNDRY_512);
13853 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13854 DMA_RWCTRL_WRITE_BNDRY_1024);
13863 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13865 struct tg3_internal_buffer_desc test_desc;
13866 u32 sram_dma_descs;
13869 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13871 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13872 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13873 tw32(RDMAC_STATUS, 0);
13874 tw32(WDMAC_STATUS, 0);
13876 tw32(BUFMGR_MODE, 0);
13877 tw32(FTQ_RESET, 0);
13879 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13880 test_desc.addr_lo = buf_dma & 0xffffffff;
13881 test_desc.nic_mbuf = 0x00002100;
13882 test_desc.len = size;
13885 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13886 * the *second* time the tg3 driver was getting loaded after an initial scan.
13889 * Broadcom tells me:
13890 * ...the DMA engine is connected to the GRC block and a DMA
13891 * reset may affect the GRC block in some unpredictable way...
13892 * The behavior of resets to individual blocks has not been tested.
13894 * Broadcom noted the GRC reset will also reset all sub-components. */
13897 test_desc.cqid_sqid = (13 << 8) | 2;
13899 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13902 test_desc.cqid_sqid = (16 << 8) | 7;
13904 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13907 test_desc.flags = 0x00000005;
13909 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13912 val = *(((u32 *)&test_desc) + i);
13913 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13914 sram_dma_descs + (i * sizeof(u32)));
13915 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13917 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
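/* The loop above pokes the descriptor into NIC SRAM through the
 * config-space memory window: writing TG3PCI_MEM_WIN_BASE_ADDR selects
 * the internal address and TG3PCI_MEM_WIN_DATA transfers one 32-bit
 * word at that address; the window is pointed back at 0 when done.
 */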
13920 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13922 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13925 for (i = 0; i < 40; i++) {
13929 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13931 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13932 if ((val & 0xffff) == sram_dma_descs) {
13943 #define TEST_BUFFER_SIZE 0x2000
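/* On 5700/5701 (the parts with the write-DMA bug) tg3_test_dma() fills
 * a coherent buffer with a simple counting pattern, DMAs it into NIC
 * SRAM at offset 0x2100, DMAs it back and compares.  If the data comes
 * back corrupted, the write boundary is clamped to 16 bytes and the
 * test is retried.  In pattern form -- a sketch of the check only,
 * assuming p points at the test buffer:
 *
 *	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
 *		p[i] = i;
 *	... run the host-to-NIC and NIC-to-host DMA ...
 *	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
 *		if (p[i] != i)
 *			goto retry_with_16_byte_write_boundary;
 */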
13945 static int __devinit tg3_test_dma(struct tg3 *tp)
13947 dma_addr_t buf_dma;
13948 u32 *buf, saved_dma_rwctrl;
13951 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13957 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13958 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13960 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13962 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13965 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13966 /* DMA read watermark not used on PCIE */
13967 tp->dma_rwctrl |= 0x00180000;
13968 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13969 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13971 tp->dma_rwctrl |= 0x003f0000;
13973 tp->dma_rwctrl |= 0x003f000f;
13975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13977 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13978 u32 read_water = 0x7;
13980 /* If the 5704 is behind the EPB bridge, we can
13981 * do the less restrictive ONE_DMA workaround for
13982 * better performance.
13984 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13986 tp->dma_rwctrl |= 0x8000;
13987 else if (ccval == 0x6 || ccval == 0x7)
13988 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13992 /* Set bit 23 to enable PCIX hw bug fix */
13994 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13995 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13997 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13998 /* 5780 always in PCIX mode */
13999 tp->dma_rwctrl |= 0x00144000;
14000 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14001 /* 5714 always in PCIX mode */
14002 tp->dma_rwctrl |= 0x00148000;
14004 tp->dma_rwctrl |= 0x001b000f;
14008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14010 tp->dma_rwctrl &= 0xfffffff0;
14012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14014 /* Remove this if it causes problems for some boards. */
14015 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14017 /* On 5700/5701 chips, we need to set this bit.
14018 * Otherwise the chip will issue cacheline transactions
14019 * to streamable DMA memory with not all the byte
14020 * enables turned on. This is an error on several
14021 * RISC PCI controllers, in particular sparc64.
14023 * On 5703/5704 chips, this bit has been reassigned
14024 * a different meaning. In particular, it is used
14025 * on those chips to enable a PCI-X workaround.
14027 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14030 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14033 /* Unneeded, already done by tg3_get_invariants. */
14034 tg3_switch_clocks(tp);
14037 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14038 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14041 /* It is best to perform DMA test with maximum write burst size
14042 * to expose the 5700/5701 write DMA bug.
14044 saved_dma_rwctrl = tp->dma_rwctrl;
14045 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14046 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14051 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14054 /* Send the buffer to the chip. */
14055 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14057 dev_err(&tp->pdev->dev,
14058 "%s: Buffer write failed. err = %d\n",
14064 /* validate data reached card RAM correctly. */
14065 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14067 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14068 if (le32_to_cpu(val) != p[i]) {
14069 dev_err(&tp->pdev->dev,
14070 "%s: Buffer corrupted on device! "
14071 "(%d != %d)\n", __func__, val, i);
14072 /* ret = -ENODEV here? */
14077 /* Now read it back. */
14078 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14080 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14081 "err = %d\n", __func__, ret);
14086 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14090 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14091 DMA_RWCTRL_WRITE_BNDRY_16) {
14092 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14093 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14094 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14097 dev_err(&tp->pdev->dev,
14098 "%s: Buffer corrupted on read back! "
14099 "(%d != %d)\n", __func__, p[i], i);
14105 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14111 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14112 DMA_RWCTRL_WRITE_BNDRY_16) {
14113 static struct pci_device_id dma_wait_state_chipsets[] = {
14114 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
14115 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14119 /* DMA test passed without adjusting DMA boundary;
14120 * now look for chipsets that are known to expose the
14121 * DMA bug without failing the test.
14123 if (pci_dev_present(dma_wait_state_chipsets)) {
14124 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14125 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14127 /* Safe to use the calculated DMA boundary. */
14128 tp->dma_rwctrl = saved_dma_rwctrl;
14131 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14135 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
14140 static void __devinit tg3_init_link_config(struct tg3 *tp)
14142 tp->link_config.advertising =
14143 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14144 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14145 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14146 ADVERTISED_Autoneg | ADVERTISED_MII);
14147 tp->link_config.speed = SPEED_INVALID;
14148 tp->link_config.duplex = DUPLEX_INVALID;
14149 tp->link_config.autoneg = AUTONEG_ENABLE;
14150 tp->link_config.active_speed = SPEED_INVALID;
14151 tp->link_config.active_duplex = DUPLEX_INVALID;
14152 tp->link_config.orig_speed = SPEED_INVALID;
14153 tp->link_config.orig_duplex = DUPLEX_INVALID;
14154 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14157 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14159 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14160 tp->bufmgr_config.mbuf_read_dma_low_water =
14161 DEFAULT_MB_RDMA_LOW_WATER_5705;
14162 tp->bufmgr_config.mbuf_mac_rx_low_water =
14163 DEFAULT_MB_MACRX_LOW_WATER_57765;
14164 tp->bufmgr_config.mbuf_high_water =
14165 DEFAULT_MB_HIGH_WATER_57765;
14167 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14168 DEFAULT_MB_RDMA_LOW_WATER_5705;
14169 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14170 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14171 tp->bufmgr_config.mbuf_high_water_jumbo =
14172 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14173 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14174 tp->bufmgr_config.mbuf_read_dma_low_water =
14175 DEFAULT_MB_RDMA_LOW_WATER_5705;
14176 tp->bufmgr_config.mbuf_mac_rx_low_water =
14177 DEFAULT_MB_MACRX_LOW_WATER_5705;
14178 tp->bufmgr_config.mbuf_high_water =
14179 DEFAULT_MB_HIGH_WATER_5705;
14180 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14181 tp->bufmgr_config.mbuf_mac_rx_low_water =
14182 DEFAULT_MB_MACRX_LOW_WATER_5906;
14183 tp->bufmgr_config.mbuf_high_water =
14184 DEFAULT_MB_HIGH_WATER_5906;
14187 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14188 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14189 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14190 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14191 tp->bufmgr_config.mbuf_high_water_jumbo =
14192 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14194 tp->bufmgr_config.mbuf_read_dma_low_water =
14195 DEFAULT_MB_RDMA_LOW_WATER;
14196 tp->bufmgr_config.mbuf_mac_rx_low_water =
14197 DEFAULT_MB_MACRX_LOW_WATER;
14198 tp->bufmgr_config.mbuf_high_water =
14199 DEFAULT_MB_HIGH_WATER;
14201 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14202 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14203 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14204 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14205 tp->bufmgr_config.mbuf_high_water_jumbo =
14206 DEFAULT_MB_HIGH_WATER_JUMBO;
14209 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14210 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14213 static char * __devinit tg3_phy_string(struct tg3 *tp)
14215 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14216 case TG3_PHY_ID_BCM5400: return "5400";
14217 case TG3_PHY_ID_BCM5401: return "5401";
14218 case TG3_PHY_ID_BCM5411: return "5411";
14219 case TG3_PHY_ID_BCM5701: return "5701";
14220 case TG3_PHY_ID_BCM5703: return "5703";
14221 case TG3_PHY_ID_BCM5704: return "5704";
14222 case TG3_PHY_ID_BCM5705: return "5705";
14223 case TG3_PHY_ID_BCM5750: return "5750";
14224 case TG3_PHY_ID_BCM5752: return "5752";
14225 case TG3_PHY_ID_BCM5714: return "5714";
14226 case TG3_PHY_ID_BCM5780: return "5780";
14227 case TG3_PHY_ID_BCM5755: return "5755";
14228 case TG3_PHY_ID_BCM5787: return "5787";
14229 case TG3_PHY_ID_BCM5784: return "5784";
14230 case TG3_PHY_ID_BCM5756: return "5722/5756";
14231 case TG3_PHY_ID_BCM5906: return "5906";
14232 case TG3_PHY_ID_BCM5761: return "5761";
14233 case TG3_PHY_ID_BCM5718C: return "5718C";
14234 case TG3_PHY_ID_BCM5718S: return "5718S";
14235 case TG3_PHY_ID_BCM57765: return "57765";
14236 case TG3_PHY_ID_BCM5719C: return "5719C";
14237 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14238 case 0: return "serdes";
14239 default: return "unknown";
14243 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14245 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14246 strcpy(str, "PCI Express");
14248 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14249 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14251 strcpy(str, "PCIX:");
14253 if ((clock_ctrl == 7) ||
14254 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14255 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14256 strcat(str, "133MHz");
14257 else if (clock_ctrl == 0)
14258 strcat(str, "33MHz");
14259 else if (clock_ctrl == 2)
14260 strcat(str, "50MHz");
14261 else if (clock_ctrl == 4)
14262 strcat(str, "66MHz");
14263 else if (clock_ctrl == 6)
14264 strcat(str, "100MHz");
14266 strcpy(str, "PCI:");
14267 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14268 strcat(str, "66MHz");
14270 strcat(str, "33MHz");
14272 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14273 strcat(str, ":32-bit");
14275 strcat(str, ":64-bit");
14279 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14281 struct pci_dev *peer;
14282 unsigned int func, devnr = tp->pdev->devfn & ~7;
14284 for (func = 0; func < 8; func++) {
14285 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14286 if (peer && peer != tp->pdev)
14290 /* 5704 can be configured in single-port mode; set peer to
14291 * tp->pdev in that case.
14299 * We don't need to keep the refcount elevated; there's no way
14300 * to remove one half of this device without removing the other. */
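/* devfn encodes (slot << 3) | function, so masking with ~7 above keeps
 * the slot and the loop walks all eight possible functions of it:
 *
 *	PCI_SLOT(devfn) == (devfn >> 3) & 0x1f
 *	PCI_FUNC(devfn) ==  devfn & 0x07
 */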
14307 static void __devinit tg3_init_coal(struct tg3 *tp)
14309 struct ethtool_coalesce *ec = &tp->coal;
14311 memset(ec, 0, sizeof(*ec));
14312 ec->cmd = ETHTOOL_GCOALESCE;
14313 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14314 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14315 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14316 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14317 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14318 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14319 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14320 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14321 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14323 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14324 HOSTCC_MODE_CLRTICK_TXBD)) {
14325 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14326 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14327 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14328 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14331 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14332 ec->rx_coalesce_usecs_irq = 0;
14333 ec->tx_coalesce_usecs_irq = 0;
14334 ec->stats_block_coalesce_usecs = 0;
14338 static const struct net_device_ops tg3_netdev_ops = {
14339 .ndo_open = tg3_open,
14340 .ndo_stop = tg3_close,
14341 .ndo_start_xmit = tg3_start_xmit,
14342 .ndo_get_stats64 = tg3_get_stats64,
14343 .ndo_validate_addr = eth_validate_addr,
14344 .ndo_set_multicast_list = tg3_set_rx_mode,
14345 .ndo_set_mac_address = tg3_set_mac_addr,
14346 .ndo_do_ioctl = tg3_ioctl,
14347 .ndo_tx_timeout = tg3_tx_timeout,
14348 .ndo_change_mtu = tg3_change_mtu,
14349 #if TG3_VLAN_TAG_USED
14350 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14352 #ifdef CONFIG_NET_POLL_CONTROLLER
14353 .ndo_poll_controller = tg3_poll_controller,
14357 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14358 .ndo_open = tg3_open,
14359 .ndo_stop = tg3_close,
14360 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14361 .ndo_get_stats64 = tg3_get_stats64,
14362 .ndo_validate_addr = eth_validate_addr,
14363 .ndo_set_multicast_list = tg3_set_rx_mode,
14364 .ndo_set_mac_address = tg3_set_mac_addr,
14365 .ndo_do_ioctl = tg3_ioctl,
14366 .ndo_tx_timeout = tg3_tx_timeout,
14367 .ndo_change_mtu = tg3_change_mtu,
14368 #if TG3_VLAN_TAG_USED
14369 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14371 #ifdef CONFIG_NET_POLL_CONTROLLER
14372 .ndo_poll_controller = tg3_poll_controller,
14376 static int __devinit tg3_init_one(struct pci_dev *pdev,
14377 const struct pci_device_id *ent)
14379 struct net_device *dev;
14381 int i, err, pm_cap;
14382 u32 sndmbx, rcvmbx, intmbx;
14384 u64 dma_mask, persist_dma_mask;
14386 printk_once(KERN_INFO "%s\n", version);
14388 err = pci_enable_device(pdev);
14390 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14394 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14396 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14397 goto err_out_disable_pdev;
14400 pci_set_master(pdev);
14402 /* Find power-management capability. */
14403 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14405 dev_err(&pdev->dev,
14406 "Cannot find Power Management capability, aborting\n");
14408 goto err_out_free_res;
14411 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14413 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14415 goto err_out_free_res;
14418 SET_NETDEV_DEV(dev, &pdev->dev);
14420 #if TG3_VLAN_TAG_USED
14421 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14424 tp = netdev_priv(dev);
14427 tp->pm_cap = pm_cap;
14428 tp->rx_mode = TG3_DEF_RX_MODE;
14429 tp->tx_mode = TG3_DEF_TX_MODE;
14432 tp->msg_enable = tg3_debug;
14434 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14436 /* The word/byte swap controls here control register access byte
14437 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14440 tp->misc_host_ctrl =
14441 MISC_HOST_CTRL_MASK_PCI_INT |
14442 MISC_HOST_CTRL_WORD_SWAP |
14443 MISC_HOST_CTRL_INDIR_ACCESS |
14444 MISC_HOST_CTRL_PCISTATE_RW;
14446 /* The NONFRM (non-frame) byte/word swap controls take effect
14447 * on descriptor entries, anything which isn't packet data.
14449 * The StrongARM chips on the board (one for tx, one for rx)
14450 * are running in big-endian mode.
14452 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14453 GRC_MODE_WSWAP_NONFRM_DATA);
14454 #ifdef __BIG_ENDIAN
14455 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14457 spin_lock_init(&tp->lock);
14458 spin_lock_init(&tp->indirect_lock);
14459 INIT_WORK(&tp->reset_task, tg3_reset_task);
14461 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14463 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14465 goto err_out_free_dev;
14468 tg3_init_link_config(tp);
14470 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14471 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14473 dev->ethtool_ops = &tg3_ethtool_ops;
14474 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14475 dev->irq = pdev->irq;
14477 err = tg3_get_invariants(tp);
14479 dev_err(&pdev->dev,
14480 "Problem fetching invariants of chip, aborting\n");
14481 goto err_out_iounmap;
14484 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14485 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
14486 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14487 dev->netdev_ops = &tg3_netdev_ops;
14489 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14492 /* The EPB bridge inside 5714, 5715, and 5780 and any
14493 * device behind the EPB cannot support DMA addresses > 40-bit.
14494 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14495 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14496 * do DMA address check in tg3_start_xmit(). */
14498 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14499 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14500 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14501 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14502 #ifdef CONFIG_HIGHMEM
14503 dma_mask = DMA_BIT_MASK(64);
14506 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
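/* DMA_BIT_MASK(n) is just ((1ULL << n) - 1) for n < 64 (all-ones for
 * n == 64), so e.g. DMA_BIT_MASK(40) == 0x000000ffffffffffULL -- the
 * widest address the EPB-attached parts can generate.
 */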
14508 /* Configure DMA attributes. */
14509 if (dma_mask > DMA_BIT_MASK(32)) {
14510 err = pci_set_dma_mask(pdev, dma_mask);
14512 dev->features |= NETIF_F_HIGHDMA;
14513 err = pci_set_consistent_dma_mask(pdev,
14516 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14517 "DMA for consistent allocations\n");
14518 goto err_out_iounmap;
14522 if (err || dma_mask == DMA_BIT_MASK(32)) {
14523 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14525 dev_err(&pdev->dev,
14526 "No usable DMA configuration, aborting\n");
14527 goto err_out_iounmap;
14531 tg3_init_bufmgr_config(tp);
14533 /* Selectively allow TSO based on operating conditions */
14534 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14535 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14536 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14538 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14539 tp->fw_needed = NULL;
14542 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14543 tp->fw_needed = FIRMWARE_TG3;
14545 /* TSO is on by default on chips that support hardware TSO.
14546 * Firmware TSO on older chips gives lower performance, so it
14547 * is off by default, but can be enabled using ethtool.
14549 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14550 (dev->features & NETIF_F_IP_CSUM)) {
14551 dev->features |= NETIF_F_TSO;
14552 vlan_features_add(dev, NETIF_F_TSO);
14554 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14555 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14556 if (dev->features & NETIF_F_IPV6_CSUM) {
14557 dev->features |= NETIF_F_TSO6;
14558 vlan_features_add(dev, NETIF_F_TSO6);
14560 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14561 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14562 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14563 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14564 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14565 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14566 dev->features |= NETIF_F_TSO_ECN;
14567 vlan_features_add(dev, NETIF_F_TSO_ECN);
14571 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14572 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14573 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14574 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14575 tp->rx_pending = 63;
14578 err = tg3_get_device_address(tp);
14580 dev_err(&pdev->dev,
14581 "Could not obtain valid ethernet address, aborting\n");
14582 goto err_out_iounmap;
14585 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14586 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14587 if (!tp->aperegs) {
14588 dev_err(&pdev->dev,
14589 "Cannot map APE registers, aborting\n");
14591 goto err_out_iounmap;
14594 tg3_ape_lock_init(tp);
14596 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14597 tg3_read_dash_ver(tp);
14601 * Reset the chip in case an UNDI or EFI driver did not shut down
14602 * DMA cleanly; otherwise the DMA self test will enable the WDMAC
14603 * and we'll see (spurious) pending DMA on the PCI bus at that point.
14605 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14606 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14607 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14608 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14611 err = tg3_test_dma(tp);
14613 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14614 goto err_out_apeunmap;
14617 /* flow control autonegotiation is default behavior */
14618 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14619 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14621 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14622 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14623 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14624 for (i = 0; i < tp->irq_max; i++) {
14625 struct tg3_napi *tnapi = &tp->napi[i];
14628 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14630 tnapi->int_mbox = intmbx;
14636 tnapi->consmbox = rcvmbx;
14637 tnapi->prodmbox = sndmbx;
14640 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14642 tnapi->coal_now = HOSTCC_MODE_NOW;
14644 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14648 * If we support MSIX, we'll be using RSS. If we're using
14649 * RSS, the first vector only handles link interrupts and the
14650 * remaining vectors handle rx and tx interrupts. Reuse the
14651 * mailbox values for the next iteration. The values we set up
14652 * above are still useful for the single-vector mode.
14667 pci_set_drvdata(pdev, dev);
14669 err = register_netdev(dev);
14671 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14672 goto err_out_apeunmap;
14675 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14676 tp->board_part_number,
14677 tp->pci_chip_rev_id,
14678 tg3_bus_string(tp, str),
14681 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
14682 struct phy_device *phydev;
14683 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14685 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14686 phydev->drv->name, dev_name(&phydev->dev));
14690 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
14691 ethtype = "10/100Base-TX";
14692 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
14693 ethtype = "1000Base-SX";
14695 ethtype = "10/100/1000Base-T";
14697 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14698 "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
14699 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
14702 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14703 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14704 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14705 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
14706 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14707 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14708 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14710 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14711 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14717 iounmap(tp->aperegs);
14718 tp->aperegs = NULL;
14731 pci_release_regions(pdev);
14733 err_out_disable_pdev:
14734 pci_disable_device(pdev);
14735 pci_set_drvdata(pdev, NULL);
14739 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14741 struct net_device *dev = pci_get_drvdata(pdev);
14744 struct tg3 *tp = netdev_priv(dev);
14747 release_firmware(tp->fw);
14749 flush_scheduled_work();
14751 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14756 unregister_netdev(dev);
14758 iounmap(tp->aperegs);
14759 tp->aperegs = NULL;
14766 pci_release_regions(pdev);
14767 pci_disable_device(pdev);
14768 pci_set_drvdata(pdev, NULL);
14772 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14774 struct net_device *dev = pci_get_drvdata(pdev);
14775 struct tg3 *tp = netdev_priv(dev);
14776 pci_power_t target_state;
14779 /* PCI register 4 needs to be saved whether netif_running() or not.
14780 * MSI address and data need to be saved if using MSI and netif_running(). */
14783 pci_save_state(pdev);
14785 if (!netif_running(dev))
14788 flush_scheduled_work();
14790 tg3_netif_stop(tp);
14792 del_timer_sync(&tp->timer);
14794 tg3_full_lock(tp, 1);
14795 tg3_disable_ints(tp);
14796 tg3_full_unlock(tp);
14798 netif_device_detach(dev);
14800 tg3_full_lock(tp, 0);
14801 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14802 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14803 tg3_full_unlock(tp);
14805 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14807 err = tg3_set_power_state(tp, target_state);
14811 tg3_full_lock(tp, 0);
14813 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14814 err2 = tg3_restart_hw(tp, 1);
14818 tp->timer.expires = jiffies + tp->timer_offset;
14819 add_timer(&tp->timer);
14821 netif_device_attach(dev);
14822 tg3_netif_start(tp);
14825 tg3_full_unlock(tp);
14834 static int tg3_resume(struct pci_dev *pdev)
14836 struct net_device *dev = pci_get_drvdata(pdev);
14837 struct tg3 *tp = netdev_priv(dev);
14840 pci_restore_state(tp->pdev);
14842 if (!netif_running(dev))
14845 err = tg3_set_power_state(tp, PCI_D0);
14849 netif_device_attach(dev);
14851 tg3_full_lock(tp, 0);
14853 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14854 err = tg3_restart_hw(tp, 1);
14858 tp->timer.expires = jiffies + tp->timer_offset;
14859 add_timer(&tp->timer);
14861 tg3_netif_start(tp);
14864 tg3_full_unlock(tp);
14872 static struct pci_driver tg3_driver = {
14873 .name = DRV_MODULE_NAME,
14874 .id_table = tg3_pci_tbl,
14875 .probe = tg3_init_one,
14876 .remove = __devexit_p(tg3_remove_one),
14877 .suspend = tg3_suspend,
14878 .resume = tg3_resume
14881 static int __init tg3_init(void)
14883 return pci_register_driver(&tg3_driver);
14886 static void __exit tg3_cleanup(void)
14888 pci_unregister_driver(&tg3_driver);
14891 module_init(tg3_init);
14892 module_exit(tg3_cleanup);