/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"
#define DRV_MODULE_NAME		"tg3"
#define DRV_MODULE_VERSION	"3.108"
#define DRV_MODULE_RELDATE	"February 17, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
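/* NEXT_TX() above is the '& (foo - 1)' form promised in the note:
 * TG3_TX_RING_SIZE is a power of two, so masking wraps the ring index
 * without a hardware modulo instruction.
 */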
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
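/* That is, a stopped queue is only restarted once at least a quarter
 * of the ring's tx_pending descriptors are free again, which keeps
 * stop/wake churn down under load.
 */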
#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

#define TG3_RSS_MIN_NUM_MSIX_VECS	2

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
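/* Register access helpers.  The chip can be programmed either through
 * its memory-mapped BAR or, on chips with indirect-access hardware
 * bugs, through the PCI configuration-space window registers
 * (TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA).  The right implementation is
 * selected at probe time and reached via tp->write32 and friends.
 */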
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
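/* TX mailbox writes: the doorbell value is written a second time on
 * chips with the TXD mailbox hardware bug, and read back when the
 * host may reorder mailbox writes, so the write is known to have
 * reached the chip before the caller proceeds.
 */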
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
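/* Shorthand used throughout the driver.  All of these expect a local
 * 'tp' in scope and dispatch through the method pointers chosen at
 * probe time, so callers never care which access method is in use.
 */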
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg, val)		tp->write32(tp, reg, val)
#define tw32_f(reg, val)	_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)		tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
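/* APE (Application Processing Engine) lock helpers.  These arbitrate
 * resources shared with the management firmware via the APE_LOCK_REQ/
 * APE_LOCK_GRANT register pairs; one pair exists per lock number.
 */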
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
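/* MII access.  The MAC clocks MDIO frames out through the MAC_MI_COM
 * register; tg3_readphy()/tg3_writephy() below build the frame, start
 * it, and poll MI_COM_BUSY for completion, bounded by PHY_BUSY_LOOPS
 * iterations.  Autopolling is paused around each transaction.
 */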
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
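/* Glue for registering the PHY with phylib: the mii_bus read/write
 * callbacks below simply wrap tg3_readphy()/tg3_writephy() under
 * tp->lock.
 */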
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		u32 funcnum, is_serdes;

		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
		if (funcnum)
			tp->phy_addr = 2;
		else
			tp->phy_addr = 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");
		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
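/* Apply the resolved pause configuration to the MAC.  When pause
 * autonegotiation is in effect, the result comes from the
 * advertisement exchange resolved above (1000X or 1000T rules);
 * otherwise the manually configured tp->link_config.flowctrl is
 * used verbatim.
 */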
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
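/* Quiesce the PHY for a low-power transition.  SerDes, 5906 and
 * FET-style PHYs each need their own sequence, and a few chip
 * revisions must not have BMCR_PDOWN written at all because of
 * hardware bugs.
 */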
2161 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2165 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2167 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2168 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2171 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2172 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2173 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2180 val = tr32(GRC_MISC_CFG);
2181 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2184 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2185 u32 phytest;
2186 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2187 u32 phy;
2189 tg3_writephy(tp, MII_ADVERTISE, 0);
2190 tg3_writephy(tp, MII_BMCR,
2191 BMCR_ANENABLE | BMCR_ANRESTART);
2193 tg3_writephy(tp, MII_TG3_FET_TEST,
2194 phytest | MII_TG3_FET_SHADOW_EN);
2195 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2196 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2197 tg3_writephy(tp,
2198 MII_TG3_FET_SHDW_AUXMODE4,
2199 phy);
2200 }
2201 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2202 }
2203 return;
2204 } else if (do_low_power) {
2205 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2206 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2208 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2209 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2210 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2211 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2212 MII_TG3_AUXCTL_PCTL_VREG_11V);
2215 /* The PHY should not be powered down on some chips because
2216 * of bugs. */
2218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2220 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2221 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2222 return;
2224 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2225 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2226 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2227 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2228 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2229 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2232 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2235 /* tp->lock is held. */
2236 static int tg3_nvram_lock(struct tg3 *tp)
2237 {
2238 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2239 int i;
2241 if (tp->nvram_lock_cnt == 0) {
2242 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2243 for (i = 0; i < 8000; i++) {
2244 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2245 break;
2246 udelay(20);
2247 }
2248 if (i == 8000) {
2249 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2250 return -ENODEV;
2251 }
2252 }
2253 tp->nvram_lock_cnt++;
2254 }
2255 return 0;
2256 }
2258 /* tp->lock is held. */
2259 static void tg3_nvram_unlock(struct tg3 *tp)
2261 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2262 if (tp->nvram_lock_cnt > 0)
2263 tp->nvram_lock_cnt--;
2264 if (tp->nvram_lock_cnt == 0)
2265 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2269 /* tp->lock is held. */
2270 static void tg3_enable_nvram_access(struct tg3 *tp)
2272 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2273 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2274 u32 nvaccess = tr32(NVRAM_ACCESS);
2276 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2280 /* tp->lock is held. */
2281 static void tg3_disable_nvram_access(struct tg3 *tp)
2283 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2284 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2285 u32 nvaccess = tr32(NVRAM_ACCESS);
2287 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
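2288 /* Read one 32-bit word through the legacy serial-EEPROM interface
2289 * (GRC_EEPROM_ADDR/DATA); used when the chip has no NVRAM block. */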
2291 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2292 u32 offset, u32 *val)
2293 {
2294 u32 tmp;
2295 int i;
2297 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2298 return -EINVAL;
2300 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2301 EEPROM_ADDR_DEVID_MASK |
2302 EEPROM_ADDR_READ);
2303 tw32(GRC_EEPROM_ADDR,
2304 tmp |
2305 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2306 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2307 EEPROM_ADDR_ADDR_MASK) |
2308 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2310 for (i = 0; i < 1000; i++) {
2311 tmp = tr32(GRC_EEPROM_ADDR);
2313 if (tmp & EEPROM_ADDR_COMPLETE)
2314 break;
2315 msleep(1);
2316 }
2317 if (!(tmp & EEPROM_ADDR_COMPLETE))
2318 return -EBUSY;
2320 tmp = tr32(GRC_EEPROM_DATA);
2323 /* The data will always be opposite the native endian
2324 * format. Perform a blind byteswap to compensate. */
2326 *val = swab32(tmp);
2328 return 0;
2329 }
2331 #define NVRAM_CMD_TIMEOUT 10000
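2332 /* Issue an NVRAM command and poll it for completion. */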
2333 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2334 {
2335 int i;
2337 tw32(NVRAM_CMD, nvram_cmd);
2338 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2339 udelay(10);
2340 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE)
2341 break;
2343 }
2346 if (i == NVRAM_CMD_TIMEOUT)
2347 return -EBUSY;
2348 return 0;
2349 }
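2350 /* The two helpers below convert between linear NVRAM offsets and the
2351 * page-based addressing used by Atmel AT45DB0x1B flash parts. */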
2352 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2354 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2355 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2356 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2357 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2358 (tp->nvram_jedecnum == JEDEC_ATMEL))
2360 addr = ((addr / tp->nvram_pagesize) <<
2361 ATMEL_AT45DB0X1B_PAGE_POS) +
2362 (addr % tp->nvram_pagesize);
2364 return addr;
2365 }
2367 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2369 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2370 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2371 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2372 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2373 (tp->nvram_jedecnum == JEDEC_ATMEL))
2375 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2376 tp->nvram_pagesize) +
2377 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2379 return addr;
2380 }
2382 /* NOTE: Data read in from NVRAM is byteswapped according to
2383 * the byteswapping settings for all other register accesses.
2384 * tg3 devices are BE devices, so on a BE machine, the data
2385 * returned will be exactly as it is seen in NVRAM. On a LE
2386 * machine, the 32-bit value will be byteswapped.
2387 */
2388 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2389 {
2390 int ret;
2392 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2393 return tg3_nvram_read_using_eeprom(tp, offset, val);
2395 offset = tg3_nvram_phys_addr(tp, offset);
2397 if (offset > NVRAM_ADDR_MSK)
2398 return -EINVAL;
2400 ret = tg3_nvram_lock(tp);
2401 if (ret)
2402 return ret;
2404 tg3_enable_nvram_access(tp);
2406 tw32(NVRAM_ADDR, offset);
2407 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2408 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2410 if (ret == 0)
2411 *val = tr32(NVRAM_RDDATA);
2413 tg3_disable_nvram_access(tp);
2415 tg3_nvram_unlock(tp);
2417 return ret;
2418 }
2420 /* Ensures NVRAM data is in bytestream format. */
2421 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2422 {
2423 u32 v;
2424 int res = tg3_nvram_read(tp, offset, &v);
2425 if (!res)
2426 *val = cpu_to_be32(v);
2427 return res;
2428 }
2430 /* tp->lock is held. */
2431 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2433 u32 addr_high, addr_low;
2436 addr_high = ((tp->dev->dev_addr[0] << 8) |
2437 tp->dev->dev_addr[1]);
2438 addr_low = ((tp->dev->dev_addr[2] << 24) |
2439 (tp->dev->dev_addr[3] << 16) |
2440 (tp->dev->dev_addr[4] << 8) |
2441 (tp->dev->dev_addr[5] << 0));
2442 for (i = 0; i < 4; i++) {
2443 if (i == 1 && skip_mac_1)
2445 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2446 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2451 for (i = 0; i < 12; i++) {
2452 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2453 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2457 addr_high = (tp->dev->dev_addr[0] +
2458 tp->dev->dev_addr[1] +
2459 tp->dev->dev_addr[2] +
2460 tp->dev->dev_addr[3] +
2461 tp->dev->dev_addr[4] +
2462 tp->dev->dev_addr[5]) &
2463 TX_BACKOFF_SEED_MASK;
2464 tw32(MAC_TX_BACKOFF_SEED, addr_high);
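2465 /* Transition the chip to the requested PCI power state, setting up
2466 * wake-on-LAN, PHY power, and core clocks along the way. */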
2467 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2468 {
2469 u32 misc_host_ctrl;
2470 bool device_should_wake, do_low_power;
2472 /* Make sure register accesses (indirect or otherwise)
2473 * will function correctly.
2475 pci_write_config_dword(tp->pdev,
2476 TG3PCI_MISC_HOST_CTRL,
2477 tp->misc_host_ctrl);
2481 pci_enable_wake(tp->pdev, state, false);
2482 pci_set_power_state(tp->pdev, PCI_D0);
2484 /* Switch out of Vaux if it is a NIC */
2485 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2486 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2496 netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2497 state);
2498 return -EINVAL;
2501 /* Restore the CLKREQ setting. */
2502 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2503 u16 lnkctl;
2505 pci_read_config_word(tp->pdev,
2506 tp->pcie_cap + PCI_EXP_LNKCTL,
2507 &lnkctl);
2508 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2509 pci_write_config_word(tp->pdev,
2510 tp->pcie_cap + PCI_EXP_LNKCTL,
2511 lnkctl);
2512 }
2514 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2515 tw32(TG3PCI_MISC_HOST_CTRL,
2516 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2518 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2519 device_may_wakeup(&tp->pdev->dev) &&
2520 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2522 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2523 do_low_power = false;
2524 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2525 !tp->link_config.phy_is_low_power) {
2526 struct phy_device *phydev;
2527 u32 phyid, advertising;
2529 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2531 tp->link_config.phy_is_low_power = 1;
2533 tp->link_config.orig_speed = phydev->speed;
2534 tp->link_config.orig_duplex = phydev->duplex;
2535 tp->link_config.orig_autoneg = phydev->autoneg;
2536 tp->link_config.orig_advertising = phydev->advertising;
2538 advertising = ADVERTISED_TP |
2540 ADVERTISED_Autoneg |
2541 ADVERTISED_10baseT_Half;
2543 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2544 device_should_wake) {
2545 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2546 advertising |=
2547 ADVERTISED_100baseT_Half |
2548 ADVERTISED_100baseT_Full |
2549 ADVERTISED_10baseT_Full;
2550 else
2551 advertising |= ADVERTISED_10baseT_Full;
2554 phydev->advertising = advertising;
2556 phy_start_aneg(phydev);
2558 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2559 if (phyid != PHY_ID_BCMAC131) {
2560 phyid &= PHY_BCM_OUI_MASK;
2561 if (phyid == PHY_BCM_OUI_1 ||
2562 phyid == PHY_BCM_OUI_2 ||
2563 phyid == PHY_BCM_OUI_3)
2564 do_low_power = true;
2568 do_low_power = true;
2570 if (tp->link_config.phy_is_low_power == 0) {
2571 tp->link_config.phy_is_low_power = 1;
2572 tp->link_config.orig_speed = tp->link_config.speed;
2573 tp->link_config.orig_duplex = tp->link_config.duplex;
2574 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2577 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2578 tp->link_config.speed = SPEED_10;
2579 tp->link_config.duplex = DUPLEX_HALF;
2580 tp->link_config.autoneg = AUTONEG_ENABLE;
2581 tg3_setup_phy(tp, 0);
2585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2588 u32 val = tr32(GRC_VCPU_EXT_CTRL);
2589 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2590 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2591 int i;
2592 u32 val;
2594 for (i = 0; i < 200; i++) {
2595 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2596 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2597 break;
2598 msleep(1);
2599 }
2600 }
2601 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2602 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2603 WOL_DRV_STATE_SHUTDOWN |
2604 WOL_DRV_WOL |
2605 WOL_SET_MAGIC_PKT);
2607 if (device_should_wake) {
2608 u32 mac_mode;
2610 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2612 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2616 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2617 mac_mode = MAC_MODE_PORT_MODE_GMII;
2619 mac_mode = MAC_MODE_PORT_MODE_MII;
2621 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2622 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2623 ASIC_REV_5700) {
2624 u32 speed = (tp->tg3_flags &
2625 TG3_FLAG_WOL_SPEED_100MB) ?
2626 SPEED_100 : SPEED_10;
2627 if (tg3_5700_link_polarity(tp, speed))
2628 mac_mode |= MAC_MODE_LINK_POLARITY;
2629 else
2630 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2631 }
2633 mac_mode = MAC_MODE_PORT_MODE_TBI;
2636 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2637 tw32(MAC_LED_CTRL, tp->led_ctrl);
2639 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2640 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2641 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2642 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2643 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2644 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2646 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2647 mac_mode |= tp->mac_mode &
2648 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2649 if (mac_mode & MAC_MODE_APE_TX_EN)
2650 mac_mode |= MAC_MODE_TDE_ENABLE;
2653 tw32_f(MAC_MODE, mac_mode);
2656 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2660 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2661 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2665 u32 base_val = tp->pci_clock_ctrl;
2666 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2667 CLOCK_CTRL_TXCLK_DISABLE);
2669 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2670 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2671 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2672 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2673 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2674 /* do nothing */
2675 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2676 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2677 u32 newbits1, newbits2;
2679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2681 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2682 CLOCK_CTRL_TXCLK_DISABLE |
2683 CLOCK_CTRL_ALTCLK);
2684 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2685 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2686 newbits1 = CLOCK_CTRL_625_CORE;
2687 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2689 newbits1 = CLOCK_CTRL_ALTCLK;
2690 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2693 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2694 40);
2696 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2697 40);
2699 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2700 u32 newbits3;
2702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2704 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2705 CLOCK_CTRL_TXCLK_DISABLE |
2706 CLOCK_CTRL_44MHZ_CORE);
2708 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2711 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2712 tp->pci_clock_ctrl | newbits3, 40);
2716 if (!(device_should_wake) &&
2717 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2718 tg3_power_down_phy(tp, do_low_power);
2720 tg3_frob_aux_power(tp);
2722 /* Workaround for unstable PLL clock */
2723 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2724 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2725 u32 val = tr32(0x7d00);
2727 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2728 tw32(0x7d00, val);
2729 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2731 int err;
2732 err = tg3_nvram_lock(tp);
2733 tg3_halt_cpu(tp, RX_CPU_BASE);
2734 if (!err)
2735 tg3_nvram_unlock(tp);
2739 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2741 if (device_should_wake)
2742 pci_enable_wake(tp->pdev, state, true);
2744 /* Finally, set the new power state. */
2745 pci_set_power_state(tp->pdev, state);
2747 return 0;
2748 }
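2749 /* Map the MII_TG3_AUX_STAT speed/duplex field to ethtool-style values. */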
2750 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2751 {
2752 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2753 case MII_TG3_AUX_STAT_10HALF:
2754 *speed = SPEED_10;
2755 *duplex = DUPLEX_HALF;
2756 break;
2758 case MII_TG3_AUX_STAT_10FULL:
2759 *speed = SPEED_10;
2760 *duplex = DUPLEX_FULL;
2761 break;
2763 case MII_TG3_AUX_STAT_100HALF:
2764 *speed = SPEED_100;
2765 *duplex = DUPLEX_HALF;
2766 break;
2768 case MII_TG3_AUX_STAT_100FULL:
2769 *speed = SPEED_100;
2770 *duplex = DUPLEX_FULL;
2771 break;
2773 case MII_TG3_AUX_STAT_1000HALF:
2774 *speed = SPEED_1000;
2775 *duplex = DUPLEX_HALF;
2776 break;
2778 case MII_TG3_AUX_STAT_1000FULL:
2779 *speed = SPEED_1000;
2780 *duplex = DUPLEX_FULL;
2781 break;
2783 default:
2784 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2785 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2786 SPEED_10;
2787 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2788 DUPLEX_HALF;
2789 break;
2790 }
2791 *speed = SPEED_INVALID;
2792 *duplex = DUPLEX_INVALID;
2793 break;
2794 }
2795 }
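2796 /* Program the advertisement registers and restart or force negotiation on a copper PHY. */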
2797 static void tg3_phy_copper_begin(struct tg3 *tp)
2798 {
2799 u32 new_adv;
2800 int i;
2802 if (tp->link_config.phy_is_low_power) {
2803 /* Entering low power mode. Disable gigabit and
2804 * 100baseT advertisements.
2806 tg3_writephy(tp, MII_TG3_CTRL, 0);
2808 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2809 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2810 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2811 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2813 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2814 } else if (tp->link_config.speed == SPEED_INVALID) {
2815 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2816 tp->link_config.advertising &=
2817 ~(ADVERTISED_1000baseT_Half |
2818 ADVERTISED_1000baseT_Full);
2820 new_adv = ADVERTISE_CSMA;
2821 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2822 new_adv |= ADVERTISE_10HALF;
2823 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2824 new_adv |= ADVERTISE_10FULL;
2825 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2826 new_adv |= ADVERTISE_100HALF;
2827 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2828 new_adv |= ADVERTISE_100FULL;
2830 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2832 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2834 if (tp->link_config.advertising &
2835 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2836 new_adv = 0;
2837 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2838 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2839 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2840 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2841 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2842 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2843 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2844 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2845 MII_TG3_CTRL_ENABLE_AS_MASTER);
2846 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2848 tg3_writephy(tp, MII_TG3_CTRL, 0);
2851 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2852 new_adv |= ADVERTISE_CSMA;
2854 /* Asking for a specific link mode. */
2855 if (tp->link_config.speed == SPEED_1000) {
2856 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2858 if (tp->link_config.duplex == DUPLEX_FULL)
2859 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2860 else
2861 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2862 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2863 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2864 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2865 MII_TG3_CTRL_ENABLE_AS_MASTER);
2867 if (tp->link_config.speed == SPEED_100) {
2868 if (tp->link_config.duplex == DUPLEX_FULL)
2869 new_adv |= ADVERTISE_100FULL;
2870 else
2871 new_adv |= ADVERTISE_100HALF;
2873 if (tp->link_config.duplex == DUPLEX_FULL)
2874 new_adv |= ADVERTISE_10FULL;
2875 else
2876 new_adv |= ADVERTISE_10HALF;
2878 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2883 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2886 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2887 tp->link_config.speed != SPEED_INVALID) {
2888 u32 bmcr, orig_bmcr;
2890 tp->link_config.active_speed = tp->link_config.speed;
2891 tp->link_config.active_duplex = tp->link_config.duplex;
2893 bmcr = 0;
2894 switch (tp->link_config.speed) {
2895 default:
2896 case SPEED_10:
2897 break;
2899 case SPEED_100:
2900 bmcr |= BMCR_SPEED100;
2901 break;
2903 case SPEED_1000:
2904 bmcr |= TG3_BMCR_SPEED1000;
2905 break;
2906 }
2908 if (tp->link_config.duplex == DUPLEX_FULL)
2909 bmcr |= BMCR_FULLDPLX;
2911 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2912 (bmcr != orig_bmcr)) {
2913 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2914 for (i = 0; i < 1500; i++) {
2915 u32 tmp;
2917 udelay(10);
2918 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2919 tg3_readphy(tp, MII_BMSR, &tmp))
2920 continue;
2921 if (!(tmp & BMSR_LSTATUS)) {
2922 udelay(40);
2923 break;
2924 }
2925 }
2926 tg3_writephy(tp, MII_BMCR, bmcr);
2930 tg3_writephy(tp, MII_BMCR,
2931 BMCR_ANENABLE | BMCR_ANRESTART);
2935 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2936 {
2937 int err;
2939 /* Turn off tap power management. */
2940 /* Set Extended packet length bit */
2941 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2943 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2944 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2946 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2947 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2949 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2950 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2952 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2953 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2955 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2956 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2958 udelay(40);
2960 return err;
2961 }
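2962 /* Check whether the MII advertisement registers cover every mode requested in "mask". */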
2963 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2965 u32 adv_reg, all_mask = 0;
2967 if (mask & ADVERTISED_10baseT_Half)
2968 all_mask |= ADVERTISE_10HALF;
2969 if (mask & ADVERTISED_10baseT_Full)
2970 all_mask |= ADVERTISE_10FULL;
2971 if (mask & ADVERTISED_100baseT_Half)
2972 all_mask |= ADVERTISE_100HALF;
2973 if (mask & ADVERTISED_100baseT_Full)
2974 all_mask |= ADVERTISE_100FULL;
2976 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2977 return 0;
2979 if ((adv_reg & all_mask) != all_mask)
2980 return 0;
2981 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2982 u32 tg3_ctrl;
2984 all_mask = 0;
2985 if (mask & ADVERTISED_1000baseT_Half)
2986 all_mask |= ADVERTISE_1000HALF;
2987 if (mask & ADVERTISED_1000baseT_Full)
2988 all_mask |= ADVERTISE_1000FULL;
2990 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2991 return 0;
2993 if ((tg3_ctrl & all_mask) != all_mask)
2994 return 0;
2995 }
2996 return 1;
2997 }
2999 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3000 {
3001 u32 curadv, reqadv;
3003 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3006 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3007 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3009 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3010 if (curadv != reqadv)
3011 return 0;
3013 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3014 tg3_readphy(tp, MII_LPA, rmtadv);
3016 /* Reprogram the advertisement register, even if it
3017 * does not affect the current link. If the link
3018 * gets renegotiated in the future, we can save an
3019 * additional renegotiation cycle by advertising
3020 * it correctly in the first place.
3022 if (curadv != reqadv) {
3023 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3024 ADVERTISE_PAUSE_ASYM);
3025 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3026 }
3027 }
3028 return 1;
3029 }
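3030 /* Bring up the link on a copper PHY: reset if needed, wait for
3031 * autoneg, then match the MAC configuration to the result. */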
3032 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3033 {
3034 int current_link_up;
3035 u32 bmsr, dummy;
3036 u32 lcl_adv, rmt_adv;
3037 u16 current_speed;
3038 u8 current_duplex;
3039 int i, err;
3043 tw32_f(MAC_STATUS,
3044 (MAC_STATUS_SYNC_CHANGED |
3045 MAC_STATUS_CFG_CHANGED |
3046 MAC_STATUS_MI_COMPLETION |
3047 MAC_STATUS_LNKSTATE_CHANGED));
3050 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3051 tw32_f(MAC_MI_MODE,
3052 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3053 udelay(80);
3054 }
3056 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3058 /* Some third-party PHYs need to be reset on link going down. */
3061 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3064 netif_carrier_ok(tp->dev)) {
3065 tg3_readphy(tp, MII_BMSR, &bmsr);
3066 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3067 !(bmsr & BMSR_LSTATUS))
3068 force_reset = 1;
3069 }
3070 if (force_reset)
3071 tg3_phy_reset(tp);
3073 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3074 tg3_readphy(tp, MII_BMSR, &bmsr);
3075 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3076 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3079 if (!(bmsr & BMSR_LSTATUS)) {
3080 err = tg3_init_5401phy_dsp(tp);
3084 tg3_readphy(tp, MII_BMSR, &bmsr);
3085 for (i = 0; i < 1000; i++) {
3087 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3088 (bmsr & BMSR_LSTATUS)) {
3094 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3095 TG3_PHY_REV_BCM5401_B0 &&
3096 !(bmsr & BMSR_LSTATUS) &&
3097 tp->link_config.active_speed == SPEED_1000) {
3098 err = tg3_phy_reset(tp);
3099 if (!err)
3100 err = tg3_init_5401phy_dsp(tp);
3105 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3106 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3107 /* 5701 {A0,B0} CRC bug workaround */
3108 tg3_writephy(tp, 0x15, 0x0a75);
3109 tg3_writephy(tp, 0x1c, 0x8c68);
3110 tg3_writephy(tp, 0x1c, 0x8d68);
3111 tg3_writephy(tp, 0x1c, 0x8c68);
3114 /* Clear pending interrupts... */
3115 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3116 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3118 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3119 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3120 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3121 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3124 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3125 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3126 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3127 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3129 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3132 current_link_up = 0;
3133 current_speed = SPEED_INVALID;
3134 current_duplex = DUPLEX_INVALID;
3136 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3137 u32 val;
3139 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3140 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3141 if (!(val & (1 << 10))) {
3142 val |= (1 << 10);
3143 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3144 }
3145 }
3149 for (i = 0; i < 100; i++) {
3150 tg3_readphy(tp, MII_BMSR, &bmsr);
3151 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3152 (bmsr & BMSR_LSTATUS))
3157 if (bmsr & BMSR_LSTATUS) {
3159 u32 aux_stat, bmcr;
3160 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3161 for (i = 0; i < 2000; i++) {
3162 udelay(10);
3163 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3164 aux_stat)
3165 break;
3166 }
3168 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3169 &current_speed,
3170 &current_duplex);
3173 for (i = 0; i < 200; i++) {
3174 tg3_readphy(tp, MII_BMCR, &bmcr);
3175 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3176 continue;
3177 if (bmcr && bmcr != 0x7fff)
3178 break;
3179 udelay(10);
3180 }
3185 tp->link_config.active_speed = current_speed;
3186 tp->link_config.active_duplex = current_duplex;
3188 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3189 if ((bmcr & BMCR_ANENABLE) &&
3190 tg3_copper_is_advertising_all(tp,
3191 tp->link_config.advertising)) {
3192 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3193 &rmt_adv))
3194 current_link_up = 1;
3197 if (!(bmcr & BMCR_ANENABLE) &&
3198 tp->link_config.speed == current_speed &&
3199 tp->link_config.duplex == current_duplex &&
3200 tp->link_config.flowctrl ==
3201 tp->link_config.active_flowctrl) {
3202 current_link_up = 1;
3206 if (current_link_up == 1 &&
3207 tp->link_config.active_duplex == DUPLEX_FULL)
3208 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3212 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3213 u32 tmp;
3215 tg3_phy_copper_begin(tp);
3217 tg3_readphy(tp, MII_BMSR, &tmp);
3218 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3219 (tmp & BMSR_LSTATUS))
3220 current_link_up = 1;
3223 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3224 if (current_link_up == 1) {
3225 if (tp->link_config.active_speed == SPEED_100 ||
3226 tp->link_config.active_speed == SPEED_10)
3227 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3229 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3230 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3231 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3233 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3235 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3236 if (tp->link_config.active_duplex == DUPLEX_HALF)
3237 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3239 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3240 if (current_link_up == 1 &&
3241 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3242 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3244 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3247 /* ??? Without this setting Netgear GA302T PHY does not
3248 * ??? send/receive packets...
3249 */
3250 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3251 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3252 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3253 tw32_f(MAC_MI_MODE, tp->mi_mode);
3257 tw32_f(MAC_MODE, tp->mac_mode);
3260 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3261 /* Polled via timer. */
3262 tw32_f(MAC_EVENT, 0);
3264 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3268 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3269 current_link_up == 1 &&
3270 tp->link_config.active_speed == SPEED_1000 &&
3271 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3272 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3273 udelay(120);
3274 tw32_f(MAC_STATUS,
3275 (MAC_STATUS_SYNC_CHANGED |
3276 MAC_STATUS_CFG_CHANGED));
3277 udelay(40);
3278 tg3_write_mem(tp,
3279 NIC_SRAM_FIRMWARE_MBOX,
3280 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3281 }
3283 /* Prevent send BD corruption. */
3284 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3285 u16 oldlnkctl, newlnkctl;
3287 pci_read_config_word(tp->pdev,
3288 tp->pcie_cap + PCI_EXP_LNKCTL,
3289 &oldlnkctl);
3290 if (tp->link_config.active_speed == SPEED_100 ||
3291 tp->link_config.active_speed == SPEED_10)
3292 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3294 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3295 if (newlnkctl != oldlnkctl)
3296 pci_write_config_word(tp->pdev,
3297 tp->pcie_cap + PCI_EXP_LNKCTL,
3298 newlnkctl);
3301 if (current_link_up != netif_carrier_ok(tp->dev)) {
3302 if (current_link_up)
3303 netif_carrier_on(tp->dev);
3305 netif_carrier_off(tp->dev);
3306 tg3_link_report(tp);
3307 }
3309 return 0;
3310 }
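3311 /* Software state machine for 1000BASE-X (clause 37 style) autonegotiation, used on fiber links without hardware autoneg. */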
3312 struct tg3_fiber_aneginfo {
3313 int state;
3314 #define ANEG_STATE_UNKNOWN 0
3315 #define ANEG_STATE_AN_ENABLE 1
3316 #define ANEG_STATE_RESTART_INIT 2
3317 #define ANEG_STATE_RESTART 3
3318 #define ANEG_STATE_DISABLE_LINK_OK 4
3319 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3320 #define ANEG_STATE_ABILITY_DETECT 6
3321 #define ANEG_STATE_ACK_DETECT_INIT 7
3322 #define ANEG_STATE_ACK_DETECT 8
3323 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3324 #define ANEG_STATE_COMPLETE_ACK 10
3325 #define ANEG_STATE_IDLE_DETECT_INIT 11
3326 #define ANEG_STATE_IDLE_DETECT 12
3327 #define ANEG_STATE_LINK_OK 13
3328 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3329 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3331 u32 flags;
3332 #define MR_AN_ENABLE 0x00000001
3333 #define MR_RESTART_AN 0x00000002
3334 #define MR_AN_COMPLETE 0x00000004
3335 #define MR_PAGE_RX 0x00000008
3336 #define MR_NP_LOADED 0x00000010
3337 #define MR_TOGGLE_TX 0x00000020
3338 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3339 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3340 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3341 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3342 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3343 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3344 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3345 #define MR_TOGGLE_RX 0x00002000
3346 #define MR_NP_RX 0x00004000
3348 #define MR_LINK_OK 0x80000000
3350 unsigned long link_time, cur_time;
3352 u32 ability_match_cfg;
3353 int ability_match_count;
3355 char ability_match, idle_match, ack_match;
3357 u32 txconfig, rxconfig;
3358 #define ANEG_CFG_NP 0x00000080
3359 #define ANEG_CFG_ACK 0x00000040
3360 #define ANEG_CFG_RF2 0x00000020
3361 #define ANEG_CFG_RF1 0x00000010
3362 #define ANEG_CFG_PS2 0x00000001
3363 #define ANEG_CFG_PS1 0x00008000
3364 #define ANEG_CFG_HD 0x00004000
3365 #define ANEG_CFG_FD 0x00002000
3366 #define ANEG_CFG_INVAL 0x00001f06
3368 };
3369 #define ANEG_OK 0
3370 #define ANEG_DONE 1
3371 #define ANEG_TIMER_ENAB 2
3372 #define ANEG_FAILED -1
3374 #define ANEG_STATE_SETTLE_TIME 10000
3376 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3377 struct tg3_fiber_aneginfo *ap)
3378 {
3379 u16 flowctrl;
3380 unsigned long delta;
3381 u32 rx_cfg_reg;
3382 int ret;
3384 if (ap->state == ANEG_STATE_UNKNOWN) {
3388 ap->ability_match_cfg = 0;
3389 ap->ability_match_count = 0;
3390 ap->ability_match = 0;
3396 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3397 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3399 if (rx_cfg_reg != ap->ability_match_cfg) {
3400 ap->ability_match_cfg = rx_cfg_reg;
3401 ap->ability_match = 0;
3402 ap->ability_match_count = 0;
3404 if (++ap->ability_match_count > 1) {
3405 ap->ability_match = 1;
3406 ap->ability_match_cfg = rx_cfg_reg;
3409 if (rx_cfg_reg & ANEG_CFG_ACK)
3410 ap->ack_match = 1;
3411 else
3412 ap->ack_match = 0;
3417 ap->ability_match_cfg = 0;
3418 ap->ability_match_count = 0;
3419 ap->ability_match = 0;
3425 ap->rxconfig = rx_cfg_reg;
3429 case ANEG_STATE_UNKNOWN:
3430 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3431 ap->state = ANEG_STATE_AN_ENABLE;
3434 case ANEG_STATE_AN_ENABLE:
3435 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3436 if (ap->flags & MR_AN_ENABLE) {
3439 ap->ability_match_cfg = 0;
3440 ap->ability_match_count = 0;
3441 ap->ability_match = 0;
3445 ap->state = ANEG_STATE_RESTART_INIT;
3447 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3451 case ANEG_STATE_RESTART_INIT:
3452 ap->link_time = ap->cur_time;
3453 ap->flags &= ~(MR_NP_LOADED);
3455 tw32(MAC_TX_AUTO_NEG, 0);
3456 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3457 tw32_f(MAC_MODE, tp->mac_mode);
3460 ret = ANEG_TIMER_ENAB;
3461 ap->state = ANEG_STATE_RESTART;
3464 case ANEG_STATE_RESTART:
3465 delta = ap->cur_time - ap->link_time;
3466 if (delta > ANEG_STATE_SETTLE_TIME) {
3467 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3468 } else {
3469 ret = ANEG_TIMER_ENAB;
3470 }
3471 break;
3473 case ANEG_STATE_DISABLE_LINK_OK:
3474 ret = ANEG_DONE;
3475 break;
3477 case ANEG_STATE_ABILITY_DETECT_INIT:
3478 ap->flags &= ~(MR_TOGGLE_TX);
3479 ap->txconfig = ANEG_CFG_FD;
3480 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3481 if (flowctrl & ADVERTISE_1000XPAUSE)
3482 ap->txconfig |= ANEG_CFG_PS1;
3483 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3484 ap->txconfig |= ANEG_CFG_PS2;
3485 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3486 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3487 tw32_f(MAC_MODE, tp->mac_mode);
3490 ap->state = ANEG_STATE_ABILITY_DETECT;
3493 case ANEG_STATE_ABILITY_DETECT:
3494 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3495 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3499 case ANEG_STATE_ACK_DETECT_INIT:
3500 ap->txconfig |= ANEG_CFG_ACK;
3501 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3502 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3503 tw32_f(MAC_MODE, tp->mac_mode);
3506 ap->state = ANEG_STATE_ACK_DETECT;
3509 case ANEG_STATE_ACK_DETECT:
3510 if (ap->ack_match != 0) {
3511 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3512 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3513 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3515 ap->state = ANEG_STATE_AN_ENABLE;
3517 } else if (ap->ability_match != 0 &&
3518 ap->rxconfig == 0) {
3519 ap->state = ANEG_STATE_AN_ENABLE;
3523 case ANEG_STATE_COMPLETE_ACK_INIT:
3524 if (ap->rxconfig & ANEG_CFG_INVAL) {
3525 ret = ANEG_FAILED;
3526 break;
3527 }
3528 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3529 MR_LP_ADV_HALF_DUPLEX |
3530 MR_LP_ADV_SYM_PAUSE |
3531 MR_LP_ADV_ASYM_PAUSE |
3532 MR_LP_ADV_REMOTE_FAULT1 |
3533 MR_LP_ADV_REMOTE_FAULT2 |
3534 MR_LP_ADV_NEXT_PAGE |
3535 MR_TOGGLE_RX |
3536 MR_NP_RX);
3537 if (ap->rxconfig & ANEG_CFG_FD)
3538 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3539 if (ap->rxconfig & ANEG_CFG_HD)
3540 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3541 if (ap->rxconfig & ANEG_CFG_PS1)
3542 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3543 if (ap->rxconfig & ANEG_CFG_PS2)
3544 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3545 if (ap->rxconfig & ANEG_CFG_RF1)
3546 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3547 if (ap->rxconfig & ANEG_CFG_RF2)
3548 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3549 if (ap->rxconfig & ANEG_CFG_NP)
3550 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3552 ap->link_time = ap->cur_time;
3554 ap->flags ^= (MR_TOGGLE_TX);
3555 if (ap->rxconfig & 0x0008)
3556 ap->flags |= MR_TOGGLE_RX;
3557 if (ap->rxconfig & ANEG_CFG_NP)
3558 ap->flags |= MR_NP_RX;
3559 ap->flags |= MR_PAGE_RX;
3561 ap->state = ANEG_STATE_COMPLETE_ACK;
3562 ret = ANEG_TIMER_ENAB;
3565 case ANEG_STATE_COMPLETE_ACK:
3566 if (ap->ability_match != 0 &&
3567 ap->rxconfig == 0) {
3568 ap->state = ANEG_STATE_AN_ENABLE;
3571 delta = ap->cur_time - ap->link_time;
3572 if (delta > ANEG_STATE_SETTLE_TIME) {
3573 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3574 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3575 } else {
3576 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3577 !(ap->flags & MR_NP_RX)) {
3578 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3579 } else {
3580 ret = ANEG_FAILED;
3581 }
3582 }
3586 case ANEG_STATE_IDLE_DETECT_INIT:
3587 ap->link_time = ap->cur_time;
3588 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3589 tw32_f(MAC_MODE, tp->mac_mode);
3592 ap->state = ANEG_STATE_IDLE_DETECT;
3593 ret = ANEG_TIMER_ENAB;
3596 case ANEG_STATE_IDLE_DETECT:
3597 if (ap->ability_match != 0 &&
3598 ap->rxconfig == 0) {
3599 ap->state = ANEG_STATE_AN_ENABLE;
3602 delta = ap->cur_time - ap->link_time;
3603 if (delta > ANEG_STATE_SETTLE_TIME) {
3604 /* XXX another gem from the Broadcom driver :( */
3605 ap->state = ANEG_STATE_LINK_OK;
3609 case ANEG_STATE_LINK_OK:
3610 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3611 ret = ANEG_DONE;
3612 break;
3614 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3615 /* ??? unimplemented */
3616 break;
3618 case ANEG_STATE_NEXT_PAGE_WAIT:
3619 /* ??? unimplemented */
3620 break;
3622 default:
3623 ret = ANEG_FAILED;
3624 break;
3625 }
3627 return ret;
3628 }
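3629 /* Drive the fiber autoneg state machine to completion (or timeout) and return the negotiated tx/rx config words. */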
3630 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3632 int res = 0;
3633 struct tg3_fiber_aneginfo aninfo;
3634 int status = ANEG_FAILED;
3635 unsigned int tick;
3636 u32 tmp;
3638 tw32_f(MAC_TX_AUTO_NEG, 0);
3640 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3641 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3644 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3647 memset(&aninfo, 0, sizeof(aninfo));
3648 aninfo.flags |= MR_AN_ENABLE;
3649 aninfo.state = ANEG_STATE_UNKNOWN;
3650 aninfo.cur_time = 0;
3651 tick = 0;
3652 while (++tick < 195000) {
3653 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3654 if (status == ANEG_DONE || status == ANEG_FAILED)
3655 break;
3657 udelay(1);
3658 }
3660 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3661 tw32_f(MAC_MODE, tp->mac_mode);
3664 *txflags = aninfo.txconfig;
3665 *rxflags = aninfo.flags;
3667 if (status == ANEG_DONE &&
3668 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3669 MR_LP_ADV_FULL_DUPLEX)))
3670 res = 1;
3672 return res;
3673 }
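3674 /* Initialize the BCM8002 SerDes PHY with its vendor-specified register sequence. */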
3675 static void tg3_init_bcm8002(struct tg3 *tp)
3677 u32 mac_status = tr32(MAC_STATUS);
3678 int i;
3680 /* Reset when initting first time or we have a link. */
3681 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3682 !(mac_status & MAC_STATUS_PCS_SYNCED))
3683 return;
3685 /* Set PLL lock range. */
3686 tg3_writephy(tp, 0x16, 0x8007);
3689 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3691 /* Wait for reset to complete. */
3692 /* XXX schedule_timeout() ... */
3693 for (i = 0; i < 500; i++)
3694 udelay(10);
3696 /* Config mode; select PMA/Ch 1 regs. */
3697 tg3_writephy(tp, 0x10, 0x8411);
3699 /* Enable auto-lock and comdet, select txclk for tx. */
3700 tg3_writephy(tp, 0x11, 0x0a10);
3702 tg3_writephy(tp, 0x18, 0x00a0);
3703 tg3_writephy(tp, 0x16, 0x41ff);
3705 /* Assert and deassert POR. */
3706 tg3_writephy(tp, 0x13, 0x0400);
3708 tg3_writephy(tp, 0x13, 0x0000);
3710 tg3_writephy(tp, 0x11, 0x0a50);
3712 tg3_writephy(tp, 0x11, 0x0a10);
3714 /* Wait for signal to stabilize */
3715 /* XXX schedule_timeout() ... */
3716 for (i = 0; i < 15000; i++)
3717 udelay(10);
3719 /* Deselect the channel register so we can read the PHYID later. */
3722 tg3_writephy(tp, 0x10, 0x8011);
3725 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3728 u32 sg_dig_ctrl, sg_dig_status;
3729 u32 serdes_cfg, expected_sg_dig_ctrl;
3730 int workaround, port_a;
3731 int current_link_up;
3733 serdes_cfg = 0;
3734 expected_sg_dig_ctrl = 0;
3735 workaround = 0;
3736 port_a = 1;
3737 current_link_up = 0;
3739 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3740 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3741 workaround = 1;
3742 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3743 port_a = 0;
3745 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3746 /* preserve bits 20-23 for voltage regulator */
3747 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3750 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3752 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3753 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3755 u32 val = serdes_cfg;
3761 tw32_f(MAC_SERDES_CFG, val);
3764 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3766 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3767 tg3_setup_flow_control(tp, 0, 0);
3768 current_link_up = 1;
3773 /* Want auto-negotiation. */
3774 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3776 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3777 if (flowctrl & ADVERTISE_1000XPAUSE)
3778 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3779 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3780 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3782 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3783 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3784 tp->serdes_counter &&
3785 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3786 MAC_STATUS_RCVD_CFG)) ==
3787 MAC_STATUS_PCS_SYNCED)) {
3788 tp->serdes_counter--;
3789 current_link_up = 1;
3792 restart_autoneg:
3793 if (workaround)
3794 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3795 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3797 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3799 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3800 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3801 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3802 MAC_STATUS_SIGNAL_DET)) {
3803 sg_dig_status = tr32(SG_DIG_STATUS);
3804 mac_status = tr32(MAC_STATUS);
3806 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3807 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3808 u32 local_adv = 0, remote_adv = 0;
3810 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3811 local_adv |= ADVERTISE_1000XPAUSE;
3812 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3813 local_adv |= ADVERTISE_1000XPSE_ASYM;
3815 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3816 remote_adv |= LPA_1000XPAUSE;
3817 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3818 remote_adv |= LPA_1000XPAUSE_ASYM;
3820 tg3_setup_flow_control(tp, local_adv, remote_adv);
3821 current_link_up = 1;
3822 tp->serdes_counter = 0;
3823 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3824 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3825 if (tp->serdes_counter)
3826 tp->serdes_counter--;
3829 u32 val = serdes_cfg;
3836 tw32_f(MAC_SERDES_CFG, val);
3839 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3842 /* Link parallel detection - link is up */
3843 /* only if we have PCS_SYNC and not */
3844 /* receiving config code words */
3845 mac_status = tr32(MAC_STATUS);
3846 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3847 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3848 tg3_setup_flow_control(tp, 0, 0);
3849 current_link_up = 1;
3850 tp->tg3_flags2 |=
3851 TG3_FLG2_PARALLEL_DETECT;
3852 tp->serdes_counter =
3853 SERDES_PARALLEL_DET_TIMEOUT;
3855 goto restart_autoneg;
3859 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3860 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3864 return current_link_up;
3865 }
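3866 /* Fiber link bring-up when the MAC lacks hardware autoneg: run the software state machine or force the link. */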
3867 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3869 int current_link_up = 0;
3870 int i;
3871 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3874 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3875 u32 txflags, rxflags;
3878 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3879 u32 local_adv = 0, remote_adv = 0;
3881 if (txflags & ANEG_CFG_PS1)
3882 local_adv |= ADVERTISE_1000XPAUSE;
3883 if (txflags & ANEG_CFG_PS2)
3884 local_adv |= ADVERTISE_1000XPSE_ASYM;
3886 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3887 remote_adv |= LPA_1000XPAUSE;
3888 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3889 remote_adv |= LPA_1000XPAUSE_ASYM;
3891 tg3_setup_flow_control(tp, local_adv, remote_adv);
3893 current_link_up = 1;
3895 for (i = 0; i < 30; i++) {
3896 udelay(20);
3897 tw32_f(MAC_STATUS,
3898 (MAC_STATUS_SYNC_CHANGED |
3899 MAC_STATUS_CFG_CHANGED));
3900 udelay(40);
3901 if ((tr32(MAC_STATUS) &
3902 (MAC_STATUS_SYNC_CHANGED |
3903 MAC_STATUS_CFG_CHANGED)) == 0)
3904 break;
3905 }
3907 mac_status = tr32(MAC_STATUS);
3908 if (current_link_up == 0 &&
3909 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3910 !(mac_status & MAC_STATUS_RCVD_CFG))
3911 current_link_up = 1;
3913 tg3_setup_flow_control(tp, 0, 0);
3915 /* Forcing 1000FD link up. */
3916 current_link_up = 1;
3918 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3921 tw32_f(MAC_MODE, tp->mac_mode);
3926 return current_link_up;
3929 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3930 {
3931 u32 orig_pause_cfg;
3932 u16 orig_active_speed;
3933 u8 orig_active_duplex;
3934 u32 mac_status;
3935 int current_link_up;
3936 int i;
3938 orig_pause_cfg = tp->link_config.active_flowctrl;
3939 orig_active_speed = tp->link_config.active_speed;
3940 orig_active_duplex = tp->link_config.active_duplex;
3942 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3943 netif_carrier_ok(tp->dev) &&
3944 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3945 mac_status = tr32(MAC_STATUS);
3946 mac_status &= (MAC_STATUS_PCS_SYNCED |
3947 MAC_STATUS_SIGNAL_DET |
3948 MAC_STATUS_CFG_CHANGED |
3949 MAC_STATUS_RCVD_CFG);
3950 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3951 MAC_STATUS_SIGNAL_DET)) {
3952 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3953 MAC_STATUS_CFG_CHANGED));
3954 udelay(40);
3955 return 0;
3956 }
3957 }
3958 tw32_f(MAC_TX_AUTO_NEG, 0);
3960 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3961 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3962 tw32_f(MAC_MODE, tp->mac_mode);
3965 if (tp->phy_id == TG3_PHY_ID_BCM8002)
3966 tg3_init_bcm8002(tp);
3968 /* Enable link change event even when serdes polling. */
3969 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3972 current_link_up = 0;
3973 mac_status = tr32(MAC_STATUS);
3975 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3976 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3978 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3980 tp->napi[0].hw_status->status =
3981 (SD_STATUS_UPDATED |
3982 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3984 for (i = 0; i < 100; i++) {
3985 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3986 MAC_STATUS_CFG_CHANGED));
3987 udelay(5);
3988 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3989 MAC_STATUS_CFG_CHANGED |
3990 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3991 break;
3992 }
3994 mac_status = tr32(MAC_STATUS);
3995 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3996 current_link_up = 0;
3997 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3998 tp->serdes_counter == 0) {
3999 tw32_f(MAC_MODE, (tp->mac_mode |
4000 MAC_MODE_SEND_CONFIGS));
4002 tw32_f(MAC_MODE, tp->mac_mode);
4006 if (current_link_up == 1) {
4007 tp->link_config.active_speed = SPEED_1000;
4008 tp->link_config.active_duplex = DUPLEX_FULL;
4009 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4010 LED_CTRL_LNKLED_OVERRIDE |
4011 LED_CTRL_1000MBPS_ON));
4013 tp->link_config.active_speed = SPEED_INVALID;
4014 tp->link_config.active_duplex = DUPLEX_INVALID;
4015 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4016 LED_CTRL_LNKLED_OVERRIDE |
4017 LED_CTRL_TRAFFIC_OVERRIDE));
4020 if (current_link_up != netif_carrier_ok(tp->dev)) {
4021 if (current_link_up)
4022 netif_carrier_on(tp->dev);
4024 netif_carrier_off(tp->dev);
4025 tg3_link_report(tp);
4027 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4028 if (orig_pause_cfg != now_pause_cfg ||
4029 orig_active_speed != tp->link_config.active_speed ||
4030 orig_active_duplex != tp->link_config.active_duplex)
4031 tg3_link_report(tp);
4032 }
4034 return 0;
4035 }
4037 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4038 {
4039 int current_link_up, err = 0;
4040 u32 bmsr, bmcr;
4041 u16 current_speed;
4042 u8 current_duplex;
4043 u32 local_adv, remote_adv;
4045 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4046 tw32_f(MAC_MODE, tp->mac_mode);
4051 tw32_f(MAC_STATUS,
4052 (MAC_STATUS_SYNC_CHANGED |
4053 MAC_STATUS_CFG_CHANGED |
4054 MAC_STATUS_MI_COMPLETION |
4055 MAC_STATUS_LNKSTATE_CHANGED));
4061 current_link_up = 0;
4062 current_speed = SPEED_INVALID;
4063 current_duplex = DUPLEX_INVALID;
4065 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4066 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4068 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4069 bmsr |= BMSR_LSTATUS;
4071 bmsr &= ~BMSR_LSTATUS;
4074 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4076 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4077 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4078 /* do nothing, just check for link up at the end */
4079 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4080 u32 adv, new_adv;
4082 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4083 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4084 ADVERTISE_1000XPAUSE |
4085 ADVERTISE_1000XPSE_ASYM |
4086 ADVERTISE_SLCT);
4088 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4090 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4091 new_adv |= ADVERTISE_1000XHALF;
4092 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4093 new_adv |= ADVERTISE_1000XFULL;
4095 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4096 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4097 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4098 tg3_writephy(tp, MII_BMCR, bmcr);
4100 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4101 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4102 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4104 return err;
4105 }
4106 } else {
4107 u32 new_bmcr;
4109 bmcr &= ~BMCR_SPEED1000;
4110 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4112 if (tp->link_config.duplex == DUPLEX_FULL)
4113 new_bmcr |= BMCR_FULLDPLX;
4115 if (new_bmcr != bmcr) {
4116 /* BMCR_SPEED1000 is a reserved bit that needs
4117 * to be set on write. */
4119 new_bmcr |= BMCR_SPEED1000;
4121 /* Force a linkdown */
4122 if (netif_carrier_ok(tp->dev)) {
4123 u32 adv;
4125 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4126 adv &= ~(ADVERTISE_1000XFULL |
4127 ADVERTISE_1000XHALF |
4128 ADVERTISE_SLCT);
4129 tg3_writephy(tp, MII_ADVERTISE, adv);
4130 tg3_writephy(tp, MII_BMCR, bmcr |
4131 BMCR_ANRESTART |
4132 BMCR_ANENABLE);
4133 udelay(10);
4134 netif_carrier_off(tp->dev);
4136 tg3_writephy(tp, MII_BMCR, new_bmcr);
4138 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4139 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4140 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4142 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4143 bmsr |= BMSR_LSTATUS;
4145 bmsr &= ~BMSR_LSTATUS;
4147 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4151 if (bmsr & BMSR_LSTATUS) {
4152 current_speed = SPEED_1000;
4153 current_link_up = 1;
4154 if (bmcr & BMCR_FULLDPLX)
4155 current_duplex = DUPLEX_FULL;
4157 current_duplex = DUPLEX_HALF;
4162 if (bmcr & BMCR_ANENABLE) {
4163 u32 common;
4165 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4166 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4167 common = local_adv & remote_adv;
4168 if (common & (ADVERTISE_1000XHALF |
4169 ADVERTISE_1000XFULL)) {
4170 if (common & ADVERTISE_1000XFULL)
4171 current_duplex = DUPLEX_FULL;
4172 else
4173 current_duplex = DUPLEX_HALF;
4176 current_link_up = 0;
4180 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4181 tg3_setup_flow_control(tp, local_adv, remote_adv);
4183 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4184 if (tp->link_config.active_duplex == DUPLEX_HALF)
4185 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4187 tw32_f(MAC_MODE, tp->mac_mode);
4190 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4192 tp->link_config.active_speed = current_speed;
4193 tp->link_config.active_duplex = current_duplex;
4195 if (current_link_up != netif_carrier_ok(tp->dev)) {
4196 if (current_link_up)
4197 netif_carrier_on(tp->dev);
4199 netif_carrier_off(tp->dev);
4200 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4202 tg3_link_report(tp);
4203 }
4204 return err;
4205 }
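4206 /* Watch for link established by parallel detection when the SerDes link partner does not autonegotiate. */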
4207 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4209 if (tp->serdes_counter) {
4210 /* Give autoneg time to complete. */
4211 tp->serdes_counter--;
4214 if (!netif_carrier_ok(tp->dev) &&
4215 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4218 tg3_readphy(tp, MII_BMCR, &bmcr);
4219 if (bmcr & BMCR_ANENABLE) {
4222 /* Select shadow register 0x1f */
4223 tg3_writephy(tp, 0x1c, 0x7c00);
4224 tg3_readphy(tp, 0x1c, &phy1);
4226 /* Select expansion interrupt status register */
4227 tg3_writephy(tp, 0x17, 0x0f01);
4228 tg3_readphy(tp, 0x15, &phy2);
4229 tg3_readphy(tp, 0x15, &phy2);
4231 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4232 /* We have signal detect and not receiving
4233 * config code words, link is up by parallel detection. */
4237 bmcr &= ~BMCR_ANENABLE;
4238 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4239 tg3_writephy(tp, MII_BMCR, bmcr);
4240 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4244 else if (netif_carrier_ok(tp->dev) &&
4245 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4246 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4249 /* Select expansion interrupt status register */
4250 tg3_writephy(tp, 0x17, 0x0f01);
4251 tg3_readphy(tp, 0x15, &phy2);
4253 if (phy2 & 0x20) {
4255 /* Config code words received, turn on autoneg. */
4256 tg3_readphy(tp, MII_BMCR, &bmcr);
4257 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4259 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4260 }
4261 }
4262 }
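4263 /* Top-level link setup: dispatch to the copper or fiber handler, then
4264 * adjust MAC clocking, IPG timing, and statistics coalescing. */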
4265 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4266 {
4267 int err;
4269 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4270 err = tg3_setup_fiber_phy(tp, force_reset);
4271 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4272 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4274 err = tg3_setup_copper_phy(tp, force_reset);
4277 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4278 u32 val, scale;
4280 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4281 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4282 scale = 65;
4283 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4284 scale = 6;
4285 else
4286 scale = 12;
4288 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4289 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4290 tw32(GRC_MISC_CFG, val);
4293 if (tp->link_config.active_speed == SPEED_1000 &&
4294 tp->link_config.active_duplex == DUPLEX_HALF)
4295 tw32(MAC_TX_LENGTHS,
4296 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4297 (6 << TX_LENGTHS_IPG_SHIFT) |
4298 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4300 tw32(MAC_TX_LENGTHS,
4301 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4302 (6 << TX_LENGTHS_IPG_SHIFT) |
4303 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4305 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4306 if (netif_carrier_ok(tp->dev)) {
4307 tw32(HOSTCC_STAT_COAL_TICKS,
4308 tp->coal.stats_block_coalesce_usecs);
4310 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4314 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4315 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4316 if (!netif_carrier_ok(tp->dev))
4317 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4318 tp->pwrmgmt_thresh;
4319 else
4320 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4321 tw32(PCIE_PWR_MGMT_THRESH, val);
4322 }
4324 return err;
4325 }
4327 /* This is called whenever we suspect that the system chipset is re-
4328 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4329 * is bogus tx completions. We try to recover by setting the
4330 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4331 * (in tg3_restart_hw()). */
4333 static void tg3_tx_recover(struct tg3 *tp)
4335 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4336 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4338 netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
4339 "Please report the problem to the driver maintainer and include system chipset information.\n");
4341 spin_lock(&tp->lock);
4342 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4343 spin_unlock(&tp->lock);
4344 }
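4345 /* Free tx descriptors: the ring holds tx_pending entries, minus those between the producer and consumer indices. */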
4346 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4347 {
4348 smp_mb();
4349 return tnapi->tx_pending -
4350 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4351 }
4353 /* Tigon3 never reports partial packet sends. So we do not
4354 * need special logic to handle SKBs that have not had all
4355 * of their frags sent yet, like SunGEM does.
4356 */
4357 static void tg3_tx(struct tg3_napi *tnapi)
4359 struct tg3 *tp = tnapi->tp;
4360 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4361 u32 sw_idx = tnapi->tx_cons;
4362 struct netdev_queue *txq;
4363 int index = tnapi - tp->napi;
4365 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4366 index--;
4368 txq = netdev_get_tx_queue(tp->dev, index);
4370 while (sw_idx != hw_idx) {
4371 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4372 struct sk_buff *skb = ri->skb;
4373 int i, tx_bug = 0;
4375 if (unlikely(skb == NULL)) {
4376 tg3_tx_recover(tp);
4377 return;
4378 }
4380 pci_unmap_single(tp->pdev,
4381 pci_unmap_addr(ri, mapping),
4382 skb_headlen(skb),
4383 PCI_DMA_TODEVICE);
4384 ri->skb = NULL;
4387 sw_idx = NEXT_TX(sw_idx);
4389 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4390 ri = &tnapi->tx_buffers[sw_idx];
4391 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4392 tx_bug = 1;
4394 pci_unmap_page(tp->pdev,
4395 pci_unmap_addr(ri, mapping),
4396 skb_shinfo(skb)->frags[i].size,
4398 sw_idx = NEXT_TX(sw_idx);
4399 }
4401 dev_kfree_skb(skb);
4403 if (unlikely(tx_bug)) {
4404 tg3_tx_recover(tp);
4405 return;
4406 }
4409 tnapi->tx_cons = sw_idx;
4411 /* Need to make the tx_cons update visible to tg3_start_xmit()
4412 * before checking for netif_queue_stopped(). Without the
4413 * memory barrier, there is a small possibility that tg3_start_xmit()
4414 * will miss it and cause the queue to be stopped forever.
4418 if (unlikely(netif_tx_queue_stopped(txq) &&
4419 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4420 __netif_tx_lock(txq, smp_processor_id());
4421 if (netif_tx_queue_stopped(txq) &&
4422 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4423 netif_tx_wake_queue(txq);
4424 __netif_tx_unlock(txq);
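4426 /* Unmap and free the receive buffer attached to a ring slot, if any. */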
4428 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4429 {
4430 if (!ri->skb)
4431 return;
4433 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4434 map_sz, PCI_DMA_FROMDEVICE);
4435 dev_kfree_skb_any(ri->skb);
4436 ri->skb = NULL;
4437 }
4439 /* Returns size of skb allocated or < 0 on error.
4441 * We only need to fill in the address because the other members
4442 * of the RX descriptor are invariant, see tg3_init_rings.
4444 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4445 * posting buffers we only dirty the first cache line of the RX
4446 * descriptor (containing the address). Whereas for the RX status
4447 * buffers the cpu only reads the last cacheline of the RX descriptor
4448 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4449 */
4450 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4451 u32 opaque_key, u32 dest_idx_unmasked)
4453 struct tg3_rx_buffer_desc *desc;
4454 struct ring_info *map, *src_map;
4455 struct sk_buff *skb;
4456 dma_addr_t mapping;
4457 int skb_size, dest_idx;
4460 switch (opaque_key) {
4461 case RXD_OPAQUE_RING_STD:
4462 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4463 desc = &tpr->rx_std[dest_idx];
4464 map = &tpr->rx_std_buffers[dest_idx];
4465 skb_size = tp->rx_pkt_map_sz;
4466 break;
4468 case RXD_OPAQUE_RING_JUMBO:
4469 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4470 desc = &tpr->rx_jmb[dest_idx].std;
4471 map = &tpr->rx_jmb_buffers[dest_idx];
4472 skb_size = TG3_RX_JMB_MAP_SZ;
4473 break;
4475 default:
4476 return -EINVAL;
4477 }
4479 /* Do not overwrite any of the map or rp information
4480 * until we are sure we can commit to a new buffer.
4482 * Callers depend upon this behavior and assume that
4483 * we leave everything unchanged if we fail.
4484 */
4485 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4486 if (skb == NULL)
4487 return -ENOMEM;
4489 skb_reserve(skb, tp->rx_offset);
4491 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4492 PCI_DMA_FROMDEVICE);
4493 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4494 dev_kfree_skb(skb);
4495 return -EIO;
4496 }
4498 map->skb = skb;
4499 pci_unmap_addr_set(map, mapping, mapping);
4501 desc->addr_hi = ((u64)mapping >> 32);
4502 desc->addr_lo = ((u64)mapping & 0xffffffff);
4504 return skb_size;
4505 }
4507 /* We only need to move over in the address because the other
4508 * members of the RX descriptor are invariant. See notes above
4509 * tg3_alloc_rx_skb for full details.
4510 */
4511 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4512 struct tg3_rx_prodring_set *dpr,
4513 u32 opaque_key, int src_idx,
4514 u32 dest_idx_unmasked)
4516 struct tg3 *tp = tnapi->tp;
4517 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4518 struct ring_info *src_map, *dest_map;
4519 int dest_idx;
4520 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4522 switch (opaque_key) {
4523 case RXD_OPAQUE_RING_STD:
4524 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4525 dest_desc = &dpr->rx_std[dest_idx];
4526 dest_map = &dpr->rx_std_buffers[dest_idx];
4527 src_desc = &spr->rx_std[src_idx];
4528 src_map = &spr->rx_std_buffers[src_idx];
4529 break;
4531 case RXD_OPAQUE_RING_JUMBO:
4532 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4533 dest_desc = &dpr->rx_jmb[dest_idx].std;
4534 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4535 src_desc = &spr->rx_jmb[src_idx].std;
4536 src_map = &spr->rx_jmb_buffers[src_idx];
4537 break;
4539 default:
4540 return;
4541 }
4543 dest_map->skb = src_map->skb;
4544 pci_unmap_addr_set(dest_map, mapping,
4545 pci_unmap_addr(src_map, mapping));
4546 dest_desc->addr_hi = src_desc->addr_hi;
4547 dest_desc->addr_lo = src_desc->addr_lo;
4549 /* Ensure that the update to the skb happens after the physical
4550 * addresses have been transferred to the new BD location.
4551 */
4552 smp_wmb();
4554 src_map->skb = NULL;
4557 /* The RX ring scheme is composed of multiple rings which post fresh
4558 * buffers to the chip, and one special ring the chip uses to report
4559 * status back to the host.
4561 * The special ring reports the status of received packets to the
4562 * host. The chip does not write back into the original descriptor
4563 * that the RX buffer came from. The chip simply takes the original
4564 * descriptor as provided by the host, updates the status and length
4565 * fields, then writes this into the next status ring entry.
4567 * Each ring the host uses to post buffers to the chip is described
4568 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4569 * it is first placed into the on-chip ram. When the packet's length
4570 * is known, it walks down the TG3_BDINFO entries to select the ring.
4571 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4572 * which is within the range of the new packet's length is chosen.
4574 * The "separate ring for rx status" scheme may sound queer, but it makes
4575 * sense from a cache coherency perspective. If only the host writes
4576 * to the buffer post rings, and only the chip writes to the rx status
4577 * rings, then cache lines never move beyond shared-modified state.
4578 * If both the host and chip were to write into the same ring, cache line
4579 * eviction could occur since both entities want it in an exclusive state.
4581 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4583 struct tg3 *tp = tnapi->tp;
4584 u32 work_mask, rx_std_posted = 0;
4585 u32 std_prod_idx, jmb_prod_idx;
4586 u32 sw_idx = tnapi->rx_rcb_ptr;
4589 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4591 hw_idx = *(tnapi->rx_rcb_prod_idx);
4593 * We need to order the read of hw_idx and the read of
4594 * the opaque cookie.
4599 std_prod_idx = tpr->rx_std_prod_idx;
4600 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4601 while (sw_idx != hw_idx && budget > 0) {
4602 struct ring_info *ri;
4603 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4605 struct sk_buff *skb;
4606 dma_addr_t dma_addr;
4607 u32 opaque_key, desc_idx, *post_ptr;
4609 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4610 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4611 if (opaque_key == RXD_OPAQUE_RING_STD) {
4612 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4613 dma_addr = pci_unmap_addr(ri, mapping);
4615 post_ptr = &std_prod_idx;
4617 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4618 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4619 dma_addr = pci_unmap_addr(ri, mapping);
4621 post_ptr = &jmb_prod_idx;
4623 goto next_pkt_nopost;
4625 work_mask |= opaque_key;
4627 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4628 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4630 tg3_recycle_rx(tnapi, tpr, opaque_key,
4631 desc_idx, *post_ptr);
4633 /* Other statistics are kept track of by the card. */
4634 tp->net_stats.rx_dropped++;
4638 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4641 if (len > RX_COPY_THRESHOLD &&
4642 tp->rx_offset == NET_IP_ALIGN) {
4643 /* rx_offset will likely not equal NET_IP_ALIGN
4644 * if this is a 5701 card running in PCI-X mode
4645 * [see tg3_get_invariants()]
4649 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4654 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4655 PCI_DMA_FROMDEVICE);
4657 /* Ensure that the update to the skb happens
4658 * after the usage of the old DMA mapping.
4666 struct sk_buff *copy_skb;
4668 tg3_recycle_rx(tnapi, tpr, opaque_key,
4669 desc_idx, *post_ptr);
4671 copy_skb = netdev_alloc_skb(tp->dev,
4672 len + TG3_RAW_IP_ALIGN);
4673 if (copy_skb == NULL)
4674 goto drop_it_no_recycle;
4676 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4677 skb_put(copy_skb, len);
4678 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4679 skb_copy_from_linear_data(skb, copy_skb->data, len);
4680 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4682 /* We'll reuse the original ring buffer. */
4686 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4687 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4688 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4689 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4690 skb->ip_summed = CHECKSUM_UNNECESSARY;
4692 skb->ip_summed = CHECKSUM_NONE;
4694 skb->protocol = eth_type_trans(skb, tp->dev);
4696 if (len > (tp->dev->mtu + ETH_HLEN) &&
4697 skb->protocol != htons(ETH_P_8021Q)) {
4702 #if TG3_VLAN_TAG_USED
4703 if (tp->vlgrp != NULL &&
4704 desc->type_flags & RXD_FLAG_VLAN) {
4705 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4706 desc->err_vlan & RXD_VLAN_MASK, skb);
4709 napi_gro_receive(&tnapi->napi, skb);
4717 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4718 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4719 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4720 tpr->rx_std_prod_idx);
4721 work_mask &= ~RXD_OPAQUE_RING_STD;
4726 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4728 /* Refresh hw_idx to see if there is new work */
4729 if (sw_idx == hw_idx) {
4730 hw_idx = *(tnapi->rx_rcb_prod_idx);
4735 /* ACK the status ring. */
4736 tnapi->rx_rcb_ptr = sw_idx;
4737 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4739 /* Refill RX ring(s). */
4740 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4741 if (work_mask & RXD_OPAQUE_RING_STD) {
4742 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4743 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4744 tpr->rx_std_prod_idx);
4746 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4747 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4748 TG3_RX_JUMBO_RING_SIZE;
4749 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4750 tpr->rx_jmb_prod_idx);
4753 } else if (work_mask) {
4754 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4755 * updated before the producer indices can be updated.
4759 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4760 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4762 if (tnapi != &tp->napi[1])
4763 napi_schedule(&tp->napi[1].napi);
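/* Illustrative sketch, not part of the driver: the index-wrapping
 * idiom behind the 'sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1)' update
 * in tg3_rx() above.  For a power-of-two ring size, masking is
 * equivalent to the modulo operation but compiles to a single AND.
 */
static inline u32 example_ring_advance(u32 idx, u32 size_pow2)
{
	return (idx + 1) & (size_pow2 - 1);	/* == (idx + 1) % size_pow2 */
}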
4769 static void tg3_poll_link(struct tg3 *tp)
4771 /* handle link change and other phy events */
4772 if (!(tp->tg3_flags &
4773 (TG3_FLAG_USE_LINKCHG_REG |
4774 TG3_FLAG_POLL_SERDES))) {
4775 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4777 if (sblk->status & SD_STATUS_LINK_CHG) {
4778 sblk->status = SD_STATUS_UPDATED |
4779 (sblk->status & ~SD_STATUS_LINK_CHG);
4780 spin_lock(&tp->lock);
4781 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4783 (MAC_STATUS_SYNC_CHANGED |
4784 MAC_STATUS_CFG_CHANGED |
4785 MAC_STATUS_MI_COMPLETION |
4786 MAC_STATUS_LNKSTATE_CHANGED));
4789 tg3_setup_phy(tp, 0);
4790 spin_unlock(&tp->lock);
4795 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4796 struct tg3_rx_prodring_set *dpr,
4797 struct tg3_rx_prodring_set *spr)
4799 u32 si, di, cpycnt, src_prod_idx;
4803 src_prod_idx = spr->rx_std_prod_idx;
4805 /* Make sure updates to the rx_std_buffers[] entries and the
4806 * standard producer index are seen in the correct order.
4810 if (spr->rx_std_cons_idx == src_prod_idx)
4813 if (spr->rx_std_cons_idx < src_prod_idx)
4814 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4816 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4818 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4820 si = spr->rx_std_cons_idx;
4821 di = dpr->rx_std_prod_idx;
4823 for (i = di; i < di + cpycnt; i++) {
4824 if (dpr->rx_std_buffers[i].skb) {
4834 /* Ensure that updates to the rx_std_buffers ring and the
4835 * shadowed hardware producer ring from tg3_recycle_skb() are
4836 * ordered correctly WRT the skb check above.
4840 memcpy(&dpr->rx_std_buffers[di],
4841 &spr->rx_std_buffers[si],
4842 cpycnt * sizeof(struct ring_info));
4844 for (i = 0; i < cpycnt; i++, di++, si++) {
4845 struct tg3_rx_buffer_desc *sbd, *dbd;
4846 sbd = &spr->rx_std[si];
4847 dbd = &dpr->rx_std[di];
4848 dbd->addr_hi = sbd->addr_hi;
4849 dbd->addr_lo = sbd->addr_lo;
4852 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4854 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4859 src_prod_idx = spr->rx_jmb_prod_idx;
4861 /* Make sure updates to the rx_jmb_buffers[] entries and
4862 * the jumbo producer index are seen in the correct order.
4866 if (spr->rx_jmb_cons_idx == src_prod_idx)
4869 if (spr->rx_jmb_cons_idx < src_prod_idx)
4870 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4872 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4874 cpycnt = min(cpycnt,
4875 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4877 si = spr->rx_jmb_cons_idx;
4878 di = dpr->rx_jmb_prod_idx;
4880 for (i = di; i < di + cpycnt; i++) {
4881 if (dpr->rx_jmb_buffers[i].skb) {
4891 /* Ensure that updates to the rx_jmb_buffers ring and the
4892 * shadowed hardware producer ring from tg3_recycle_skb() are
4893 * ordered correctly WRT the skb check above.
4897 memcpy(&dpr->rx_jmb_buffers[di],
4898 &spr->rx_jmb_buffers[si],
4899 cpycnt * sizeof(struct ring_info));
4901 for (i = 0; i < cpycnt; i++, di++, si++) {
4902 struct tg3_rx_buffer_desc *sbd, *dbd;
4903 sbd = &spr->rx_jmb[si].std;
4904 dbd = &dpr->rx_jmb[di].std;
4905 dbd->addr_hi = sbd->addr_hi;
4906 dbd->addr_lo = sbd->addr_lo;
4909 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4910 TG3_RX_JUMBO_RING_SIZE;
4911 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4912 TG3_RX_JUMBO_RING_SIZE;
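/* Illustrative sketch, not part of the driver: the contiguous-run
 * calculation performed twice in tg3_rx_prodring_xfer() above.  When
 * the producer index has wrapped behind the consumer, only the entries
 * up to the end of the ring are transferred this pass; the wrapped
 * remainder is handled by a subsequent call.
 */
static inline u32 example_contig_entries(u32 cons, u32 prod, u32 size)
{
	if (cons == prod)
		return 0;		/* ring is empty */
	if (cons < prod)
		return prod - cons;	/* single contiguous run */
	return size - cons;		/* run up to the wrap point */
}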
4918 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4920 struct tg3 *tp = tnapi->tp;
4922 /* run TX completion thread */
4923 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4925 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4929 /* run RX thread, within the bounds set by NAPI.
4930 * All RX "locking" is done by ensuring outside
4931 * code synchronizes with tg3->napi.poll()
4933 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4934 work_done += tg3_rx(tnapi, budget - work_done);
4936 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4937 struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
4939 u32 std_prod_idx = dpr->rx_std_prod_idx;
4940 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4942 for (i = 1; i < tp->irq_cnt; i++)
4943 err |= tg3_rx_prodring_xfer(tp, dpr,
4944 tp->napi[i].prodring);
4948 if (std_prod_idx != dpr->rx_std_prod_idx)
4949 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4950 dpr->rx_std_prod_idx);
4952 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4953 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4954 dpr->rx_jmb_prod_idx);
4959 tw32_f(HOSTCC_MODE, tp->coal_now);
4965 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4967 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4968 struct tg3 *tp = tnapi->tp;
4970 struct tg3_hw_status *sblk = tnapi->hw_status;
4973 work_done = tg3_poll_work(tnapi, work_done, budget);
4975 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4978 if (unlikely(work_done >= budget))
4981 /* tp->last_tag is used in tg3_restart_ints() below
4982 * to tell the hw how much work has been processed,
4983 * so we must read it before checking for more work.
4985 tnapi->last_tag = sblk->status_tag;
4986 tnapi->last_irq_tag = tnapi->last_tag;
4989 /* check for RX/TX work to do */
4990 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4991 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
4992 napi_complete(napi);
4993 /* Reenable interrupts. */
4994 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5003 /* work_done is guaranteed to be less than budget. */
5004 napi_complete(napi);
5005 schedule_work(&tp->reset_task);
5009 static int tg3_poll(struct napi_struct *napi, int budget)
5011 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5012 struct tg3 *tp = tnapi->tp;
5014 struct tg3_hw_status *sblk = tnapi->hw_status;
5019 work_done = tg3_poll_work(tnapi, work_done, budget);
5021 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5024 if (unlikely(work_done >= budget))
5027 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5028 /* tp->last_tag is used in tg3_int_reenable() below
5029 * to tell the hw how much work has been processed,
5030 * so we must read it before checking for more work.
5032 tnapi->last_tag = sblk->status_tag;
5033 tnapi->last_irq_tag = tnapi->last_tag;
5036 sblk->status &= ~SD_STATUS_UPDATED;
5038 if (likely(!tg3_has_work(tnapi))) {
5039 napi_complete(napi);
5040 tg3_int_reenable(tnapi);
5048 /* work_done is guaranteed to be less than budget. */
5049 napi_complete(napi);
5050 schedule_work(&tp->reset_task);
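/* Illustrative sketch, not part of the driver: the canonical NAPI
 * shape that tg3_poll() and tg3_poll_msix() above follow -- consume at
 * most 'budget' packets, and only complete NAPI and re-enable the
 * device interrupt when the budget was not exhausted.  The packet
 * processing itself is elided here.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to 'budget' packets, updating work_done ... */

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable the interrupt source here */
	}
	return work_done;
}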
5054 static void tg3_irq_quiesce(struct tg3 *tp)
5058 BUG_ON(tp->irq_sync);
5063 for (i = 0; i < tp->irq_cnt; i++)
5064 synchronize_irq(tp->napi[i].irq_vec);
5067 static inline int tg3_irq_sync(struct tg3 *tp)
5069 return tp->irq_sync;
5072 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5073 * If irq_sync is non-zero, the IRQ handlers must be quiesced as
5074 * well. Most of the time this is not necessary, except when
5075 * shutting down the device.
5077 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5079 spin_lock_bh(&tp->lock);
5081 tg3_irq_quiesce(tp);
5084 static inline void tg3_full_unlock(struct tg3 *tp)
5086 spin_unlock_bh(&tp->lock);
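/* Illustrative sketch, not part of the driver: typical use of the
 * lock pair above.  Passing irq_sync=1 additionally quiesces the IRQ
 * handlers, which is what shutdown-style paths such as
 * tg3_change_mtu() below rely on.
 */
static void example_full_lock_usage(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* BHs blocked, IRQ handlers quiesced */
	/* ... reconfigure hardware / driver state safely ... */
	tg3_full_unlock(tp);
}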
5089 /* One-shot MSI handler - Chip automatically disables interrupt
5090 * after sending MSI so driver doesn't have to do it.
5092 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5094 struct tg3_napi *tnapi = dev_id;
5095 struct tg3 *tp = tnapi->tp;
5097 prefetch(tnapi->hw_status);
5099 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5101 if (likely(!tg3_irq_sync(tp)))
5102 napi_schedule(&tnapi->napi);
5107 /* MSI ISR - No need to check for interrupt sharing and no need to
5108 * flush status block and interrupt mailbox. PCI ordering rules
5109 * guarantee that MSI will arrive after the status block.
5111 static irqreturn_t tg3_msi(int irq, void *dev_id)
5113 struct tg3_napi *tnapi = dev_id;
5114 struct tg3 *tp = tnapi->tp;
5116 prefetch(tnapi->hw_status);
5118 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5120 * Writing any value to intr-mbox-0 clears PCI INTA# and
5121 * chip-internal interrupt pending events.
5122 * Writing non-zero to intr-mbox-0 additionally tells the
5123 * NIC to stop sending us irqs, engaging "in-intr-handler"
5126 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5127 if (likely(!tg3_irq_sync(tp)))
5128 napi_schedule(&tnapi->napi);
5130 return IRQ_RETVAL(1);
5133 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5135 struct tg3_napi *tnapi = dev_id;
5136 struct tg3 *tp = tnapi->tp;
5137 struct tg3_hw_status *sblk = tnapi->hw_status;
5138 unsigned int handled = 1;
5140 /* In INTx mode, it is possible for the interrupt to arrive at
5141 * the CPU before the status block posted prior to the interrupt.
5142 * Reading the PCI State register will confirm whether the
5143 * interrupt is ours and will flush the status block.
5145 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5146 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5147 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5154 * Writing any value to intr-mbox-0 clears PCI INTA# and
5155 * chip-internal interrupt pending events.
5156 * Writing non-zero to intr-mbox-0 additionally tells the
5157 * NIC to stop sending us irqs, engaging "in-intr-handler"
5160 * Flush the mailbox to de-assert the IRQ immediately to prevent
5161 * spurious interrupts. The flush impacts performance but
5162 * excessive spurious interrupts can be worse in some cases.
5164 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5165 if (tg3_irq_sync(tp))
5167 sblk->status &= ~SD_STATUS_UPDATED;
5168 if (likely(tg3_has_work(tnapi))) {
5169 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5170 napi_schedule(&tnapi->napi);
5172 /* No work, shared interrupt perhaps? Re-enable
5173 * interrupts, and flush that PCI write
5175 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5179 return IRQ_RETVAL(handled);
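/* Illustrative sketch, not part of the driver: the claim-then-defer
 * shape of the INTx handler above.  A shared-line handler must first
 * decide whether its device raised the interrupt, return IRQ_NONE if
 * not, and otherwise hand the heavy lifting to NAPI.
 */
static irqreturn_t example_intx_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;

	if (!(tnapi->hw_status->status & SD_STATUS_UPDATED))
		return IRQ_NONE;		/* not ours */

	napi_schedule(&tnapi->napi);		/* defer real work */
	return IRQ_HANDLED;
}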
5182 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5184 struct tg3_napi *tnapi = dev_id;
5185 struct tg3 *tp = tnapi->tp;
5186 struct tg3_hw_status *sblk = tnapi->hw_status;
5187 unsigned int handled = 1;
5189 /* In INTx mode, it is possible for the interrupt to arrive at
5190 * the CPU before the status block posted prior to the interrupt.
5191 * Reading the PCI State register will confirm whether the
5192 * interrupt is ours and will flush the status block.
5194 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5195 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5196 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5203 * Writing any value to intr-mbox-0 clears PCI INTA# and
5204 * chip-internal interrupt pending events.
5205 * Writing non-zero to intr-mbox-0 additionally tells the
5206 * NIC to stop sending us irqs, engaging "in-intr-handler"
5209 * Flush the mailbox to de-assert the IRQ immediately to prevent
5210 * spurious interrupts. The flush impacts performance but
5211 * excessive spurious interrupts can be worse in some cases.
5213 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5216 * In a shared interrupt configuration, sometimes other devices'
5217 * interrupts will scream. We record the current status tag here
5218 * so that the above check can report that the screaming interrupts
5219 * are unhandled. Eventually they will be silenced.
5221 tnapi->last_irq_tag = sblk->status_tag;
5223 if (tg3_irq_sync(tp))
5226 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5228 napi_schedule(&tnapi->napi);
5231 return IRQ_RETVAL(handled);
5234 /* ISR for interrupt test */
5235 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5237 struct tg3_napi *tnapi = dev_id;
5238 struct tg3 *tp = tnapi->tp;
5239 struct tg3_hw_status *sblk = tnapi->hw_status;
5241 if ((sblk->status & SD_STATUS_UPDATED) ||
5242 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5243 tg3_disable_ints(tp);
5244 return IRQ_RETVAL(1);
5246 return IRQ_RETVAL(0);
5249 static int tg3_init_hw(struct tg3 *, int);
5250 static int tg3_halt(struct tg3 *, int, int);
5252 /* Restart hardware after configuration changes, self-test, etc.
5253 * Invoked with tp->lock held.
5255 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5256 __releases(tp->lock)
5257 __acquires(tp->lock)
5261 err = tg3_init_hw(tp, reset_phy);
5263 netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
5264 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5265 tg3_full_unlock(tp);
5266 del_timer_sync(&tp->timer);
5268 tg3_napi_enable(tp);
5270 tg3_full_lock(tp, 0);
5275 #ifdef CONFIG_NET_POLL_CONTROLLER
5276 static void tg3_poll_controller(struct net_device *dev)
5279 struct tg3 *tp = netdev_priv(dev);
5281 for (i = 0; i < tp->irq_cnt; i++)
5282 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5286 static void tg3_reset_task(struct work_struct *work)
5288 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5290 unsigned int restart_timer;
5292 tg3_full_lock(tp, 0);
5294 if (!netif_running(tp->dev)) {
5295 tg3_full_unlock(tp);
5299 tg3_full_unlock(tp);
5305 tg3_full_lock(tp, 1);
5307 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5308 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5310 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5311 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5312 tp->write32_rx_mbox = tg3_write_flush_reg32;
5313 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5314 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5317 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5318 err = tg3_init_hw(tp, 1);
5322 tg3_netif_start(tp);
5325 mod_timer(&tp->timer, jiffies + 1);
5328 tg3_full_unlock(tp);
5334 static void tg3_dump_short_state(struct tg3 *tp)
5336 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5337 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5338 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5339 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5342 static void tg3_tx_timeout(struct net_device *dev)
5344 struct tg3 *tp = netdev_priv(dev);
5346 if (netif_msg_tx_err(tp)) {
5347 netdev_err(dev, "transmit timed out, resetting\n");
5348 tg3_dump_short_state(tp);
5351 schedule_work(&tp->reset_task);
5354 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5355 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5357 u32 base = (u32) mapping & 0xffffffff;
5359 return ((base > 0xffffdcc0) &&
5360 (base + len + 8 < base));
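/* Illustrative sketch, not part of the driver: why the wrap test above
 * works.  Computing 'base + len' in 32 bits overflows exactly when the
 * buffer straddles a 4GB boundary, e.g. a 0x100-byte buffer at
 * 0xffffff80 "ends" at 0x80, which is below its base.  The driver's
 * version adds a small guard band (the +8 and the 0xffffdcc0 check).
 */
static inline int example_crosses_4g(u32 base, u32 len)
{
	return base + len < base;	/* true only on 32-bit wraparound */
}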
5363 /* Test for DMA addresses > 40-bit */
5364 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5367 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5368 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5369 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5376 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5378 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5379 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5380 struct sk_buff *skb, u32 last_plus_one,
5381 u32 *start, u32 base_flags, u32 mss)
5383 struct tg3 *tp = tnapi->tp;
5384 struct sk_buff *new_skb;
5385 dma_addr_t new_addr = 0;
5389 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5390 new_skb = skb_copy(skb, GFP_ATOMIC);
5392 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5394 new_skb = skb_copy_expand(skb,
5395 skb_headroom(skb) + more_headroom,
5396 skb_tailroom(skb), GFP_ATOMIC);
5402 /* New SKB is guaranteed to be linear. */
5404 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5406 /* Make sure the mapping succeeded */
5407 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5409 dev_kfree_skb(new_skb);
5412 /* Make sure new skb does not cross any 4G boundaries.
5413 * Drop the packet if it does.
5415 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5416 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5417 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5420 dev_kfree_skb(new_skb);
5423 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5424 base_flags, 1 | (mss << 1));
5425 *start = NEXT_TX(entry);
5429 /* Now clean up the sw ring entries. */
5431 while (entry != last_plus_one) {
5435 len = skb_headlen(skb);
5437 len = skb_shinfo(skb)->frags[i-1].size;
5439 pci_unmap_single(tp->pdev,
5440 pci_unmap_addr(&tnapi->tx_buffers[entry],
5442 len, PCI_DMA_TODEVICE);
5444 tnapi->tx_buffers[entry].skb = new_skb;
5445 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5448 tnapi->tx_buffers[entry].skb = NULL;
5450 entry = NEXT_TX(entry);
5459 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5460 dma_addr_t mapping, int len, u32 flags,
5463 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5464 int is_end = (mss_and_is_end & 0x1);
5465 u32 mss = (mss_and_is_end >> 1);
5469 flags |= TXD_FLAG_END;
5470 if (flags & TXD_FLAG_VLAN) {
5471 vlan_tag = flags >> 16;
5474 vlan_tag |= (mss << TXD_MSS_SHIFT);
5476 txd->addr_hi = ((u64) mapping >> 32);
5477 txd->addr_lo = ((u64) mapping & 0xffffffff);
5478 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5479 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
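/* Illustrative sketch, not part of the driver: the encoding of the
 * mss_and_is_end argument decoded above.  Bit 0 marks the final
 * descriptor of a frame and the MSS rides in the upper bits, which is
 * why the xmit paths below pass '(i == last) | (mss << 1)'.
 */
static inline u32 example_pack_mss_is_end(u32 mss, int is_end)
{
	return (mss << 1) | (is_end ? 1 : 0);
}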
5482 /* hard_start_xmit for devices that don't have any bugs and
5483 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5485 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5486 struct net_device *dev)
5488 struct tg3 *tp = netdev_priv(dev);
5489 u32 len, entry, base_flags, mss;
5491 struct tg3_napi *tnapi;
5492 struct netdev_queue *txq;
5493 unsigned int i, last;
5496 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5497 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5498 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5501 /* We are running in BH disabled context with netif_tx_lock
5502 * and TX reclaim runs via tp->napi.poll inside of a software
5503 * interrupt. Furthermore, IRQ processing runs lockless so we have
5504 * no IRQ context deadlocks to worry about either. Rejoice!
5506 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5507 if (!netif_tx_queue_stopped(txq)) {
5508 netif_tx_stop_queue(txq);
5510 /* This is a hard error, log it. */
5511 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
5513 return NETDEV_TX_BUSY;
5516 entry = tnapi->tx_prod;
5519 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5520 int tcp_opt_len, ip_tcp_len;
5523 if (skb_header_cloned(skb) &&
5524 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5529 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5530 hdrlen = skb_headlen(skb) - ETH_HLEN;
5532 struct iphdr *iph = ip_hdr(skb);
5534 tcp_opt_len = tcp_optlen(skb);
5535 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5538 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5539 hdrlen = ip_tcp_len + tcp_opt_len;
5542 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5543 mss |= (hdrlen & 0xc) << 12;
5545 base_flags |= 0x00000010;
5546 base_flags |= (hdrlen & 0x3e0) << 5;
5550 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5551 TXD_FLAG_CPU_POST_DMA);
5553 tcp_hdr(skb)->check = 0;
5556 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5557 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5558 #if TG3_VLAN_TAG_USED
5559 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5560 base_flags |= (TXD_FLAG_VLAN |
5561 (vlan_tx_tag_get(skb) << 16));
5564 len = skb_headlen(skb);
5566 /* Queue skb data, a.k.a. the main skb fragment. */
5567 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5568 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5573 tnapi->tx_buffers[entry].skb = skb;
5574 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5576 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5577 !mss && skb->len > ETH_DATA_LEN)
5578 base_flags |= TXD_FLAG_JMB_PKT;
5580 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5581 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5583 entry = NEXT_TX(entry);
5585 /* Now loop through additional data fragments, and queue them. */
5586 if (skb_shinfo(skb)->nr_frags > 0) {
5587 last = skb_shinfo(skb)->nr_frags - 1;
5588 for (i = 0; i <= last; i++) {
5589 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5592 mapping = pci_map_page(tp->pdev,
5595 len, PCI_DMA_TODEVICE);
5596 if (pci_dma_mapping_error(tp->pdev, mapping))
5599 tnapi->tx_buffers[entry].skb = NULL;
5600 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5603 tg3_set_txd(tnapi, entry, mapping, len,
5604 base_flags, (i == last) | (mss << 1));
5606 entry = NEXT_TX(entry);
5610 /* Packets are ready; update the Tx producer idx locally and on the card. */
5611 tw32_tx_mbox(tnapi->prodmbox, entry);
5613 tnapi->tx_prod = entry;
5614 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5615 netif_tx_stop_queue(txq);
5616 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5617 netif_tx_wake_queue(txq);
5623 return NETDEV_TX_OK;
5627 entry = tnapi->tx_prod;
5628 tnapi->tx_buffers[entry].skb = NULL;
5629 pci_unmap_single(tp->pdev,
5630 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5633 for (i = 0; i <= last; i++) {
5634 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5635 entry = NEXT_TX(entry);
5637 pci_unmap_page(tp->pdev,
5638 pci_unmap_addr(&tnapi->tx_buffers[entry],
5640 frag->size, PCI_DMA_TODEVICE);
5644 return NETDEV_TX_OK;
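/* Illustrative sketch, not part of the driver: the stop/re-check/wake
 * flow control used after queuing a packet above.  The queue is
 * stopped once fewer slots remain than a maximally fragmented skb
 * could need; the second availability test closes the race with a TX
 * completion that may have freed slots in the meantime.
 */
static void example_tx_maybe_stop(struct tg3_napi *tnapi,
				  struct netdev_queue *txq)
{
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		/* a racing completion may have made room already */
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}
}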
5647 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5648 struct net_device *);
5650 /* Use GSO to work around a rare TSO bug that may be triggered when the
5651 * TSO header is greater than 80 bytes.
5653 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5655 struct sk_buff *segs, *nskb;
5656 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5658 /* Estimate the number of fragments in the worst case */
5659 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5660 netif_stop_queue(tp->dev);
5661 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5662 return NETDEV_TX_BUSY;
5664 netif_wake_queue(tp->dev);
5667 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5669 goto tg3_tso_bug_end;
5675 tg3_start_xmit_dma_bug(nskb, tp->dev);
5681 return NETDEV_TX_OK;
5684 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5685 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5687 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5688 struct net_device *dev)
5690 struct tg3 *tp = netdev_priv(dev);
5691 u32 len, entry, base_flags, mss;
5692 int would_hit_hwbug;
5694 struct tg3_napi *tnapi;
5695 struct netdev_queue *txq;
5696 unsigned int i, last;
5699 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5700 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5701 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5704 /* We are running in BH disabled context with netif_tx_lock
5705 * and TX reclaim runs via tp->napi.poll inside of a software
5706 * interrupt. Furthermore, IRQ processing runs lockless so we have
5707 * no IRQ context deadlocks to worry about either. Rejoice!
5709 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5710 if (!netif_tx_queue_stopped(txq)) {
5711 netif_tx_stop_queue(txq);
5713 /* This is a hard error, log it. */
5714 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
5716 return NETDEV_TX_BUSY;
5719 entry = tnapi->tx_prod;
5721 if (skb->ip_summed == CHECKSUM_PARTIAL)
5722 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5724 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5726 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5728 if (skb_header_cloned(skb) &&
5729 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5734 tcp_opt_len = tcp_optlen(skb);
5735 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5737 hdr_len = ip_tcp_len + tcp_opt_len;
5738 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5739 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5740 return (tg3_tso_bug(tp, skb));
5742 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5743 TXD_FLAG_CPU_POST_DMA);
5747 iph->tot_len = htons(mss + hdr_len);
5748 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5749 tcp_hdr(skb)->check = 0;
5750 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5752 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5757 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5758 mss |= (hdr_len & 0xc) << 12;
5760 base_flags |= 0x00000010;
5761 base_flags |= (hdr_len & 0x3e0) << 5;
5762 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5763 mss |= hdr_len << 9;
5764 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5765 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5766 if (tcp_opt_len || iph->ihl > 5) {
5769 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5770 mss |= (tsflags << 11);
5773 if (tcp_opt_len || iph->ihl > 5) {
5776 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5777 base_flags |= tsflags << 12;
5781 #if TG3_VLAN_TAG_USED
5782 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5783 base_flags |= (TXD_FLAG_VLAN |
5784 (vlan_tx_tag_get(skb) << 16));
5787 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5788 !mss && skb->len > ETH_DATA_LEN)
5789 base_flags |= TXD_FLAG_JMB_PKT;
5791 len = skb_headlen(skb);
5793 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5794 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5799 tnapi->tx_buffers[entry].skb = skb;
5800 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5802 would_hit_hwbug = 0;
5804 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5805 would_hit_hwbug = 1;
5807 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5808 tg3_4g_overflow_test(mapping, len))
5809 would_hit_hwbug = 1;
5811 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5812 tg3_40bit_overflow_test(tp, mapping, len))
5813 would_hit_hwbug = 1;
5815 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5816 would_hit_hwbug = 1;
5818 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5819 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5821 entry = NEXT_TX(entry);
5823 /* Now loop through additional data fragments, and queue them. */
5824 if (skb_shinfo(skb)->nr_frags > 0) {
5825 last = skb_shinfo(skb)->nr_frags - 1;
5826 for (i = 0; i <= last; i++) {
5827 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5830 mapping = pci_map_page(tp->pdev,
5833 len, PCI_DMA_TODEVICE);
5835 tnapi->tx_buffers[entry].skb = NULL;
5836 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5838 if (pci_dma_mapping_error(tp->pdev, mapping))
5841 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5843 would_hit_hwbug = 1;
5845 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5846 tg3_4g_overflow_test(mapping, len))
5847 would_hit_hwbug = 1;
5849 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5850 tg3_40bit_overflow_test(tp, mapping, len))
5851 would_hit_hwbug = 1;
5853 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5854 tg3_set_txd(tnapi, entry, mapping, len,
5855 base_flags, (i == last)|(mss << 1));
5857 tg3_set_txd(tnapi, entry, mapping, len,
5858 base_flags, (i == last));
5860 entry = NEXT_TX(entry);
5864 if (would_hit_hwbug) {
5865 u32 last_plus_one = entry;
5868 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5869 start &= (TG3_TX_RING_SIZE - 1);
5871 /* If the workaround fails due to memory/mapping
5872 * failure, silently drop this packet.
5874 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5875 &start, base_flags, mss))
5881 /* Packets are ready; update the Tx producer idx locally and on the card. */
5882 tw32_tx_mbox(tnapi->prodmbox, entry);
5884 tnapi->tx_prod = entry;
5885 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5886 netif_tx_stop_queue(txq);
5887 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5888 netif_tx_wake_queue(txq);
5894 return NETDEV_TX_OK;
5898 entry = tnapi->tx_prod;
5899 tnapi->tx_buffers[entry].skb = NULL;
5900 pci_unmap_single(tp->pdev,
5901 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5904 for (i = 0; i <= last; i++) {
5905 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5906 entry = NEXT_TX(entry);
5908 pci_unmap_page(tp->pdev,
5909 pci_unmap_addr(&tnapi->tx_buffers[entry],
5911 frag->size, PCI_DMA_TODEVICE);
5915 return NETDEV_TX_OK;
5918 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5923 if (new_mtu > ETH_DATA_LEN) {
5924 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5925 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5926 ethtool_op_set_tso(dev, 0);
5929 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5931 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5932 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5933 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5937 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5939 struct tg3 *tp = netdev_priv(dev);
5942 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5945 if (!netif_running(dev)) {
5946 /* We'll just catch it later when the device is brought up. */
5949 tg3_set_mtu(dev, tp, new_mtu);
5957 tg3_full_lock(tp, 1);
5959 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5961 tg3_set_mtu(dev, tp, new_mtu);
5963 err = tg3_restart_hw(tp, 0);
5966 tg3_netif_start(tp);
5968 tg3_full_unlock(tp);
5976 static void tg3_rx_prodring_free(struct tg3 *tp,
5977 struct tg3_rx_prodring_set *tpr)
5981 if (tpr != &tp->prodring[0]) {
5982 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
5983 i = (i + 1) % TG3_RX_RING_SIZE)
5984 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5987 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5988 for (i = tpr->rx_jmb_cons_idx;
5989 i != tpr->rx_jmb_prod_idx;
5990 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
5991 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5999 for (i = 0; i < TG3_RX_RING_SIZE; i++)
6000 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6003 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6004 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
6005 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6010 /* Initialize tx/rx rings for packet processing.
6012 * The chip has been shut down and the driver detached from
6013 * the networking, so no interrupts or new tx packets will
6014 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
6017 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6018 struct tg3_rx_prodring_set *tpr)
6020 u32 i, rx_pkt_dma_sz;
6022 tpr->rx_std_cons_idx = 0;
6023 tpr->rx_std_prod_idx = 0;
6024 tpr->rx_jmb_cons_idx = 0;
6025 tpr->rx_jmb_prod_idx = 0;
6027 if (tpr != &tp->prodring[0]) {
6028 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
6029 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6030 memset(&tpr->rx_jmb_buffers[0], 0,
6031 TG3_RX_JMB_BUFF_RING_SIZE);
6035 /* Zero out all descriptors. */
6036 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
6038 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6039 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6040 tp->dev->mtu > ETH_DATA_LEN)
6041 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6042 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6044 /* Initialize invariants of the rings; we only set this
6045 * stuff once. This works because the card does not
6046 * write into the rx buffer posting rings.
6048 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
6049 struct tg3_rx_buffer_desc *rxd;
6051 rxd = &tpr->rx_std[i];
6052 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6053 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6054 rxd->opaque = (RXD_OPAQUE_RING_STD |
6055 (i << RXD_OPAQUE_INDEX_SHIFT));
6058 /* Now allocate fresh SKBs for each rx ring. */
6059 for (i = 0; i < tp->rx_pending; i++) {
6060 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6061 netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
6070 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6073 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6075 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6078 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6079 struct tg3_rx_buffer_desc *rxd;
6081 rxd = &tpr->rx_jmb[i].std;
6082 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6083 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6085 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6086 (i << RXD_OPAQUE_INDEX_SHIFT));
6089 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6090 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6091 netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
6092 i, tp->rx_jumbo_pending);
6095 tp->rx_jumbo_pending = i;
6104 tg3_rx_prodring_free(tp, tpr);
6108 static void tg3_rx_prodring_fini(struct tg3 *tp,
6109 struct tg3_rx_prodring_set *tpr)
6111 kfree(tpr->rx_std_buffers);
6112 tpr->rx_std_buffers = NULL;
6113 kfree(tpr->rx_jmb_buffers);
6114 tpr->rx_jmb_buffers = NULL;
6116 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6117 tpr->rx_std, tpr->rx_std_mapping);
6121 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6122 tpr->rx_jmb, tpr->rx_jmb_mapping);
6127 static int tg3_rx_prodring_init(struct tg3 *tp,
6128 struct tg3_rx_prodring_set *tpr)
6130 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6131 if (!tpr->rx_std_buffers)
6134 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6135 &tpr->rx_std_mapping);
6139 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6140 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6142 if (!tpr->rx_jmb_buffers)
6145 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6146 TG3_RX_JUMBO_RING_BYTES,
6147 &tpr->rx_jmb_mapping);
6155 tg3_rx_prodring_fini(tp, tpr);
6159 /* Free up pending packets in all rx/tx rings.
6161 * The chip has been shut down and the driver detached from
6162 * the networking, so no interrupts or new tx packets will
6163 * end up in the driver. tp->{tx,}lock is not held and we are not
6164 * in an interrupt context and thus may sleep.
6166 static void tg3_free_rings(struct tg3 *tp)
6170 for (j = 0; j < tp->irq_cnt; j++) {
6171 struct tg3_napi *tnapi = &tp->napi[j];
6173 if (!tnapi->tx_buffers)
6176 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6177 struct ring_info *txp;
6178 struct sk_buff *skb;
6181 txp = &tnapi->tx_buffers[i];
6189 pci_unmap_single(tp->pdev,
6190 pci_unmap_addr(txp, mapping),
6197 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6198 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6199 pci_unmap_page(tp->pdev,
6200 pci_unmap_addr(txp, mapping),
6201 skb_shinfo(skb)->frags[k].size,
6206 dev_kfree_skb_any(skb);
6209 tg3_rx_prodring_free(tp, &tp->prodring[j]);
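/* Illustrative sketch, not part of the driver: the teardown walk used
 * in tg3_free_rings() above.  The skb head was mapped with
 * pci_map_single() and each page fragment with pci_map_page(), so the
 * unmap calls must mirror that split.  Ring wrap-around is ignored
 * here for brevity.
 */
static void example_unmap_tx_skb(struct tg3 *tp, struct ring_info *txb,
				 struct sk_buff *skb)
{
	int k;

	pci_unmap_single(tp->pdev, pci_unmap_addr(txb, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
		txb++;	/* fragments occupy the slots that follow */
		pci_unmap_page(tp->pdev, pci_unmap_addr(txb, mapping),
			       skb_shinfo(skb)->frags[k].size,
			       PCI_DMA_TODEVICE);
	}
	dev_kfree_skb_any(skb);
}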
6213 /* Initialize tx/rx rings for packet processing.
6215 * The chip has been shut down and the driver detached from
6216 * the networking, so no interrupts or new tx packets will
6217 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
6220 static int tg3_init_rings(struct tg3 *tp)
6224 /* Free up all the SKBs. */
6227 for (i = 0; i < tp->irq_cnt; i++) {
6228 struct tg3_napi *tnapi = &tp->napi[i];
6230 tnapi->last_tag = 0;
6231 tnapi->last_irq_tag = 0;
6232 tnapi->hw_status->status = 0;
6233 tnapi->hw_status->status_tag = 0;
6234 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6239 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6241 tnapi->rx_rcb_ptr = 0;
6243 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6245 if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
6255 * Must not be invoked with interrupt sources disabled and
6256 * the hardware shut down.
6258 static void tg3_free_consistent(struct tg3 *tp)
6262 for (i = 0; i < tp->irq_cnt; i++) {
6263 struct tg3_napi *tnapi = &tp->napi[i];
6265 if (tnapi->tx_ring) {
6266 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6267 tnapi->tx_ring, tnapi->tx_desc_mapping);
6268 tnapi->tx_ring = NULL;
6271 kfree(tnapi->tx_buffers);
6272 tnapi->tx_buffers = NULL;
6274 if (tnapi->rx_rcb) {
6275 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6277 tnapi->rx_rcb_mapping);
6278 tnapi->rx_rcb = NULL;
6281 if (tnapi->hw_status) {
6282 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6284 tnapi->status_mapping);
6285 tnapi->hw_status = NULL;
6290 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6291 tp->hw_stats, tp->stats_mapping);
6292 tp->hw_stats = NULL;
6295 for (i = 0; i < tp->irq_cnt; i++)
6296 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6300 * Must not be invoked with interrupt sources disabled and
6301 * the hardware shut down. Can sleep.
6303 static int tg3_alloc_consistent(struct tg3 *tp)
6307 for (i = 0; i < tp->irq_cnt; i++) {
6308 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6312 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6313 sizeof(struct tg3_hw_stats),
6314 &tp->stats_mapping);
6318 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6320 for (i = 0; i < tp->irq_cnt; i++) {
6321 struct tg3_napi *tnapi = &tp->napi[i];
6322 struct tg3_hw_status *sblk;
6324 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6326 &tnapi->status_mapping);
6327 if (!tnapi->hw_status)
6330 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6331 sblk = tnapi->hw_status;
6333 /* If multivector TSS is enabled, vector 0 does not handle
6334 * tx interrupts. Don't allocate any resources for it.
6336 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6337 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6338 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6341 if (!tnapi->tx_buffers)
6344 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6346 &tnapi->tx_desc_mapping);
6347 if (!tnapi->tx_ring)
6352 * When RSS is enabled, the status block format changes
6353 * slightly. The "rx_jumbo_consumer", "reserved",
6354 * and "rx_mini_consumer" members get mapped to the
6355 * other three rx return ring producer indexes.
6359 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6362 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6365 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6368 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6372 tnapi->prodring = &tp->prodring[i];
6375 * If multivector RSS is enabled, vector 0 does not handle
6376 * rx or tx interrupts. Don't allocate any resources for it.
6378 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6381 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6382 TG3_RX_RCB_RING_BYTES(tp),
6383 &tnapi->rx_rcb_mapping);
6387 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6393 tg3_free_consistent(tp);
6397 #define MAX_WAIT_CNT 1000
6399 /* To stop a block, clear the enable bit and poll till it
6400 * clears. tp->lock is held.
6402 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6407 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6414 /* We can't enable/disable these bits of the
6415 * 5705/5750, just say success.
6428 for (i = 0; i < MAX_WAIT_CNT; i++) {
6431 if ((val & enable_bit) == 0)
6435 if (i == MAX_WAIT_CNT && !silent) {
6436 dev_err(&tp->pdev->dev,
6437 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6445 /* tp->lock is held. */
6446 static int tg3_abort_hw(struct tg3 *tp, int silent)
6450 tg3_disable_ints(tp);
6452 tp->rx_mode &= ~RX_MODE_ENABLE;
6453 tw32_f(MAC_RX_MODE, tp->rx_mode);
6456 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6457 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6458 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6459 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6460 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6461 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6463 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6464 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6465 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6466 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6467 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6468 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6469 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6471 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6472 tw32_f(MAC_MODE, tp->mac_mode);
6475 tp->tx_mode &= ~TX_MODE_ENABLE;
6476 tw32_f(MAC_TX_MODE, tp->tx_mode);
6478 for (i = 0; i < MAX_WAIT_CNT; i++) {
6480 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6483 if (i >= MAX_WAIT_CNT) {
6484 dev_err(&tp->pdev->dev,
6485 "%s timed out, TX_MODE_ENABLE will not clear "
6486 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6490 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6491 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6492 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6494 tw32(FTQ_RESET, 0xffffffff);
6495 tw32(FTQ_RESET, 0x00000000);
6497 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6498 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6500 for (i = 0; i < tp->irq_cnt; i++) {
6501 struct tg3_napi *tnapi = &tp->napi[i];
6502 if (tnapi->hw_status)
6503 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6506 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
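/* Illustrative sketch, not part of the driver: the clear-and-poll
 * pattern tg3_stop_block() applies to every engine stopped in
 * tg3_abort_hw() above.  The enable bit is cleared and the register is
 * then polled, with a bounded wait, until the hardware reports the
 * engine idle.
 */
static int example_stop_engine(struct tg3 *tp, unsigned long ofs,
			       u32 enable_bit)
{
	int i;

	tw32_f(ofs, tr32(ofs) & ~enable_bit);	/* request stop */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(ofs) & enable_bit))
			return 0;		/* engine went idle */
	}
	return -ENODEV;				/* never stopped */
}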
6511 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6516 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6517 if (apedata != APE_SEG_SIG_MAGIC)
6520 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6521 if (!(apedata & APE_FW_STATUS_READY))
6524 /* Wait for up to 1 millisecond for APE to service previous event. */
6525 for (i = 0; i < 10; i++) {
6526 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6529 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6531 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6532 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6533 event | APE_EVENT_STATUS_EVENT_PENDING);
6535 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6537 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6543 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6544 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6547 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6552 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6556 case RESET_KIND_INIT:
6557 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6558 APE_HOST_SEG_SIG_MAGIC);
6559 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6560 APE_HOST_SEG_LEN_MAGIC);
6561 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6562 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6563 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6564 APE_HOST_DRIVER_ID_MAGIC);
6565 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6566 APE_HOST_BEHAV_NO_PHYLOCK);
6568 event = APE_EVENT_STATUS_STATE_START;
6570 case RESET_KIND_SHUTDOWN:
6571 /* With the interface we are currently using,
6572 * APE does not track driver state. Wiping
6573 * out the HOST SEGMENT SIGNATURE forces
6574 * the APE to assume OS absent status.
6576 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6578 event = APE_EVENT_STATUS_STATE_UNLOAD;
6580 case RESET_KIND_SUSPEND:
6581 event = APE_EVENT_STATUS_STATE_SUSPEND;
6587 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6589 tg3_ape_send_event(tp, event);
6592 /* tp->lock is held. */
6593 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6595 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6596 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6598 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6600 case RESET_KIND_INIT:
6601 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6605 case RESET_KIND_SHUTDOWN:
6606 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6610 case RESET_KIND_SUSPEND:
6611 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6620 if (kind == RESET_KIND_INIT ||
6621 kind == RESET_KIND_SUSPEND)
6622 tg3_ape_driver_state_change(tp, kind);
6625 /* tp->lock is held. */
6626 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6628 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6630 case RESET_KIND_INIT:
6631 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6632 DRV_STATE_START_DONE);
6635 case RESET_KIND_SHUTDOWN:
6636 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6637 DRV_STATE_UNLOAD_DONE);
6645 if (kind == RESET_KIND_SHUTDOWN)
6646 tg3_ape_driver_state_change(tp, kind);
6649 /* tp->lock is held. */
6650 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6652 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6654 case RESET_KIND_INIT:
6655 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6659 case RESET_KIND_SHUTDOWN:
6660 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6664 case RESET_KIND_SUSPEND:
6665 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6675 static int tg3_poll_fw(struct tg3 *tp)
6680 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6681 /* Wait up to 20ms for init done. */
6682 for (i = 0; i < 200; i++) {
6683 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6690 /* Wait for firmware initialization to complete. */
6691 for (i = 0; i < 100000; i++) {
6692 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6693 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6698 /* Chip might not be fitted with firmware. Some Sun onboard
6699 * parts are configured like that. So don't signal the timeout
6700 * of the above loop as an error, but do report the lack of
6701 * running firmware once.
6704 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6705 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6707 netdev_info(tp->dev, "No firmware running\n");
6710 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6711 /* The 57765 A0 needs a little more
6712 * time to do some important work.
6720 /* Save PCI command register before chip reset */
6721 static void tg3_save_pci_state(struct tg3 *tp)
6723 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6726 /* Restore PCI state after chip reset */
6727 static void tg3_restore_pci_state(struct tg3 *tp)
6731 /* Re-enable indirect register accesses. */
6732 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6733 tp->misc_host_ctrl);
6735 /* Set MAX PCI retry to zero. */
6736 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6737 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6738 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6739 val |= PCISTATE_RETRY_SAME_DMA;
6740 /* Allow reads and writes to the APE register and memory space. */
6741 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6742 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6743 PCISTATE_ALLOW_APE_SHMEM_WR;
6744 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6746 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6748 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6749 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6750 pcie_set_readrq(tp->pdev, 4096);
6752 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6753 tp->pci_cacheline_sz);
6754 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6759 /* Make sure PCI-X relaxed ordering bit is clear. */
6760 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6763 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6765 pcix_cmd &= ~PCI_X_CMD_ERO;
6766 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6770 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6772 /* Chip reset on 5780 will reset MSI enable bit,
6773 * so we need to restore it.
6775 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6778 pci_read_config_word(tp->pdev,
6779 tp->msi_cap + PCI_MSI_FLAGS,
6781 pci_write_config_word(tp->pdev,
6782 tp->msi_cap + PCI_MSI_FLAGS,
6783 ctrl | PCI_MSI_FLAGS_ENABLE);
6784 val = tr32(MSGINT_MODE);
6785 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6790 static void tg3_stop_fw(struct tg3 *);
6792 /* tp->lock is held. */
6793 static int tg3_chip_reset(struct tg3 *tp)
6796 void (*write_op)(struct tg3 *, u32, u32);
6801 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6803 /* No matching tg3_nvram_unlock() after this because
6804 * chip reset below will undo the nvram lock.
6806 tp->nvram_lock_cnt = 0;
6808 /* GRC_MISC_CFG core clock reset will clear the memory
6809 * enable bit in PCI register 4 and the MSI enable bit
6810 * on some chips, so we save relevant registers here.
6812 tg3_save_pci_state(tp);
6814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6815 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6816 tw32(GRC_FASTBOOT_PC, 0);
6819 * We must avoid the readl() that normally takes place.
6820 * It locks machines, causes machine checks, and other
6821 * fun things. So, temporarily disable the 5701
6822 * hardware workaround, while we do the reset.
6824 write_op = tp->write32;
6825 if (write_op == tg3_write_flush_reg32)
6826 tp->write32 = tg3_write32;
6828 /* Prevent the irq handler from reading or writing PCI registers
6829 * during chip reset when the memory enable bit in the PCI command
6830 * register may be cleared. The chip does not generate interrupts
6831 * at this time, but the irq handler may still be called due to irq
6832 * sharing or irqpoll.
6834 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6835 for (i = 0; i < tp->irq_cnt; i++) {
6836 struct tg3_napi *tnapi = &tp->napi[i];
6837 if (tnapi->hw_status) {
6838 tnapi->hw_status->status = 0;
6839 tnapi->hw_status->status_tag = 0;
6841 tnapi->last_tag = 0;
6842 tnapi->last_irq_tag = 0;
6846 for (i = 0; i < tp->irq_cnt; i++)
6847 synchronize_irq(tp->napi[i].irq_vec);
6849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6850 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6851 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6855 val = GRC_MISC_CFG_CORECLK_RESET;
6857 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6858 if (tr32(0x7e2c) == 0x60) {
6861 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6862 tw32(GRC_MISC_CFG, (1 << 29));
6867 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6868 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6869 tw32(GRC_VCPU_EXT_CTRL,
6870 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6873 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6874 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6875 tw32(GRC_MISC_CFG, val);
6877 /* restore 5701 hardware bug workaround write method */
6878 tp->write32 = write_op;
6880 /* Unfortunately, we have to delay before the PCI read back.
6881 * Some 575X chips even will not respond to a PCI cfg access
6882 * when the reset command is given to the chip.
6884 * How do these hardware designers expect things to work
6885 * properly if the PCI write is posted for a long period
6886 * of time? It is always necessary to have some method by
6887 * which a register read back can occur to push out the write
6888 * that performs the reset.
6890 * For most tg3 variants, the trick below worked.
6895 /* Flush PCI posted writes. The normal MMIO registers
6896 * are inaccessible at this time so this is the only
6897 * way to do this reliably (actually, this is no longer
6898 * the case, see above). I tried to use indirect
6899 * register read/write but this upset some 5701 variants.
6901 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6905 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6908 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6912 /* Wait for link training to complete. */
6913 for (i = 0; i < 5000; i++)
6916 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6917 pci_write_config_dword(tp->pdev, 0xc4,
6918 cfg_val | (1 << 15));
6921 /* Clear the "no snoop" and "relaxed ordering" bits. */
6922 pci_read_config_word(tp->pdev,
6923 tp->pcie_cap + PCI_EXP_DEVCTL,
6925 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6926 PCI_EXP_DEVCTL_NOSNOOP_EN);
6928 * Older PCIe devices only support the 128 byte
6929 * MPS setting. Enforce the restriction.
6931 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6932 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6933 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6934 pci_write_config_word(tp->pdev,
6935 tp->pcie_cap + PCI_EXP_DEVCTL,
6938 pcie_set_readrq(tp->pdev, 4096);
6940 /* Clear error status */
6941 pci_write_config_word(tp->pdev,
6942 tp->pcie_cap + PCI_EXP_DEVSTA,
6943 PCI_EXP_DEVSTA_CED |
6944 PCI_EXP_DEVSTA_NFED |
6945 PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}
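	/* The PCIe Device Status error bits written above are
	 * write-one-to-clear, so that write acks any correctable,
	 * non-fatal, fatal or unsupported-request errors latched
	 * while the chip was being reset.
	 */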
6949 tg3_restore_pci_state(tp);
6951 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
6956 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6958 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}
6963 tw32(GRC_MODE, tp->grc_mode);
6965 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);
		tw32(0xc4, val | (1 << 15));
	}
6971 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6973 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6974 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6975 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6976 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6979 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6980 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6981 tw32_f(MAC_MODE, tp->mac_mode);
6982 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6983 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6984 tw32_f(MAC_MODE, tp->mac_mode);
6985 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6986 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6987 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6988 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6989 tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);
6994 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
	err = tg3_poll_fw(tp);
	if (err)
		return err;
7002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7005 phy_addr = tp->phy_addr;
7006 tp->phy_addr = TG3_PHY_PCIE_ADDR;
7008 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7009 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
7010 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
7011 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
7012 TG3_PCIEPHY_TX0CTRL1_NB_EN;
7013 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
7016 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7017 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
7018 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
7019 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
7020 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
		tp->phy_addr = phy_addr;
	}
7026 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7027 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7028 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7029 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7030 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
		val = tr32(0x7c00);
		tw32(0x7c00, val | (1 << 25));
	}
7036 /* Reprobe ASF enable state. */
7037 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7038 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7039 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7040 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7044 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7045 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7046 tp->last_event_jiffies = jiffies;
7047 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
7055 /* tp->lock is held. */
7056 static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7059 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7060 /* Wait for RX cpu to ACK the previous event. */
7061 tg3_wait_for_event_ack(tp);
7063 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7065 tg3_generate_fw_event(tp);
7067 /* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
7072 /* tp->lock is held. */
7073 static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_write_sig_pre_reset(tp, kind);
7081 tg3_abort_hw(tp, silent);
7082 err = tg3_chip_reset(tp);
7084 __tg3_set_mac_addr(tp, 0);
7086 tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
7095 #define RX_CPU_SCRATCH_BASE 0x30000
7096 #define RX_CPU_SCRATCH_SIZE 0x04000
7097 #define TX_CPU_SCRATCH_BASE 0x34000
7098 #define TX_CPU_SCRATCH_SIZE 0x04000
7100 /* tp->lock is held. */
7101 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE &&
7106 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7109 u32 val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
7114 if (offset == RX_CPU_BASE) {
7115 for (i = 0; i < 10000; i++) {
7116 tw32(offset + CPU_STATE, 0xffffffff);
7117 tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
7122 tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
		udelay(10);
	} else {
7126 for (i = 0; i < 10000; i++) {
7127 tw32(offset + CPU_STATE, 0xffffffff);
7128 tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}
	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}
7140 /* Clear firmware's nvram arbitration. */
7141 if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);

	return 0;
}
struct fw_info {
	unsigned int fw_base;
7148 unsigned int fw_len;
	const __be32 *fw_data;
};
7152 /* tp->lock is held. */
7153 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7154 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
7157 void (*write_op)(struct tg3 *, u32, u32);
7159 if (cpu_base == TX_CPU_BASE &&
7160 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}
7166 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7167 write_op = tg3_write_mem;
7169 write_op = tg3_write_indirect_reg32;
7171 /* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
7174 lock_err = tg3_nvram_lock(tp);
7175 err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;
7181 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7182 write_op(tp, cpu_scratch_base + i, 0);
7183 tw32(cpu_base + CPU_STATE, 0xffffffff);
7184 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
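	/* Copy the image into CPU scratch memory.  Only the low 16 bits of
	 * the firmware load address (info->fw_base & 0xffff) are used, as
	 * an offset from cpu_scratch_base.
	 */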
7185 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7186 write_op(tp, (cpu_scratch_base +
7187 (info->fw_base & 0xffff) +
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
7197 /* tp->lock is held. */
7198 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;
7204 fw_data = (void *)tp->fw->data;
7206 /* Firmware blob starts with version numbers, followed by
7207 start address and length. We are setting complete length.
7208 length = end_address_of_bss - start_address_of_text.
7209 Remainder is the blob to be loaded contiguously
7210 from start address. */
7212 info.fw_base = be32_to_cpu(fw_data[1]);
7213 info.fw_len = tp->fw->size - 12;
7214 info.fw_data = &fw_data[3];
7216 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;
7222 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;
7228 /* Now startup only the RX cpu. */
7229 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7230 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7232 for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
7235 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7236 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
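	/* Release the RX cpu: clearing the halt bit in CPU_MODE lets it run
	 * from the program counter written above.
	 */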
7245 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);

	return 0;
}
7251 /* 5705 needs a special version of the TSO firmware. */
7253 /* tp->lock is held. */
7254 static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
7257 const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;
7264 fw_data = (void *)tp->fw->data;
7266 /* Firmware blob starts with version numbers, followed by
7267 start address and length. We are setting complete length.
7268 length = end_address_of_bss - start_address_of_text.
7269 Remainder is the blob to be loaded contiguously
7270 from start address. */
7272 info.fw_base = be32_to_cpu(fw_data[1]);
7273 cpu_scratch_size = tp->fw_len;
7274 info.fw_len = tp->fw->size - 12;
7275 info.fw_data = &fw_data[3];
7277 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7278 cpu_base = RX_CPU_BASE;
7279 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
7282 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}
7286 err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;
7292 /* Now startup the cpu. */
7293 tw32(cpu_base + CPU_STATE, 0xffffffff);
7294 tw32_f(cpu_base + CPU_PC, info.fw_base);
7296 for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
7299 tw32(cpu_base + CPU_STATE, 0xffffffff);
7300 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
7309 tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);

	return 0;
}
7315 static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
7318 struct sockaddr *addr = p;
7319 int err = 0, skip_mac_1 = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;
7324 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (!netif_running(dev))
		return 0;
7329 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7330 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7332 addr0_high = tr32(MAC_ADDR_0_HIGH);
7333 addr0_low = tr32(MAC_ADDR_0_LOW);
7334 addr1_high = tr32(MAC_ADDR_1_HIGH);
7335 addr1_low = tr32(MAC_ADDR_1_LOW);
7337 /* Skip MAC addr 1 if ASF is using it. */
7338 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
7342 spin_lock_bh(&tp->lock);
7343 __tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
7349 /* tp->lock is held. */
7350 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
7370 static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;
7375 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7376 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7377 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7378 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
7381 tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}
7385 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
7386 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7387 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7388 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
7391 tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}
7395 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7396 u32 val = ec->stats_block_coalesce_usecs;
7398 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7399 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
7407 for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7411 tw32(reg, ec->rx_coalesce_usecs);
7412 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7413 tw32(reg, ec->rx_max_coalesced_frames);
7414 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7415 tw32(reg, ec->rx_max_coalesced_frames_irq);
7417 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7418 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7419 tw32(reg, ec->tx_coalesce_usecs);
7420 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7421 tw32(reg, ec->tx_max_coalesced_frames);
7422 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}
7427 for (; i < tp->irq_max - 1; i++) {
7428 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7429 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7430 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7432 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7433 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7434 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
7440 /* tp->lock is held. */
7441 static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
7445 struct tg3_napi *tnapi = &tp->napi[0];
7447 /* Disable all transmit rings but the first. */
7448 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7449 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7450 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7451 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7455 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7456 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7457 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7458 BDINFO_FLAGS_DISABLED);
7461 /* Disable all receive return rings but the first. */
7462 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7463 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7464 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7465 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7466 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7467 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7468 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7472 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7473 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7474 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7475 BDINFO_FLAGS_DISABLED);
7477 /* Disable interrupts */
7478 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7480 /* Zero mailbox registers. */
7481 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7482 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7483 tp->napi[i].tx_prod = 0;
7484 tp->napi[i].tx_cons = 0;
7485 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7486 tw32_mailbox(tp->napi[i].prodmbox, 0);
7487 tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
		}
7490 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7491 tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
7494 tp->napi[0].tx_cons = 0;
7495 tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}
7499 /* Make sure the NIC-based send BD rings are disabled. */
7500 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7501 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
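		/* Each mailbox register is 64 bits wide, hence the 8-byte
		 * stride below; the producer index sits in the low half,
		 * which is what gets cleared.
		 */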
7502 for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}
7506 txrcb = NIC_SRAM_SEND_RCB;
7507 rxrcb = NIC_SRAM_RCV_RET_RCB;
7509 /* Clear status block in ram. */
7510 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7512 /* Set status block DMA address */
7513 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7514 ((u64) tnapi->status_mapping >> 32));
7515 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7516 ((u64) tnapi->status_mapping & 0xffffffff));
7518 if (tnapi->tx_ring) {
7519 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7520 (TG3_TX_RING_SIZE <<
7521 BDINFO_FLAGS_MAXLEN_SHIFT),
7522 NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}
7526 if (tnapi->rx_rcb) {
7527 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7528 (TG3_RX_RCB_RING_SIZE(tp) <<
7529 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
		rxrcb += TG3_BDINFO_SIZE;
	}
7533 stblk = HOSTCC_STATBLCK_RING1;
7535 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7536 u64 mapping = (u64)tnapi->status_mapping;
7537 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7538 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7540 /* Clear status block in ram. */
7541 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7543 if (tnapi->tx_ring) {
7544 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7545 (TG3_TX_RING_SIZE <<
7546 BDINFO_FLAGS_MAXLEN_SHIFT),
7547 NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}
		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7552 (TG3_RX_RCB_RING_SIZE(tp) <<
7553 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
7560 /* tp->lock is held. */
7561 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
7565 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7567 tg3_disable_ints(tp);
7571 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7573 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
		tg3_abort_hw(tp, 1);
	}

	if (reset_phy)
		tg3_phy_reset(tp);
	err = tg3_chip_reset(tp);
	if (err)
		return err;
7584 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7586 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7587 val = tr32(TG3_CPMU_CTRL);
7588 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7589 tw32(TG3_CPMU_CTRL, val);
7591 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7592 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7593 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7594 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7596 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7597 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7598 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7599 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7601 val = tr32(TG3_CPMU_HST_ACC);
7602 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7603 val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}
7607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7608 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7609 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7610 PCIE_PWR_MGMT_L1_THRESH_4MS;
7611 tw32(PCIE_PWR_MGMT_THRESH, val);
7613 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7614 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7616 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7618 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}
7622 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7623 u32 grc_mode = tr32(GRC_MODE);
7625 /* Access the lower 1K of PL PCIE block registers. */
7626 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7627 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7629 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7630 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7631 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
		tw32(GRC_MODE, grc_mode);
	}
7636 /* This works around an issue with Athlon chipsets on
7637 * B3 tigon3 silicon. This bit has no effect on any
7638 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
7641 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7642 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7643 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}
7647 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7648 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7649 val = tr32(TG3PCI_PCISTATE);
7650 val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}
7654 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7655 /* Allow reads and writes to the
		 * APE register and memory space.
		 */
7658 val = tr32(TG3PCI_PCISTATE);
7659 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7660 PCISTATE_ALLOW_APE_SHMEM_WR;
		tw32(TG3PCI_PCISTATE, val);
	}
7664 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7665 /* Enable some hw fixes. */
7666 val = tr32(TG3PCI_MSI_DATA);
7667 val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}
7671 /* Descriptor ring init may make accesses to the
7672 * NIC SRAM area to setup the TX descriptors, so we
7673 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;
7680 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7682 val = tr32(TG3PCI_DMA_RW_CTRL) &
7683 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7684 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7685 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7686 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7687 /* This value is determined during the probe time DMA
7688 * engine test, tg3_test_dma.
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
7693 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7694 GRC_MODE_4X_NIC_SEND_RINGS |
7695 GRC_MODE_NO_TX_PHDR_CSUM |
7696 GRC_MODE_NO_RX_PHDR_CSUM);
7697 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
7705 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
	/* Setup the timer prescaler register.  The clock is always 66 MHz. */
7712 val = tr32(GRC_MISC_CFG);
	val &= ~GRC_MISC_CFG_PRESCALAR_MASK;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7715 tw32(GRC_MISC_CFG, val);
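	/* A prescaler value of 65 presumably yields divide-by-66, turning
	 * the 66 MHz core clock into a 1 MHz (1 usec resolution) timer tick.
	 */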
7717 /* Initialize MBUF/DESC pool. */
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		/* Do nothing.  */
7720 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7721 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7723 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7726 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7727 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		int fw_len;

		fw_len = tp->fw_len;
7733 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7734 tw32(BUFMGR_MB_POOL_ADDR,
7735 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7736 tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}
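	/* The TSO firmware image sits at the bottom of the 5705 mbuf pool
	 * (see tg3_load_tso_firmware), so the usable pool is moved up by
	 * the 128-byte-rounded firmware length; the trailing 0xa00 bytes
	 * are apparently reserved as well.
	 */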
7740 if (tp->dev->mtu <= ETH_DATA_LEN) {
7741 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7742 tp->bufmgr_config.mbuf_read_dma_low_water);
7743 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7744 tp->bufmgr_config.mbuf_mac_rx_low_water);
7745 tw32(BUFMGR_MB_HIGH_WATER,
7746 tp->bufmgr_config.mbuf_high_water);
7748 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7749 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7750 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7751 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7752 tw32(BUFMGR_MB_HIGH_WATER,
7753 tp->bufmgr_config.mbuf_high_water_jumbo);
7755 tw32(BUFMGR_DMA_LOW_WATER,
7756 tp->bufmgr_config.dma_low_water);
7757 tw32(BUFMGR_DMA_HIGH_WATER,
7758 tp->bufmgr_config.dma_high_water);
7760 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7761 for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
7767 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
7771 /* Setup replenish threshold. */
	val = tp->rx_pending / 8;
	if (val == 0)
		val = 1;
7775 else if (val > tp->rx_std_max_post)
7776 val = tp->rx_std_max_post;
7777 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7778 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7779 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7781 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
			val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
	}
7785 tw32(RCVBDI_STD_THRESH, val);
7787 /* Initialize TG3_BDINFO's at:
7788 * RCVDBDI_STD_BD: standard eth size rx ring
7789 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7790 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7793 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7794 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7795 * ring attribute flags
7796 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7798 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7799 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 * The size of each ring is fixed in the firmware, but the location
	 * is configurable.
	 */
7804 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7805 ((u64) tpr->rx_std_mapping >> 32));
7806 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7807 ((u64) tpr->rx_std_mapping & 0xffffffff));
7808 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7809 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7810 NIC_SRAM_RX_BUFFER_DESC);
7812 /* Disable the mini ring */
7813 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7814 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7815 BDINFO_FLAGS_DISABLED);
7817 /* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
7820 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7821 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7822 /* Setup replenish threshold. */
7823 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7825 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7826 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7827 ((u64) tpr->rx_jmb_mapping >> 32));
7828 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7829 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7830 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7831 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7832 BDINFO_FLAGS_USE_EXT_RECV);
7833 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7834 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7835 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}
	}
7841 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7842 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7843 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7844 (RX_STD_MAX_SIZE << 2);
	else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
		val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
	else
7848 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7850 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7852 tpr->rx_std_prod_idx = tp->rx_pending;
7853 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7855 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7856 tp->rx_jumbo_pending : 0;
7857 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7859 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7860 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7861 tw32(STD_REPLENISH_LWM, 32);
		tw32(JMB_REPLENISH_LWM, 16);
	}
7865 tg3_rings_reset(tp);
7867 /* Initialize MAC address and backoff seed. */
7868 __tg3_set_mac_addr(tp, 0);
7870 /* MTU + ethernet header + FCS + optional VLAN tag */
7871 tw32(MAC_RX_MTU_SIZE,
7872 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7874 /* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
7877 tw32(MAC_TX_LENGTHS,
7878 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7879 (6 << TX_LENGTHS_IPG_SHIFT) |
7880 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7882 /* Receive rules. */
7883 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7884 tw32(RCVLPC_CONFIG, 0x0181);
7886 /* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
7889 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7890 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7891 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7892 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7893 RDMAC_MODE_LNGREAD_ENAB);
7895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7896 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
7898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7899 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7901 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7902 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7903 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7905 /* If statement applies to 5705 and 5750 PCI devices only */
7906 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7907 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7908 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7909 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7911 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7912 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7913 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}
7918 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7919 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7921 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7922 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7924 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7927 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7929 /* Receive/send statistics. */
7930 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7931 val = tr32(RCVLPC_STATS_ENABLE);
7932 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7933 tw32(RCVLPC_STATS_ENABLE, val);
7934 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7935 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7936 val = tr32(RCVLPC_STATS_ENABLE);
7937 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7938 tw32(RCVLPC_STATS_ENABLE, val);
7940 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7942 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7943 tw32(SNDDATAI_STATSENAB, 0xffffff);
7944 tw32(SNDDATAI_STATSCTRL,
7945 (SNDDATAI_SCTRL_ENABLE |
7946 SNDDATAI_SCTRL_FASTUPD));
7948 /* Setup host coalescing engine. */
7949 tw32(HOSTCC_MODE, 0);
7950 for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}
7956 __tg3_set_coalesce(tp, &tp->coal);
7958 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7959 /* Status/statistics block address. See tg3_timer,
7960 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
7963 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7964 ((u64) tp->stats_mapping >> 32));
7965 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7966 ((u64) tp->stats_mapping & 0xffffffff));
7967 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7969 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7971 /* Clear statistics and status block memory areas */
7972 for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32))
			tg3_write_mem(tp, i, 0);
	}
7980 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7982 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7983 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7984 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7985 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7987 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7988 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7989 /* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
7994 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
7998 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7999 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8000 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8001 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8002 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8003 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
8007 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8008 * If TG3_FLG2_IS_NIC is zero, we should read the
8009 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
8013 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8017 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8018 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8021 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8022 GRC_LCLCTRL_GPIO_OUTPUT3;
8024 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8025 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8027 tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
	}
8030 /* GPIO1 must be driven high for eeprom write protect */
8031 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8032 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8033 GRC_LCLCTRL_GPIO_OUTPUT1);
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
8038 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8039 val = tr32(MSGINT_MODE);
8040 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
		tw32(MSGINT_MODE, val);
	}
8044 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}
8049 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8050 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8051 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8052 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8053 WDMAC_MODE_LNGREAD_ENAB);
8055 /* If statement applies to 5705 and 5750 PCI devices only */
8056 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8057 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8059 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8060 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
8063 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8064 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8065 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}
8070 /* Enable host coalescing bug fix */
8071 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8072 val |= WDMAC_MODE_STATUS_TAG_FIX;
8074 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8075 val |= WDMAC_MODE_BURST_ALL_DATA;
	tw32_f(WDMAC_MODE, val);
	udelay(40);
8080 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
8085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8086 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8087 pcix_cmd |= PCI_X_CMD_READ_2K;
8088 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8089 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8090 pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
8099 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8100 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8101 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8109 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8110 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8111 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
8112 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8113 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8114 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8115 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8116 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8117 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8118 tw32(SNDBDI_MODE, val);
8119 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8121 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}
8127 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
8133 tp->tx_mode = TX_MODE_ENABLE;
8134 tw32_f(MAC_TX_MODE, tp->tx_mode);
8137 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8138 u32 reg = MAC_RSS_INDIR_TBL_0;
8139 u8 *ent = (u8 *)&val;
8141 /* Setup the indirection table */
8142 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8143 int idx = i % sizeof(val);
8145 ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}
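		/* The loop above packs four one-byte ring indices into each
		 * 32-bit register write, spreading the 128 hash buckets
		 * round-robin across the rx rings.  Vector 0 is excluded
		 * because it only services link and control events (see
		 * tg3_enable_msix).
		 */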
8152 /* Setup the "secret" hash key. */
8153 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8154 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8155 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8156 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8157 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8158 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8159 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8160 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8161 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
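	/* The ten 32-bit registers above hold the 40-byte RSS hash key,
	 * presumably fed to the usual Toeplitz hash.
	 */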
8165 tp->rx_mode = RX_MODE_ENABLE;
8166 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8167 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8169 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8170 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8171 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8172 RX_MODE_RSS_IPV6_HASH_EN |
8173 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8174 RX_MODE_RSS_IPV4_HASH_EN |
8175 RX_MODE_RSS_TCP_IPV4_HASH_EN;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
8180 tw32(MAC_LED_CTRL, tp->led_ctrl);
8182 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8183 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
		udelay(10);
	}
8190 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8191 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8192 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
8193 /* Set drive transmission level to 1.2V */
8194 /* only if the signal pre-emphasis bit is not set */
8195 val = tr32(MAC_SERDES_CFG);
8198 tw32(MAC_SERDES_CFG, val);
8200 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8201 tw32(MAC_SERDES_CFG, 0x616000);
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8214 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8215 /* Use hardware link auto-negotiation */
8216 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8219 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
8220 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
8224 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8225 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8226 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
8230 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8231 if (tp->link_config.phy_is_low_power) {
8232 tp->link_config.phy_is_low_power = 0;
8233 tp->link_config.speed = tp->link_config.orig_speed;
8234 tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}
		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;
	}
8242 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8243 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
		u32 tmp;

		/* Clear CRC stats. */
8247 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8248 tg3_writephy(tp, MII_TG3_TEST1,
8249 tmp | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &tmp);
		}
	}
8255 __tg3_set_rx_mode(tp->dev);
8257 /* Initialize receive rules. */
8258 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8259 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8260 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8261 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9, 0);  tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8, 0);  tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7, 0);  tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6, 0);  tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5, 0);  tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4, 0);  tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}
8306 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8307 /* Write our heartbeat update interval to APE. */
8308 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8309 APE_HOST_HEARTBEAT_INT_DISABLE);
	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
8316 /* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
8328 #define TG3_STAT_ADD32(PSTAT, REG) \
8329 do { u32 __val = tr32(REG); \
8330 (PSTAT)->low += __val; \
8331 if ((PSTAT)->low < __val) \
		     (PSTAT)->high += 1; \
	} while (0)
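/* The low word is summed with unsigned wraparound; after the add,
 * (PSTAT)->low < __val holds exactly when a 32-bit carry occurred, and the
 * carry is folded into the high word.  This extends the chip's 32-bit
 * counters to 64 bits in software, e.g.:
 *
 *	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
 */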
8335 static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;
	if (!netif_carrier_ok(tp->dev))
		return;
8342 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8343 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8344 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8345 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8346 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8347 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8348 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8349 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8350 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8351 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8352 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8353 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8354 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8356 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8357 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8358 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8359 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8360 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8361 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8362 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8363 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8364 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8365 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8366 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8367 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8368 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8369 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8371 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8372 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
8376 static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);
8385 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8386 /* All of this garbage is because when using non-tagged
8387 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
8390 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8391 tw32(GRC_LOCAL_CTRL,
8392 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8394 tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}
8398 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8399 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8400 spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}
8406 /* This part only runs once per second. */
8407 if (!--tp->timer_counter) {
8408 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8409 tg3_periodic_fetch_stats(tp);
8411 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8415 mac_stat = tr32(MAC_STATUS);
8418 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
8419 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8421 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8425 tg3_setup_phy(tp, 0);
8426 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8427 u32 mac_stat = tr32(MAC_STATUS);
8430 if (netif_carrier_ok(tp->dev) &&
8431 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8434 if (! netif_carrier_ok(tp->dev) &&
8435 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8436 MAC_STATUS_SIGNAL_DET))) {
8440 if (!tp->serdes_counter) {
8443 ~MAC_MODE_PORT_MODE_MASK));
8445 tw32_f(MAC_MODE, tp->mac_mode);
8448 tg3_setup_phy(tp, 0);
8450 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8451 tg3_serdes_parallel_detect(tp);
		tp->timer_counter = tp->timer_multiplier;
	}
8456 /* Heartbeat is only sent once every 2 seconds.
8458 * The heartbeat is to tell the ASF firmware that the host
8459 * driver is still alive. In the event that the OS crashes,
8460 * ASF needs to reset the hardware to free up the FIFO space
8461 * that may be filled with rx packets destined for the host.
8462 * If the FIFO is full, ASF will no longer function properly.
8464 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
8468 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8469 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
8473 if (!--tp->asf_counter) {
8474 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8475 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8476 tg3_wait_for_event_ack(tp);
8478 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8479 FWCMD_NICDRV_ALIVE3);
8480 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8481 /* 5 seconds timeout */
8482 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}
8489 spin_unlock(&tp->lock);
restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
8496 static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];
8503 if (tp->irq_cnt == 1)
8504 name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		fn = tg3_msi;
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			fn = tg3_msi_1shot;
		flags = IRQF_SAMPLE_RANDOM;
	} else {
		fn = tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
	}
	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
8526 static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;
	if (!netif_running(dev))
		return -ENODEV;
8536 tg3_disable_ints(tp);
8538 free_irq(tnapi->irq_vec, tnapi);
	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
8544 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8546 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8547 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8548 tw32(MSGINT_MODE, val);
8551 err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;
8556 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8557 tg3_enable_ints(tp);
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);
8562 for (i = 0; i < 5; i++) {
8563 u32 int_mbox, misc_host_ctrl;
8565 int_mbox = tr32_mailbox(tnapi->int_mbox);
8566 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8568 if ((int_mbox != 0) ||
8569 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8577 tg3_disable_ints(tp);
8579 free_irq(tnapi->irq_vec, tnapi);
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
8587 /* Reenable MSI one shot mode. */
8588 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8590 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8591 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if the MSI test succeeds, or if the test fails but INTx mode
 * is successfully restored.
 */
8603 static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;
	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
8614 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8615 pci_write_config_word(tp->pdev, PCI_COMMAND,
8616 pci_cmd & ~PCI_COMMAND_SERR);
8618 err = tg3_test_interrupt(tp);
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;
	/* other failures */
	if (err != -EIO)
		return err;
8629 /* MSI test failed, go back to INTx mode */
8630 netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
8631 "Please report this failure to the PCI maintainer and include system chipset information\n");
8633 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8635 pci_disable_msi(tp->pdev);
8637 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;
8643 /* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
8646 tg3_full_lock(tp, 1);
8648 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8649 err = tg3_init_hw(tp, 1);
8651 tg3_full_unlock(tp);
	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
8659 static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;
8663 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8664 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8669 fw_data = (void *)tp->fw->data;
8671 /* Firmware blob starts with version numbers, followed by
8672 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
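	/* Concretely, the header is three big-endian u32s:
	 *   fw_data[0]: firmware version
	 *   fw_data[1]: load (start) address
	 *   fw_data[2]: full length including BSS
	 * and the loadable image is the tp->fw->size - 12 bytes starting at
	 * &fw_data[3] (see tg3_load_firmware_cpu and its callers).
	 */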
8676 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8677 if (tp->fw_len < (tp->fw->size - 12)) {
8678 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8679 tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}
8685 /* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
8690 static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
8693 struct msix_entry msix_ent[tp->irq_max];
	if (cpus == 1)
		/* Just fall back to the simpler MSI mode. */
		return false;
	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
8704 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
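	/* E.g. on a four-CPU machine this requests five vectors: vector 0
	 * for link and control events plus one rx ring per CPU, capped at
	 * tp->irq_max.
	 */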
8706 for (i = 0; i < tp->irq_max; i++) {
8707 msix_ent[i].entry = i;
8708 msix_ent[i].vector = 0;
	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc != 0) {
		if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
			return false;
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8724 for (i = 0; i < tp->irq_max; i++)
8725 tp->napi[i].irq_vec = msix_ent[i].vector;
8727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
8728 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8729 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
	} else
		tp->dev->real_num_tx_queues = 1;

	return true;
}
8736 static void tg3_ints_init(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8739 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8740 /* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
8743 netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
8747 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8748 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8749 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8750 pci_enable_msi(tp->pdev) == 0)
8751 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8753 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8754 u32 msi_mode = tr32(MSGINT_MODE);
8755 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8756 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}

defcfg:
8760 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8762 tp->napi[0].irq_vec = tp->pdev->irq;
		tp->dev->real_num_tx_queues = 1;
	}
}
8767 static void tg3_ints_fini(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8770 pci_disable_msix(tp->pdev);
8771 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8772 pci_disable_msi(tp->pdev);
8773 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
	tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
}
8777 static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;
8782 if (tp->fw_needed) {
8783 err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
8788 netdev_warn(tp->dev, "TSO capability disabled\n");
8789 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8790 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8791 netdev_notice(tp->dev, "TSO capability restored\n");
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}
8796 netif_carrier_off(tp->dev);
	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;
8802 tg3_full_lock(tp, 0);
8804 tg3_disable_ints(tp);
8805 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8807 tg3_full_unlock(tp);
	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);
8815 /* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
8818 err = tg3_alloc_consistent(tp);
8822 tg3_napi_enable(tp);
8824 for (i = 0; i < tp->irq_cnt; i++) {
8825 struct tg3_napi *tnapi = &tp->napi[i];
8826 err = tg3_request_irq(tp, i);
8828 for (i--; i >= 0; i--)
8829 free_irq(tnapi->irq_vec, tnapi);
8837 tg3_full_lock(tp, 0);
8839 err = tg3_init_hw(tp, 1);
8841 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8844 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8845 tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;
8849 BUG_ON(tp->timer_offset > HZ);
8850 tp->timer_counter = tp->timer_multiplier =
8851 (HZ / tp->timer_offset);
8852 tp->asf_counter = tp->asf_multiplier =
8853 ((HZ / tp->timer_offset) * 2);
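	/* With tagged status the timer ticks once per second, otherwise ten
	 * times per second; either way timer_counter counts down one
	 * second's worth of ticks and asf_counter two seconds' worth,
	 * matching the heartbeat interval described in tg3_timer.
	 */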
8855 init_timer(&tp->timer);
8856 tp->timer.expires = jiffies + tp->timer_offset;
8857 tp->timer.data = (unsigned long) tp;
8858 tp->timer.function = tg3_timer;
8861 tg3_full_unlock(tp);
8866 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8867 err = tg3_test_msi(tp);
8870 tg3_full_lock(tp, 0);
8871 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8873 tg3_full_unlock(tp);
8878 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8879 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8880 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8881 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
8882 u32 val = tr32(PCIE_TRANSACTION_CFG);
8884 tw32(PCIE_TRANSACTION_CFG,
8885 val | PCIE_TRANS_CFG_1SHOT_MSI);
8891 tg3_full_lock(tp, 0);
8893 add_timer(&tp->timer);
8894 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8895 tg3_enable_ints(tp);
8897 tg3_full_unlock(tp);
	netif_tx_start_all_queues(dev);

	return 0;
8904 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8905 struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}
8910 tg3_napi_disable(tp);
	tg3_free_consistent(tp);

	return err;
}
8919 /*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	struct tg3_hw_status *sblk = tp->napi[0].hw_status;
8926 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8927 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8928 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8932 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8933 tr32(MAC_MODE), tr32(MAC_STATUS));
8934 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8935 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8936 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8937 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8938 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8939 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8941 /* Send data initiator control block */
8942 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8943 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8944 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8945 tr32(SNDDATAI_STATSCTRL));
8947 /* Send data completion control block */
8948 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8950 /* Send BD ring selector block */
8951 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8952 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8954 /* Send BD initiator control block */
8955 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8956 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8958 /* Send BD completion control block */
8959 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8961 /* Receive list placement control block */
8962 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8963 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8964 printk(" RCVLPC_STATSCTRL[%08x]\n",
8965 tr32(RCVLPC_STATSCTRL));
8967 /* Receive data and receive BD initiator control block */
8968 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8969 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8971 /* Receive data completion control block */
8972 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8973 tr32(RCVDCC_MODE));
8975 /* Receive BD initiator control block */
8976 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8977 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8979 /* Receive BD completion control block */
8980 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8981 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8983 /* Receive list selector control block */
8984 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8985 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8987 /* Mbuf cluster free block */
8988 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8989 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8991 /* Host coalescing control block */
8992 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8993 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8994 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8995 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8996 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8997 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8998 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8999 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
9000 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
9001 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
9002 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
9003 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
9005 /* Memory arbiter control block */
9006 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
9007 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
9009 /* Buffer manager control block */
9010 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
9011 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
9012 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
9013 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
9014 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
9015 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
9016 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
9017 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
9019 /* Read DMA control block */
9020 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
9021 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
9023 /* Write DMA control block */
9024 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
9025 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
9027 /* DMA completion block */
9028 printk("DEBUG: DMAC_MODE[%08x]\n",
9029 tr32(DMAC_MODE));
9032 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
9033 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
9034 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
9035 tr32(GRC_LOCAL_CTRL));
9038 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
9039 tr32(RCVDBDI_JUMBO_BD + 0x0),
9040 tr32(RCVDBDI_JUMBO_BD + 0x4),
9041 tr32(RCVDBDI_JUMBO_BD + 0x8),
9042 tr32(RCVDBDI_JUMBO_BD + 0xc));
9043 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
9044 tr32(RCVDBDI_STD_BD + 0x0),
9045 tr32(RCVDBDI_STD_BD + 0x4),
9046 tr32(RCVDBDI_STD_BD + 0x8),
9047 tr32(RCVDBDI_STD_BD + 0xc));
9048 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
9049 tr32(RCVDBDI_MINI_BD + 0x0),
9050 tr32(RCVDBDI_MINI_BD + 0x4),
9051 tr32(RCVDBDI_MINI_BD + 0x8),
9052 tr32(RCVDBDI_MINI_BD + 0xc));
9054 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
9055 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
9056 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
9057 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
9058 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
9059 val32, val32_2, val32_3, val32_4);
9061 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
9062 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
9063 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
9064 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
9065 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
9066 val32, val32_2, val32_3, val32_4);
9068 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
9069 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
9070 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
9071 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
9072 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
9073 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
9074 val32, val32_2, val32_3, val32_4, val32_5);
9076 /* SW status block */
9077 printk(KERN_DEBUG
9078 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
9079 sblk->status,
9080 sblk->status_tag,
9081 sblk->rx_jumbo_consumer,
9082 sblk->rx_consumer,
9083 sblk->rx_mini_consumer,
9084 sblk->idx[0].rx_producer,
9085 sblk->idx[0].tx_consumer);
9087 /* SW statistics block */
9088 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
9089 ((u32 *)tp->hw_stats)[0],
9090 ((u32 *)tp->hw_stats)[1],
9091 ((u32 *)tp->hw_stats)[2],
9092 ((u32 *)tp->hw_stats)[3]);
9095 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
9096 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
9097 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
9098 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
9099 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
9101 /* NIC side send descriptors. */
9102 for (i = 0; i < 6; i++) {
9105 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
9106 + (i * sizeof(struct tg3_tx_buffer_desc));
9107 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
9108 i,
9109 readl(txd + 0x0), readl(txd + 0x4),
9110 readl(txd + 0x8), readl(txd + 0xc));
9113 /* NIC side RX descriptors. */
9114 for (i = 0; i < 6; i++) {
9117 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
9118 + (i * sizeof(struct tg3_rx_buffer_desc));
9119 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
9120 i,
9121 readl(rxd + 0x0), readl(rxd + 0x4),
9122 readl(rxd + 0x8), readl(rxd + 0xc));
9123 rxd += (4 * sizeof(u32));
9124 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
9125 i,
9126 readl(rxd + 0x0), readl(rxd + 0x4),
9127 readl(rxd + 0x8), readl(rxd + 0xc));
9130 for (i = 0; i < 6; i++) {
9133 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
9134 + (i * sizeof(struct tg3_rx_buffer_desc));
9135 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
9136 i,
9137 readl(rxd + 0x0), readl(rxd + 0x4),
9138 readl(rxd + 0x8), readl(rxd + 0xc));
9139 rxd += (4 * sizeof(u32));
9140 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
9141 i,
9142 readl(rxd + 0x0), readl(rxd + 0x4),
9143 readl(rxd + 0x8), readl(rxd + 0xc));
9148 static struct net_device_stats *tg3_get_stats(struct net_device *);
9149 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9151 static int tg3_close(struct net_device *dev)
9154 struct tg3 *tp = netdev_priv(dev);
9156 tg3_napi_disable(tp);
9157 cancel_work_sync(&tp->reset_task);
9159 netif_tx_stop_all_queues(dev);
9161 del_timer_sync(&tp->timer);
9165 tg3_full_lock(tp, 1);
9170 tg3_disable_ints(tp);
9172 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9174 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9176 tg3_full_unlock(tp);
9178 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9179 struct tg3_napi *tnapi = &tp->napi[i];
9180 free_irq(tnapi->irq_vec, tnapi);
9185 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
9186 sizeof(tp->net_stats_prev));
9187 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9188 sizeof(tp->estats_prev));
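/* The _prev snapshots taken here are the other half of the scheme used
 * by tg3_get_stats()/tg3_get_estats() below: the hardware counters are
 * lost when the chip is halted, so close saves the running totals and
 * every later read reports "snapshot + current hardware value".
 */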
9190 tg3_free_consistent(tp);
9192 tg3_set_power_state(tp, PCI_D3hot);
9194 netif_carrier_off(tp->dev);
9199 static inline unsigned long get_stat64(tg3_stat64_t *val)
9203 #if (BITS_PER_LONG == 32)
9206 ret = ((u64)val->high << 32) | ((u64)val->low);
9211 static inline u64 get_estat64(tg3_stat64_t *val)
9213 return ((u64)val->high << 32) | ((u64)val->low);
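/* Example of the high/low composition above: a counter the hardware
 * reports as high = 0x00000001, low = 0x00000002 yields the 64-bit
 * value 0x0000000100000002.  Note that get_stat64() returns unsigned
 * long, so on a 32-bit kernel the result is truncated to the low word.
 */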
9216 static unsigned long calc_crc_errors(struct tg3 *tp)
9218 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9220 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9221 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9225 spin_lock_bh(&tp->lock);
9226 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9227 tg3_writephy(tp, MII_TG3_TEST1,
9228 val | MII_TG3_TEST1_CRC_EN);
9229 tg3_readphy(tp, 0x14, &val);
9232 spin_unlock_bh(&tp->lock);
9234 tp->phy_crc_errors += val;
9236 return tp->phy_crc_errors;
9239 return get_stat64(&hw_stats->rx_fcs_errors);
9242 #define ESTAT_ADD(member) \
9243 estats->member = old_estats->member + \
9244 get_estat64(&hw_stats->member)
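/* For reference, ESTAT_ADD(rx_octets) expands to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_estat64(&hw_stats->rx_octets);
 *
 * i.e. each reported counter is the pre-close snapshot plus the live
 * hardware count, which keeps ethtool statistics monotonic across an
 * interface down/up cycle.
 */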
9246 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9248 struct tg3_ethtool_stats *estats = &tp->estats;
9249 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9250 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9255 ESTAT_ADD(rx_octets);
9256 ESTAT_ADD(rx_fragments);
9257 ESTAT_ADD(rx_ucast_packets);
9258 ESTAT_ADD(rx_mcast_packets);
9259 ESTAT_ADD(rx_bcast_packets);
9260 ESTAT_ADD(rx_fcs_errors);
9261 ESTAT_ADD(rx_align_errors);
9262 ESTAT_ADD(rx_xon_pause_rcvd);
9263 ESTAT_ADD(rx_xoff_pause_rcvd);
9264 ESTAT_ADD(rx_mac_ctrl_rcvd);
9265 ESTAT_ADD(rx_xoff_entered);
9266 ESTAT_ADD(rx_frame_too_long_errors);
9267 ESTAT_ADD(rx_jabbers);
9268 ESTAT_ADD(rx_undersize_packets);
9269 ESTAT_ADD(rx_in_length_errors);
9270 ESTAT_ADD(rx_out_length_errors);
9271 ESTAT_ADD(rx_64_or_less_octet_packets);
9272 ESTAT_ADD(rx_65_to_127_octet_packets);
9273 ESTAT_ADD(rx_128_to_255_octet_packets);
9274 ESTAT_ADD(rx_256_to_511_octet_packets);
9275 ESTAT_ADD(rx_512_to_1023_octet_packets);
9276 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9277 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9278 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9279 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9280 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9282 ESTAT_ADD(tx_octets);
9283 ESTAT_ADD(tx_collisions);
9284 ESTAT_ADD(tx_xon_sent);
9285 ESTAT_ADD(tx_xoff_sent);
9286 ESTAT_ADD(tx_flow_control);
9287 ESTAT_ADD(tx_mac_errors);
9288 ESTAT_ADD(tx_single_collisions);
9289 ESTAT_ADD(tx_mult_collisions);
9290 ESTAT_ADD(tx_deferred);
9291 ESTAT_ADD(tx_excessive_collisions);
9292 ESTAT_ADD(tx_late_collisions);
9293 ESTAT_ADD(tx_collide_2times);
9294 ESTAT_ADD(tx_collide_3times);
9295 ESTAT_ADD(tx_collide_4times);
9296 ESTAT_ADD(tx_collide_5times);
9297 ESTAT_ADD(tx_collide_6times);
9298 ESTAT_ADD(tx_collide_7times);
9299 ESTAT_ADD(tx_collide_8times);
9300 ESTAT_ADD(tx_collide_9times);
9301 ESTAT_ADD(tx_collide_10times);
9302 ESTAT_ADD(tx_collide_11times);
9303 ESTAT_ADD(tx_collide_12times);
9304 ESTAT_ADD(tx_collide_13times);
9305 ESTAT_ADD(tx_collide_14times);
9306 ESTAT_ADD(tx_collide_15times);
9307 ESTAT_ADD(tx_ucast_packets);
9308 ESTAT_ADD(tx_mcast_packets);
9309 ESTAT_ADD(tx_bcast_packets);
9310 ESTAT_ADD(tx_carrier_sense_errors);
9311 ESTAT_ADD(tx_discards);
9312 ESTAT_ADD(tx_errors);
9314 ESTAT_ADD(dma_writeq_full);
9315 ESTAT_ADD(dma_write_prioq_full);
9316 ESTAT_ADD(rxbds_empty);
9317 ESTAT_ADD(rx_discards);
9318 ESTAT_ADD(rx_errors);
9319 ESTAT_ADD(rx_threshold_hit);
9321 ESTAT_ADD(dma_readq_full);
9322 ESTAT_ADD(dma_read_prioq_full);
9323 ESTAT_ADD(tx_comp_queue_full);
9325 ESTAT_ADD(ring_set_send_prod_index);
9326 ESTAT_ADD(ring_status_update);
9327 ESTAT_ADD(nic_irqs);
9328 ESTAT_ADD(nic_avoided_irqs);
9329 ESTAT_ADD(nic_tx_threshold_hit);
9334 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
9336 struct tg3 *tp = netdev_priv(dev);
9337 struct net_device_stats *stats = &tp->net_stats;
9338 struct net_device_stats *old_stats = &tp->net_stats_prev;
9339 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9344 stats->rx_packets = old_stats->rx_packets +
9345 get_stat64(&hw_stats->rx_ucast_packets) +
9346 get_stat64(&hw_stats->rx_mcast_packets) +
9347 get_stat64(&hw_stats->rx_bcast_packets);
9349 stats->tx_packets = old_stats->tx_packets +
9350 get_stat64(&hw_stats->tx_ucast_packets) +
9351 get_stat64(&hw_stats->tx_mcast_packets) +
9352 get_stat64(&hw_stats->tx_bcast_packets);
9354 stats->rx_bytes = old_stats->rx_bytes +
9355 get_stat64(&hw_stats->rx_octets);
9356 stats->tx_bytes = old_stats->tx_bytes +
9357 get_stat64(&hw_stats->tx_octets);
9359 stats->rx_errors = old_stats->rx_errors +
9360 get_stat64(&hw_stats->rx_errors);
9361 stats->tx_errors = old_stats->tx_errors +
9362 get_stat64(&hw_stats->tx_errors) +
9363 get_stat64(&hw_stats->tx_mac_errors) +
9364 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9365 get_stat64(&hw_stats->tx_discards);
9367 stats->multicast = old_stats->multicast +
9368 get_stat64(&hw_stats->rx_mcast_packets);
9369 stats->collisions = old_stats->collisions +
9370 get_stat64(&hw_stats->tx_collisions);
9372 stats->rx_length_errors = old_stats->rx_length_errors +
9373 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9374 get_stat64(&hw_stats->rx_undersize_packets);
9376 stats->rx_over_errors = old_stats->rx_over_errors +
9377 get_stat64(&hw_stats->rxbds_empty);
9378 stats->rx_frame_errors = old_stats->rx_frame_errors +
9379 get_stat64(&hw_stats->rx_align_errors);
9380 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9381 get_stat64(&hw_stats->tx_discards);
9382 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9383 get_stat64(&hw_stats->tx_carrier_sense_errors);
9385 stats->rx_crc_errors = old_stats->rx_crc_errors +
9386 calc_crc_errors(tp);
9388 stats->rx_missed_errors = old_stats->rx_missed_errors +
9389 get_stat64(&hw_stats->rx_discards);
9394 static inline u32 calc_crc(unsigned char *buf, int len)
9402 for (j = 0; j < len; j++) {
9405 for (k = 0; k < 8; k++) {
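/* calc_crc() implements the standard reflected Ethernet CRC-32
 * (polynomial 0xedb88320): each input byte is xored into the low end
 * of the shift register, followed by eight conditional-xor bit steps.
 * The multicast filter code below hashes on the low seven bits of the
 * inverted result.
 */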
9419 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9421 /* accept or reject all multicast frames */
9422 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9423 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9424 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9425 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9428 static void __tg3_set_rx_mode(struct net_device *dev)
9430 struct tg3 *tp = netdev_priv(dev);
9433 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9434 RX_MODE_KEEP_VLAN_TAG);
9436 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9437 * flag clear.
9438 */
9439 #if TG3_VLAN_TAG_USED
9441 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9442 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9444 /* By definition, VLAN is disabled always in this
9445 * case.
9446 */
9447 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9448 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9451 if (dev->flags & IFF_PROMISC) {
9452 /* Promiscuous mode. */
9453 rx_mode |= RX_MODE_PROMISC;
9454 } else if (dev->flags & IFF_ALLMULTI) {
9455 /* Accept all multicast. */
9456 tg3_set_multi (tp, 1);
9457 } else if (netdev_mc_empty(dev)) {
9458 /* Reject all multicast. */
9459 tg3_set_multi (tp, 0);
9461 /* Accept one or more multicast(s). */
9462 struct netdev_hw_addr *ha;
9463 u32 mc_filter[4] = { 0, };
9464 u32 regidx;
9465 u32 bit;
9466 u32 crc;
9468 netdev_for_each_mc_addr(ha, dev) {
9469 crc = calc_crc(ha->addr, ETH_ALEN);
9470 bit = ~crc & 0x7f;
9471 regidx = (bit & 0x60) >> 5;
9472 bit &= 0x1f;
9473 mc_filter[regidx] |= (1 << bit);
9476 tw32(MAC_HASH_REG_0, mc_filter[0]);
9477 tw32(MAC_HASH_REG_1, mc_filter[1]);
9478 tw32(MAC_HASH_REG_2, mc_filter[2]);
9479 tw32(MAC_HASH_REG_3, mc_filter[3]);
9482 if (rx_mode != tp->rx_mode) {
9483 tp->rx_mode = rx_mode;
9484 tw32_f(MAC_RX_MODE, rx_mode);
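/* A minimal sketch (editor's illustration, not referenced by the
 * driver) of how one multicast address lands in the four hash
 * registers written above: the low seven bits of the inverted CRC
 * select one of 128 filter bits; the top two of those bits pick the
 * MAC_HASH_REG_n register and the low five the bit within it.
 */
static inline void tg3_example_hash_pos(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bit = ~crc & 0x7f;		/* 0..127 */

	*regidx = (bit & 0x60) >> 5;	/* which MAC_HASH_REG_n (0..3) */
	*bitpos = bit & 0x1f;		/* which bit within that register */
}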
9489 static void tg3_set_rx_mode(struct net_device *dev)
9491 struct tg3 *tp = netdev_priv(dev);
9493 if (!netif_running(dev))
9494 return;
9496 tg3_full_lock(tp, 0);
9497 __tg3_set_rx_mode(dev);
9498 tg3_full_unlock(tp);
9501 #define TG3_REGDUMP_LEN (32 * 1024)
9503 static int tg3_get_regs_len(struct net_device *dev)
9505 return TG3_REGDUMP_LEN;
9508 static void tg3_get_regs(struct net_device *dev,
9509 struct ethtool_regs *regs, void *_p)
9512 struct tg3 *tp = netdev_priv(dev);
9518 memset(p, 0, TG3_REGDUMP_LEN);
9520 if (tp->link_config.phy_is_low_power)
9521 return;
9523 tg3_full_lock(tp, 0);
9525 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9526 #define GET_REG32_LOOP(base,len) \
9527 do { p = (u32 *)(orig_p + (base)); \
9528 for (i = 0; i < len; i += 4) \
9529 __GET_REG32((base) + i); \
9530 } while (0)
9531 #define GET_REG32_1(reg) \
9532 do { p = (u32 *)(orig_p + (reg)); \
9533 __GET_REG32((reg)); \
9534 } while (0)
9536 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9537 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9538 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9539 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9540 GET_REG32_1(SNDDATAC_MODE);
9541 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9542 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9543 GET_REG32_1(SNDBDC_MODE);
9544 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9545 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9546 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9547 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9548 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9549 GET_REG32_1(RCVDCC_MODE);
9550 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9551 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9552 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9553 GET_REG32_1(MBFREE_MODE);
9554 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9555 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9556 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9557 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9558 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9559 GET_REG32_1(RX_CPU_MODE);
9560 GET_REG32_1(RX_CPU_STATE);
9561 GET_REG32_1(RX_CPU_PGMCTR);
9562 GET_REG32_1(RX_CPU_HWBKPT);
9563 GET_REG32_1(TX_CPU_MODE);
9564 GET_REG32_1(TX_CPU_STATE);
9565 GET_REG32_1(TX_CPU_PGMCTR);
9566 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9567 GET_REG32_LOOP(FTQ_RESET, 0x120);
9568 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9569 GET_REG32_1(DMAC_MODE);
9570 GET_REG32_LOOP(GRC_MODE, 0x4c);
9571 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9572 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9574 #undef __GET_REG32
9575 #undef GET_REG32_LOOP
9576 #undef GET_REG32_1
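/* The dump layout mirrors the chip's register map: each
 * GET_REG32_LOOP(base, len) above copies len bytes of registers
 * starting at offset 'base' to the same byte offset inside the 32 kB
 * output buffer (e.g. GET_REG32_LOOP(MAC_MODE, 0x4f0) fills
 * orig_p + MAC_MODE through orig_p + MAC_MODE + 0x4ec).  Unread holes
 * stay zero from the earlier memset(), so offsets remain stable for
 * tools that parse 'ethtool -d' output.
 */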
9578 tg3_full_unlock(tp);
9581 static int tg3_get_eeprom_len(struct net_device *dev)
9583 struct tg3 *tp = netdev_priv(dev);
9585 return tp->nvram_size;
9588 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9590 struct tg3 *tp = netdev_priv(dev);
9593 u32 i, offset, len, b_offset, b_count;
9596 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9597 return -EINVAL;
9599 if (tp->link_config.phy_is_low_power)
9600 return -EAGAIN;
9602 offset = eeprom->offset;
9606 eeprom->magic = TG3_EEPROM_MAGIC;
9609 /* adjustments to start on required 4 byte boundary */
9610 b_offset = offset & 3;
9611 b_count = 4 - b_offset;
9612 if (b_count > len) {
9613 /* i.e. offset=1 len=2 */
9616 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9619 memcpy(data, ((char*)&val) + b_offset, b_count);
9622 eeprom->len += b_count;
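/* Worked example of the head alignment above: offset=1, len=6 gives
 * b_offset=1 and b_count=3, so the word at NVRAM offset 0 is read and
 * its last three bytes are returned first; the aligned loop below then
 * resumes at offset 4, with three bytes left over for the tail case.
 */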
9625 /* read bytes up to the last 4 byte boundary */
9626 pd = &data[eeprom->len];
9627 for (i = 0; i < (len - (len & 3)); i += 4) {
9628 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9633 memcpy(pd + i, &val, 4);
9638 /* read last bytes not ending on 4 byte boundary */
9639 pd = &data[eeprom->len];
9641 b_offset = offset + len - b_count;
9642 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9645 memcpy(pd, &val, b_count);
9646 eeprom->len += b_count;
9651 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9653 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9655 struct tg3 *tp = netdev_priv(dev);
9657 u32 offset, len, b_offset, odd_len;
9661 if (tp->link_config.phy_is_low_power)
9662 return -EAGAIN;
9664 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9665 eeprom->magic != TG3_EEPROM_MAGIC)
9668 offset = eeprom->offset;
9671 if ((b_offset = (offset & 3))) {
9672 /* adjustments to start on required 4 byte boundary */
9673 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9684 /* adjustments to end on required 4 byte boundary */
9686 len = (len + 3) & ~3;
9687 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9693 if (b_offset || odd_len) {
9694 buf = kmalloc(len, GFP_KERNEL);
9698 memcpy(buf, &start, 4);
9700 memcpy(buf+len-4, &end, 4);
9701 memcpy(buf + b_offset, data, eeprom->len);
9704 ret = tg3_nvram_write_block(tp, offset, len, buf);
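/* Worked example of the read-modify-write above: offset=2, len=3
 * becomes b_offset=2, a padded length of 8 and odd_len=1.  The word
 * read at offset 0 supplies the two leading bytes, the word at 4 the
 * three trailing ones, the caller's three bytes are spliced in at
 * buf + 2, and the whole aligned 8-byte block is written back.
 */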
9712 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9714 struct tg3 *tp = netdev_priv(dev);
9716 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9717 struct phy_device *phydev;
9718 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9719 return -EAGAIN;
9720 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9721 return phy_ethtool_gset(phydev, cmd);
9724 cmd->supported = (SUPPORTED_Autoneg);
9726 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9727 cmd->supported |= (SUPPORTED_1000baseT_Half |
9728 SUPPORTED_1000baseT_Full);
9730 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9731 cmd->supported |= (SUPPORTED_100baseT_Half |
9732 SUPPORTED_100baseT_Full |
9733 SUPPORTED_10baseT_Half |
9734 SUPPORTED_10baseT_Full |
9735 SUPPORTED_TP);
9736 cmd->port = PORT_TP;
9737 } else {
9738 cmd->supported |= SUPPORTED_FIBRE;
9739 cmd->port = PORT_FIBRE;
9742 cmd->advertising = tp->link_config.advertising;
9743 if (netif_running(dev)) {
9744 cmd->speed = tp->link_config.active_speed;
9745 cmd->duplex = tp->link_config.active_duplex;
9747 cmd->phy_address = tp->phy_addr;
9748 cmd->transceiver = XCVR_INTERNAL;
9749 cmd->autoneg = tp->link_config.autoneg;
9755 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9757 struct tg3 *tp = netdev_priv(dev);
9759 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9760 struct phy_device *phydev;
9761 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9762 return -EAGAIN;
9763 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9764 return phy_ethtool_sset(phydev, cmd);
9767 if (cmd->autoneg != AUTONEG_ENABLE &&
9768 cmd->autoneg != AUTONEG_DISABLE)
9771 if (cmd->autoneg == AUTONEG_DISABLE &&
9772 cmd->duplex != DUPLEX_FULL &&
9773 cmd->duplex != DUPLEX_HALF)
9776 if (cmd->autoneg == AUTONEG_ENABLE) {
9777 u32 mask = ADVERTISED_Autoneg |
9778 ADVERTISED_Pause |
9779 ADVERTISED_Asym_Pause;
9781 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9782 mask |= ADVERTISED_1000baseT_Half |
9783 ADVERTISED_1000baseT_Full;
9785 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9786 mask |= ADVERTISED_100baseT_Half |
9787 ADVERTISED_100baseT_Full |
9788 ADVERTISED_10baseT_Half |
9789 ADVERTISED_10baseT_Full |
9790 ADVERTISED_TP;
9791 else
9792 mask |= ADVERTISED_FIBRE;
9794 if (cmd->advertising & ~mask)
9795 return -EINVAL;
9797 mask &= (ADVERTISED_1000baseT_Half |
9798 ADVERTISED_1000baseT_Full |
9799 ADVERTISED_100baseT_Half |
9800 ADVERTISED_100baseT_Full |
9801 ADVERTISED_10baseT_Half |
9802 ADVERTISED_10baseT_Full);
9804 cmd->advertising &= mask;
9806 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9807 if (cmd->speed != SPEED_1000)
9810 if (cmd->duplex != DUPLEX_FULL)
9813 if (cmd->speed != SPEED_100 &&
9814 cmd->speed != SPEED_10)
9819 tg3_full_lock(tp, 0);
9821 tp->link_config.autoneg = cmd->autoneg;
9822 if (cmd->autoneg == AUTONEG_ENABLE) {
9823 tp->link_config.advertising = (cmd->advertising |
9824 ADVERTISED_Autoneg);
9825 tp->link_config.speed = SPEED_INVALID;
9826 tp->link_config.duplex = DUPLEX_INVALID;
9828 tp->link_config.advertising = 0;
9829 tp->link_config.speed = cmd->speed;
9830 tp->link_config.duplex = cmd->duplex;
9833 tp->link_config.orig_speed = tp->link_config.speed;
9834 tp->link_config.orig_duplex = tp->link_config.duplex;
9835 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9837 if (netif_running(dev))
9838 tg3_setup_phy(tp, 1);
9840 tg3_full_unlock(tp);
9845 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9847 struct tg3 *tp = netdev_priv(dev);
9849 strcpy(info->driver, DRV_MODULE_NAME);
9850 strcpy(info->version, DRV_MODULE_VERSION);
9851 strcpy(info->fw_version, tp->fw_ver);
9852 strcpy(info->bus_info, pci_name(tp->pdev));
9855 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9857 struct tg3 *tp = netdev_priv(dev);
9859 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9860 device_can_wakeup(&tp->pdev->dev))
9861 wol->supported = WAKE_MAGIC;
9865 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9866 device_can_wakeup(&tp->pdev->dev))
9867 wol->wolopts = WAKE_MAGIC;
9868 memset(&wol->sopass, 0, sizeof(wol->sopass));
9871 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9873 struct tg3 *tp = netdev_priv(dev);
9874 struct device *dp = &tp->pdev->dev;
9876 if (wol->wolopts & ~WAKE_MAGIC)
9878 if ((wol->wolopts & WAKE_MAGIC) &&
9879 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9882 spin_lock_bh(&tp->lock);
9883 if (wol->wolopts & WAKE_MAGIC) {
9884 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9885 device_set_wakeup_enable(dp, true);
9887 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9888 device_set_wakeup_enable(dp, false);
9890 spin_unlock_bh(&tp->lock);
9895 static u32 tg3_get_msglevel(struct net_device *dev)
9897 struct tg3 *tp = netdev_priv(dev);
9898 return tp->msg_enable;
9901 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9903 struct tg3 *tp = netdev_priv(dev);
9904 tp->msg_enable = value;
9907 static int tg3_set_tso(struct net_device *dev, u32 value)
9909 struct tg3 *tp = netdev_priv(dev);
9911 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9916 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9917 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9918 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9920 dev->features |= NETIF_F_TSO6;
9921 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9923 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9924 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9927 dev->features |= NETIF_F_TSO_ECN;
9929 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9931 return ethtool_op_set_tso(dev, value);
9934 static int tg3_nway_reset(struct net_device *dev)
9936 struct tg3 *tp = netdev_priv(dev);
9939 if (!netif_running(dev))
9940 return -EAGAIN;
9942 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9943 return -EINVAL;
9945 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9946 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9947 return -EAGAIN;
9948 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9952 spin_lock_bh(&tp->lock);
9954 tg3_readphy(tp, MII_BMCR, &bmcr);
9955 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9956 ((bmcr & BMCR_ANENABLE) ||
9957 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9958 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9962 spin_unlock_bh(&tp->lock);
9968 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9970 struct tg3 *tp = netdev_priv(dev);
9972 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9973 ering->rx_mini_max_pending = 0;
9974 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9975 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9976 else
9977 ering->rx_jumbo_max_pending = 0;
9979 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9981 ering->rx_pending = tp->rx_pending;
9982 ering->rx_mini_pending = 0;
9983 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9984 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9985 else
9986 ering->rx_jumbo_pending = 0;
9988 ering->tx_pending = tp->napi[0].tx_pending;
9991 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9993 struct tg3 *tp = netdev_priv(dev);
9994 int i, irq_sync = 0, err = 0;
9996 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9997 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9998 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9999 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10000 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
10001 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
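/* The tx ring must always hold one maximally fragmented packet
 * (MAX_SKB_FRAGS + 1 descriptors), and on chips with the TSO bug the
 * software workaround can fan a single TSO packet out into several
 * linear skbs, hence the larger 3 * MAX_SKB_FRAGS floor above.
 */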
10004 if (netif_running(dev)) {
10006 tg3_netif_stop(tp);
10010 tg3_full_lock(tp, irq_sync);
10012 tp->rx_pending = ering->rx_pending;
10014 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
10015 tp->rx_pending > 63)
10016 tp->rx_pending = 63;
10017 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10019 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
10020 tp->napi[i].tx_pending = ering->tx_pending;
10022 if (netif_running(dev)) {
10023 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10024 err = tg3_restart_hw(tp, 1);
10026 tg3_netif_start(tp);
10029 tg3_full_unlock(tp);
10031 if (irq_sync && !err)
10037 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10039 struct tg3 *tp = netdev_priv(dev);
10041 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
10043 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10044 epause->rx_pause = 1;
10045 else
10046 epause->rx_pause = 0;
10048 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10049 epause->tx_pause = 1;
10050 else
10051 epause->tx_pause = 0;
10054 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10056 struct tg3 *tp = netdev_priv(dev);
10059 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10061 struct phy_device *phydev;
10063 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10065 if (!(phydev->supported & SUPPORTED_Pause) ||
10066 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10067 ((epause->rx_pause && !epause->tx_pause) ||
10068 (!epause->rx_pause && epause->tx_pause))))
10071 tp->link_config.flowctrl = 0;
10072 if (epause->rx_pause) {
10073 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10075 if (epause->tx_pause) {
10076 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10077 newadv = ADVERTISED_Pause;
10079 newadv = ADVERTISED_Pause |
10080 ADVERTISED_Asym_Pause;
10081 } else if (epause->tx_pause) {
10082 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10083 newadv = ADVERTISED_Asym_Pause;
10087 if (epause->autoneg)
10088 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10089 else
10090 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10092 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
10093 u32 oldadv = phydev->advertising &
10094 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10095 if (oldadv != newadv) {
10096 phydev->advertising &=
10097 ~(ADVERTISED_Pause |
10098 ADVERTISED_Asym_Pause);
10099 phydev->advertising |= newadv;
10100 if (phydev->autoneg) {
10101 /*
10102 * Always renegotiate the link to
10103 * inform our link partner of our
10104 * flow control settings, even if the
10105 * flow control is forced. Let
10106 * tg3_adjust_link() do the final
10107 * flow control setup.
10108 */
10109 return phy_start_aneg(phydev);
10113 if (!epause->autoneg)
10114 tg3_setup_flow_control(tp, 0, 0);
10116 tp->link_config.orig_advertising &=
10117 ~(ADVERTISED_Pause |
10118 ADVERTISED_Asym_Pause);
10119 tp->link_config.orig_advertising |= newadv;
10124 if (netif_running(dev)) {
10125 tg3_netif_stop(tp);
10129 tg3_full_lock(tp, irq_sync);
10131 if (epause->autoneg)
10132 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10133 else
10134 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10135 if (epause->rx_pause)
10136 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10137 else
10138 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10139 if (epause->tx_pause)
10140 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10141 else
10142 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10144 if (netif_running(dev)) {
10145 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10146 err = tg3_restart_hw(tp, 1);
10148 tg3_netif_start(tp);
10151 tg3_full_unlock(tp);
10157 static u32 tg3_get_rx_csum(struct net_device *dev)
10159 struct tg3 *tp = netdev_priv(dev);
10160 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10163 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10165 struct tg3 *tp = netdev_priv(dev);
10167 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10173 spin_lock_bh(&tp->lock);
10174 if (data)
10175 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10176 else
10177 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10178 spin_unlock_bh(&tp->lock);
10183 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10185 struct tg3 *tp = netdev_priv(dev);
10187 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10193 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10194 ethtool_op_set_tx_ipv6_csum(dev, data);
10195 else
10196 ethtool_op_set_tx_csum(dev, data);
10201 static int tg3_get_sset_count (struct net_device *dev, int sset)
10205 return TG3_NUM_TEST;
10207 return TG3_NUM_STATS;
10209 return -EOPNOTSUPP;
10213 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
10215 switch (stringset) {
10217 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10220 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10223 WARN_ON(1); /* we need a WARN() */
10228 static int tg3_phys_id(struct net_device *dev, u32 data)
10230 struct tg3 *tp = netdev_priv(dev);
10233 if (!netif_running(tp->dev))
10234 return -EAGAIN;
10236 if (data == 0)
10237 data = UINT_MAX / 2;
10239 for (i = 0; i < (data * 2); i++) {
10241 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10242 LED_CTRL_1000MBPS_ON |
10243 LED_CTRL_100MBPS_ON |
10244 LED_CTRL_10MBPS_ON |
10245 LED_CTRL_TRAFFIC_OVERRIDE |
10246 LED_CTRL_TRAFFIC_BLINK |
10247 LED_CTRL_TRAFFIC_LED);
10250 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10251 LED_CTRL_TRAFFIC_OVERRIDE);
10253 if (msleep_interruptible(500))
10256 tw32(MAC_LED_CTRL, tp->led_ctrl);
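/* The identify loop above runs data * 2 half-second steps, toggling
 * between forcing all speed/traffic LEDs on and forcing them off via
 * LNKLED_OVERRIDE, then restores the saved led_ctrl value; 'ethtool
 * -p ethX N' therefore blinks the port LEDs for roughly N seconds.
 */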
10260 static void tg3_get_ethtool_stats (struct net_device *dev,
10261 struct ethtool_stats *estats, u64 *tmp_stats)
10263 struct tg3 *tp = netdev_priv(dev);
10264 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10267 #define NVRAM_TEST_SIZE 0x100
10268 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10269 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10270 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10271 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10272 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10274 static int tg3_test_nvram(struct tg3 *tp)
10278 int i, j, k, err = 0, size;
10280 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10283 if (tg3_nvram_read(tp, 0, &magic) != 0)
10286 if (magic == TG3_EEPROM_MAGIC)
10287 size = NVRAM_TEST_SIZE;
10288 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10289 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10290 TG3_EEPROM_SB_FORMAT_1) {
10291 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10292 case TG3_EEPROM_SB_REVISION_0:
10293 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10295 case TG3_EEPROM_SB_REVISION_2:
10296 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10298 case TG3_EEPROM_SB_REVISION_3:
10299 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10306 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10307 size = NVRAM_SELFBOOT_HW_SIZE;
10311 buf = kmalloc(size, GFP_KERNEL);
10316 for (i = 0, j = 0; i < size; i += 4, j++) {
10317 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10324 /* Selfboot format */
10325 magic = be32_to_cpu(buf[0]);
10326 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10327 TG3_EEPROM_MAGIC_FW) {
10328 u8 *buf8 = (u8 *) buf, csum8 = 0;
10330 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10331 TG3_EEPROM_SB_REVISION_2) {
10332 /* For rev 2, the csum doesn't include the MBA. */
10333 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10335 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10338 for (i = 0; i < size; i++)
10351 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10352 TG3_EEPROM_MAGIC_HW) {
10353 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10354 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10355 u8 *buf8 = (u8 *) buf;
10357 /* Separate the parity bits and the data bytes. */
10358 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10359 if ((i == 0) || (i == 8)) {
10363 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10364 parity[k++] = buf8[i] & msk;
10367 else if (i == 16) {
10371 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10372 parity[k++] = buf8[i] & msk;
10375 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10376 parity[k++] = buf8[i] & msk;
10379 data[j++] = buf8[i];
10383 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10384 u8 hw8 = hweight8(data[i]);
10386 if ((hw8 & 0x1) && parity[i])
10388 else if (!(hw8 & 0x1) && !parity[i])
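/* Parity rule enforced above: a data byte together with its stored
 * parity bit must carry an odd number of set bits overall.  E.g.
 * data 0x03 (two bits set) is only valid with its parity bit set,
 * while data 0x07 (three bits set) requires a clear parity bit;
 * either mismatch trips the hweight8() checks and fails the test.
 */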
10395 /* Bootstrap checksum at offset 0x10 */
10396 csum = calc_crc((unsigned char *) buf, 0x10);
10397 if (csum != be32_to_cpu(buf[0x10/4]))
10400 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10401 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10402 if (csum != be32_to_cpu(buf[0xfc/4]))
10412 #define TG3_SERDES_TIMEOUT_SEC 2
10413 #define TG3_COPPER_TIMEOUT_SEC 6
10415 static int tg3_test_link(struct tg3 *tp)
10419 if (!netif_running(tp->dev))
10422 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10423 max = TG3_SERDES_TIMEOUT_SEC;
10424 else
10425 max = TG3_COPPER_TIMEOUT_SEC;
10427 for (i = 0; i < max; i++) {
10428 if (netif_carrier_ok(tp->dev))
10431 if (msleep_interruptible(1000))
10438 /* Only test the commonly used registers */
10439 static int tg3_test_registers(struct tg3 *tp)
10441 int i, is_5705, is_5750;
10442 u32 offset, read_mask, write_mask, val, save_val, read_val;
10446 #define TG3_FL_5705 0x1
10447 #define TG3_FL_NOT_5705 0x2
10448 #define TG3_FL_NOT_5788 0x4
10449 #define TG3_FL_NOT_5750 0x8
10453 /* MAC Control Registers */
10454 { MAC_MODE, TG3_FL_NOT_5705,
10455 0x00000000, 0x00ef6f8c },
10456 { MAC_MODE, TG3_FL_5705,
10457 0x00000000, 0x01ef6b8c },
10458 { MAC_STATUS, TG3_FL_NOT_5705,
10459 0x03800107, 0x00000000 },
10460 { MAC_STATUS, TG3_FL_5705,
10461 0x03800100, 0x00000000 },
10462 { MAC_ADDR_0_HIGH, 0x0000,
10463 0x00000000, 0x0000ffff },
10464 { MAC_ADDR_0_LOW, 0x0000,
10465 0x00000000, 0xffffffff },
10466 { MAC_RX_MTU_SIZE, 0x0000,
10467 0x00000000, 0x0000ffff },
10468 { MAC_TX_MODE, 0x0000,
10469 0x00000000, 0x00000070 },
10470 { MAC_TX_LENGTHS, 0x0000,
10471 0x00000000, 0x00003fff },
10472 { MAC_RX_MODE, TG3_FL_NOT_5705,
10473 0x00000000, 0x000007fc },
10474 { MAC_RX_MODE, TG3_FL_5705,
10475 0x00000000, 0x000007dc },
10476 { MAC_HASH_REG_0, 0x0000,
10477 0x00000000, 0xffffffff },
10478 { MAC_HASH_REG_1, 0x0000,
10479 0x00000000, 0xffffffff },
10480 { MAC_HASH_REG_2, 0x0000,
10481 0x00000000, 0xffffffff },
10482 { MAC_HASH_REG_3, 0x0000,
10483 0x00000000, 0xffffffff },
10485 /* Receive Data and Receive BD Initiator Control Registers. */
10486 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10487 0x00000000, 0xffffffff },
10488 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10489 0x00000000, 0xffffffff },
10490 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10491 0x00000000, 0x00000003 },
10492 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10493 0x00000000, 0xffffffff },
10494 { RCVDBDI_STD_BD+0, 0x0000,
10495 0x00000000, 0xffffffff },
10496 { RCVDBDI_STD_BD+4, 0x0000,
10497 0x00000000, 0xffffffff },
10498 { RCVDBDI_STD_BD+8, 0x0000,
10499 0x00000000, 0xffff0002 },
10500 { RCVDBDI_STD_BD+0xc, 0x0000,
10501 0x00000000, 0xffffffff },
10503 /* Receive BD Initiator Control Registers. */
10504 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10505 0x00000000, 0xffffffff },
10506 { RCVBDI_STD_THRESH, TG3_FL_5705,
10507 0x00000000, 0x000003ff },
10508 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10509 0x00000000, 0xffffffff },
10511 /* Host Coalescing Control Registers. */
10512 { HOSTCC_MODE, TG3_FL_NOT_5705,
10513 0x00000000, 0x00000004 },
10514 { HOSTCC_MODE, TG3_FL_5705,
10515 0x00000000, 0x000000f6 },
10516 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10517 0x00000000, 0xffffffff },
10518 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10519 0x00000000, 0x000003ff },
10520 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10521 0x00000000, 0xffffffff },
10522 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10523 0x00000000, 0x000003ff },
10524 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10525 0x00000000, 0xffffffff },
10526 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10527 0x00000000, 0x000000ff },
10528 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10529 0x00000000, 0xffffffff },
10530 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10531 0x00000000, 0x000000ff },
10532 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10533 0x00000000, 0xffffffff },
10534 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10535 0x00000000, 0xffffffff },
10536 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10537 0x00000000, 0xffffffff },
10538 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10539 0x00000000, 0x000000ff },
10540 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10541 0x00000000, 0xffffffff },
10542 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10543 0x00000000, 0x000000ff },
10544 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10545 0x00000000, 0xffffffff },
10546 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10547 0x00000000, 0xffffffff },
10548 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10549 0x00000000, 0xffffffff },
10550 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10551 0x00000000, 0xffffffff },
10552 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10553 0x00000000, 0xffffffff },
10554 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10555 0xffffffff, 0x00000000 },
10556 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10557 0xffffffff, 0x00000000 },
10559 /* Buffer Manager Control Registers. */
10560 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10561 0x00000000, 0x007fff80 },
10562 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10563 0x00000000, 0x007fffff },
10564 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10565 0x00000000, 0x0000003f },
10566 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10567 0x00000000, 0x000001ff },
10568 { BUFMGR_MB_HIGH_WATER, 0x0000,
10569 0x00000000, 0x000001ff },
10570 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10571 0xffffffff, 0x00000000 },
10572 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10573 0xffffffff, 0x00000000 },
10575 /* Mailbox Registers */
10576 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10577 0x00000000, 0x000001ff },
10578 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10579 0x00000000, 0x000001ff },
10580 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10581 0x00000000, 0x000007ff },
10582 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10583 0x00000000, 0x000001ff },
10585 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10588 is_5705 = is_5750 = 0;
10589 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9590 is_5705 = 1;
9591 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9592 is_5750 = 1;
10595 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10596 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10599 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10602 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10603 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10606 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10609 offset = (u32) reg_tbl[i].offset;
10610 read_mask = reg_tbl[i].read_mask;
10611 write_mask = reg_tbl[i].write_mask;
10613 /* Save the original register content */
10614 save_val = tr32(offset);
10616 /* Determine the read-only value. */
10617 read_val = save_val & read_mask;
10619 /* Write zero to the register, then make sure the read-only bits
10620 * are not changed and the read/write bits are all zeros.
10624 val = tr32(offset);
10626 /* Test the read-only and read/write bits. */
10627 if (((val & read_mask) != read_val) || (val & write_mask))
10630 /* Write ones to all the bits defined by RdMask and WrMask, then
10631 * make sure the read-only bits are not changed and the
10632 * read/write bits are all ones.
10634 tw32(offset, read_mask | write_mask);
10636 val = tr32(offset);
10638 /* Test the read-only bits. */
10639 if ((val & read_mask) != read_val)
10642 /* Test the read/write bits. */
10643 if ((val & write_mask) != write_mask)
10646 tw32(offset, save_val);
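/* Example row from reg_tbl: { MAC_ADDR_0_HIGH, 0x0000, 0x00000000,
 * 0x0000ffff } declares no read-only bits and the low 16 bits
 * writable, so the register must read back 0 after the zero write
 * and 0x0000ffff after the mask write; any other value fails.
 */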
10652 if (netif_msg_hw(tp))
10653 netdev_err(tp->dev,
10654 "Register test failed at offset %x\n", offset);
10655 tw32(offset, save_val);
10659 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10661 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10665 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10666 for (j = 0; j < len; j += 4) {
10669 tg3_write_mem(tp, offset + j, test_pattern[i]);
10670 tg3_read_mem(tp, offset + j, &val);
10671 if (val != test_pattern[i])
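/* Each SRAM window is swept with the three patterns above (all
 * zeroes, all ones and the alternating word 0xaa55a55a), a simple
 * combination that catches stuck-at bits and many shorted data or
 * address lines.
 */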
10678 static int tg3_test_memory(struct tg3 *tp)
10680 static struct mem_entry {
10683 } mem_tbl_570x[] = {
10684 { 0x00000000, 0x00b50},
10685 { 0x00002000, 0x1c000},
10686 { 0xffffffff, 0x00000}
10687 }, mem_tbl_5705[] = {
10688 { 0x00000100, 0x0000c},
10689 { 0x00000200, 0x00008},
10690 { 0x00004000, 0x00800},
10691 { 0x00006000, 0x01000},
10692 { 0x00008000, 0x02000},
10693 { 0x00010000, 0x0e000},
10694 { 0xffffffff, 0x00000}
10695 }, mem_tbl_5755[] = {
10696 { 0x00000200, 0x00008},
10697 { 0x00004000, 0x00800},
10698 { 0x00006000, 0x00800},
10699 { 0x00008000, 0x02000},
10700 { 0x00010000, 0x0c000},
10701 { 0xffffffff, 0x00000}
10702 }, mem_tbl_5906[] = {
10703 { 0x00000200, 0x00008},
10704 { 0x00004000, 0x00400},
10705 { 0x00006000, 0x00400},
10706 { 0x00008000, 0x01000},
10707 { 0x00010000, 0x01000},
10708 { 0xffffffff, 0x00000}
10709 }, mem_tbl_5717[] = {
10710 { 0x00000200, 0x00008},
10711 { 0x00010000, 0x0a000},
10712 { 0x00020000, 0x13c00},
10713 { 0xffffffff, 0x00000}
10714 }, mem_tbl_57765[] = {
10715 { 0x00000200, 0x00008},
10716 { 0x00004000, 0x00800},
10717 { 0x00006000, 0x09800},
10718 { 0x00010000, 0x0a000},
10719 { 0xffffffff, 0x00000}
10721 struct mem_entry *mem_tbl;
10725 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
10726 mem_tbl = mem_tbl_5717;
10727 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10728 mem_tbl = mem_tbl_57765;
10729 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10730 mem_tbl = mem_tbl_5755;
10731 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10732 mem_tbl = mem_tbl_5906;
10733 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10734 mem_tbl = mem_tbl_5705;
10736 mem_tbl = mem_tbl_570x;
10738 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10739 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10740 mem_tbl[i].len)) != 0)
10747 #define TG3_MAC_LOOPBACK 0
10748 #define TG3_PHY_LOOPBACK 1
10750 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10752 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10753 u32 desc_idx, coal_now;
10754 struct sk_buff *skb, *rx_skb;
10757 int num_pkts, tx_len, rx_len, i, err;
10758 struct tg3_rx_buffer_desc *desc;
10759 struct tg3_napi *tnapi, *rnapi;
10760 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10762 tnapi = &tp->napi[0];
10763 rnapi = &tp->napi[0];
10764 if (tp->irq_cnt > 1) {
10765 rnapi = &tp->napi[1];
10766 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10767 tnapi = &tp->napi[1];
10769 coal_now = tnapi->coal_now | rnapi->coal_now;
10771 if (loopback_mode == TG3_MAC_LOOPBACK) {
10772 /* HW errata - mac loopback fails in some cases on 5780.
10773 * Normal traffic and PHY loopback are not affected by
10774 * errata.
10775 */
10776 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10779 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10780 MAC_MODE_PORT_INT_LPBACK;
10781 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10782 mac_mode |= MAC_MODE_LINK_POLARITY;
10783 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10784 mac_mode |= MAC_MODE_PORT_MODE_MII;
10785 else
10786 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10787 tw32(MAC_MODE, mac_mode);
10788 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10791 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10792 tg3_phy_fet_toggle_apd(tp, false);
10793 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10794 } else
10795 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10797 tg3_phy_toggle_automdix(tp, 0);
10799 tg3_writephy(tp, MII_BMCR, val);
10802 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10803 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10804 tg3_writephy(tp, MII_TG3_FET_PTEST,
10805 MII_TG3_FET_PTEST_FRC_TX_LINK |
10806 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10807 /* The write needs to be flushed for the AC131 */
10808 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10809 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10810 mac_mode |= MAC_MODE_PORT_MODE_MII;
10811 } else
10812 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10814 /* reset to prevent losing 1st rx packet intermittently */
10815 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10816 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10817 udelay(10);
10818 tw32_f(MAC_RX_MODE, tp->rx_mode);
10820 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10821 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10822 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10823 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10824 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10825 mac_mode |= MAC_MODE_LINK_POLARITY;
10826 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10827 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10829 tw32(MAC_MODE, mac_mode);
10837 skb = netdev_alloc_skb(tp->dev, tx_len);
10841 tx_data = skb_put(skb, tx_len);
10842 memcpy(tx_data, tp->dev->dev_addr, 6);
10843 memset(tx_data + 6, 0x0, 8);
10845 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10847 for (i = 14; i < tx_len; i++)
10848 tx_data[i] = (u8) (i & 0xff);
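/* Loopback frame layout: bytes 0-5 carry the device's own MAC
 * address (so the receive filter accepts the looped-back frame),
 * bytes 6-13 are zeroed in place of source address and ethertype,
 * and bytes 14 onward hold the 0x0e, 0x0f, ... ramp that the
 * receive check at the end of this function verifies byte by byte.
 */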
10850 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10851 if (pci_dma_mapping_error(tp->pdev, map)) {
10852 dev_kfree_skb(skb);
10856 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10861 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10865 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10870 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10871 tr32_mailbox(tnapi->prodmbox);
10875 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10876 for (i = 0; i < 35; i++) {
10877 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10882 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10883 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10884 if ((tx_idx == tnapi->tx_prod) &&
10885 (rx_idx == (rx_start_idx + num_pkts)))
10889 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10890 dev_kfree_skb(skb);
10892 if (tx_idx != tnapi->tx_prod)
10895 if (rx_idx != rx_start_idx + num_pkts)
10898 desc = &rnapi->rx_rcb[rx_start_idx];
10899 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10900 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10901 if (opaque_key != RXD_OPAQUE_RING_STD)
10904 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10905 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10908 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10909 if (rx_len != tx_len)
10912 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10914 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10915 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10917 for (i = 14; i < tx_len; i++) {
10918 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10923 /* tg3_free_rings will unmap and free the rx_skb */
10928 #define TG3_MAC_LOOPBACK_FAILED 1
10929 #define TG3_PHY_LOOPBACK_FAILED 2
10930 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10931 TG3_PHY_LOOPBACK_FAILED)
10933 static int tg3_test_loopback(struct tg3 *tp)
10938 if (!netif_running(tp->dev))
10939 return TG3_LOOPBACK_FAILED;
10941 err = tg3_reset_hw(tp, 1);
10942 if (err)
10943 return TG3_LOOPBACK_FAILED;
10945 /* Turn off gphy autopowerdown. */
10946 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10947 tg3_phy_toggle_apd(tp, false);
10949 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10953 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10955 /* Wait for up to 40 microseconds to acquire lock. */
10956 for (i = 0; i < 4; i++) {
10957 status = tr32(TG3_CPMU_MUTEX_GNT);
10958 if (status == CPMU_MUTEX_GNT_DRIVER)
10963 if (status != CPMU_MUTEX_GNT_DRIVER)
10964 return TG3_LOOPBACK_FAILED;
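/* The CPMU mutex is a simple hardware handshake: write the driver's
 * ID to the request register, then poll the grant register (up to
 * four tries, roughly 40 us total) until the ID reads back.  The
 * write to TG3_CPMU_MUTEX_GNT further down releases the lock again.
 */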
10966 /* Turn off link-based power management. */
10967 cpmuctrl = tr32(TG3_CPMU_CTRL);
10968 tw32(TG3_CPMU_CTRL,
10969 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10970 CPMU_CTRL_LINK_AWARE_MODE));
10973 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10974 err |= TG3_MAC_LOOPBACK_FAILED;
10976 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10977 tw32(TG3_CPMU_CTRL, cpmuctrl);
10979 /* Release the mutex */
10980 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10983 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10984 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10985 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10986 err |= TG3_PHY_LOOPBACK_FAILED;
10989 /* Re-enable gphy autopowerdown. */
10990 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10991 tg3_phy_toggle_apd(tp, true);
10996 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10999 struct tg3 *tp = netdev_priv(dev);
11001 if (tp->link_config.phy_is_low_power)
11002 tg3_set_power_state(tp, PCI_D0);
11004 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11006 if (tg3_test_nvram(tp) != 0) {
11007 etest->flags |= ETH_TEST_FL_FAILED;
11010 if (tg3_test_link(tp) != 0) {
11011 etest->flags |= ETH_TEST_FL_FAILED;
11014 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11015 int err, err2 = 0, irq_sync = 0;
11017 if (netif_running(dev)) {
11019 tg3_netif_stop(tp);
11023 tg3_full_lock(tp, irq_sync);
11025 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11026 err = tg3_nvram_lock(tp);
11027 tg3_halt_cpu(tp, RX_CPU_BASE);
11028 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11029 tg3_halt_cpu(tp, TX_CPU_BASE);
11031 tg3_nvram_unlock(tp);
11033 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
11036 if (tg3_test_registers(tp) != 0) {
11037 etest->flags |= ETH_TEST_FL_FAILED;
11040 if (tg3_test_memory(tp) != 0) {
11041 etest->flags |= ETH_TEST_FL_FAILED;
11044 if ((data[4] = tg3_test_loopback(tp)) != 0)
11045 etest->flags |= ETH_TEST_FL_FAILED;
11047 tg3_full_unlock(tp);
11049 if (tg3_test_interrupt(tp) != 0) {
11050 etest->flags |= ETH_TEST_FL_FAILED;
11054 tg3_full_lock(tp, 0);
11056 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11057 if (netif_running(dev)) {
11058 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11059 err2 = tg3_restart_hw(tp, 1);
11061 tg3_netif_start(tp);
11064 tg3_full_unlock(tp);
11066 if (irq_sync && !err2)
11069 if (tp->link_config.phy_is_low_power)
11070 tg3_set_power_state(tp, PCI_D3hot);
11074 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11076 struct mii_ioctl_data *data = if_mii(ifr);
11077 struct tg3 *tp = netdev_priv(dev);
11080 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11081 struct phy_device *phydev;
11082 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
11083 return -EAGAIN;
11084 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11085 return phy_mii_ioctl(phydev, data, cmd);
11090 data->phy_id = tp->phy_addr;
11093 case SIOCGMIIREG: {
11096 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11097 break; /* We have no PHY */
11099 if (tp->link_config.phy_is_low_power)
11100 return -EAGAIN;
11102 spin_lock_bh(&tp->lock);
11103 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11104 spin_unlock_bh(&tp->lock);
11106 data->val_out = mii_regval;
11112 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11113 break; /* We have no PHY */
11115 if (tp->link_config.phy_is_low_power)
11116 return -EAGAIN;
11118 spin_lock_bh(&tp->lock);
11119 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11120 spin_unlock_bh(&tp->lock);
11128 return -EOPNOTSUPP;
11131 #if TG3_VLAN_TAG_USED
11132 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11134 struct tg3 *tp = netdev_priv(dev);
11136 if (!netif_running(dev)) {
11141 tg3_netif_stop(tp);
11143 tg3_full_lock(tp, 0);
11147 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11148 __tg3_set_rx_mode(dev);
11150 tg3_netif_start(tp);
11152 tg3_full_unlock(tp);
11156 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11158 struct tg3 *tp = netdev_priv(dev);
11160 memcpy(ec, &tp->coal, sizeof(*ec));
11164 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11166 struct tg3 *tp = netdev_priv(dev);
11167 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11168 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11170 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11171 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11172 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11173 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11174 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11177 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11178 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11179 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11180 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11181 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11182 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11183 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11184 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11185 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11186 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11189 /* No rx interrupts will be generated if both are zero */
11190 if ((ec->rx_coalesce_usecs == 0) &&
11191 (ec->rx_max_coalesced_frames == 0))
11194 /* No tx interrupts will be generated if both are zero */
11195 if ((ec->tx_coalesce_usecs == 0) &&
11196 (ec->tx_max_coalesced_frames == 0))
11199 /* Only copy relevant parameters, ignore all others. */
11200 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11201 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11202 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11203 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11204 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11205 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11206 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11207 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11208 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11210 if (netif_running(dev)) {
11211 tg3_full_lock(tp, 0);
11212 __tg3_set_coalesce(tp, &tp->coal);
11213 tg3_full_unlock(tp);
11218 static const struct ethtool_ops tg3_ethtool_ops = {
11219 .get_settings = tg3_get_settings,
11220 .set_settings = tg3_set_settings,
11221 .get_drvinfo = tg3_get_drvinfo,
11222 .get_regs_len = tg3_get_regs_len,
11223 .get_regs = tg3_get_regs,
11224 .get_wol = tg3_get_wol,
11225 .set_wol = tg3_set_wol,
11226 .get_msglevel = tg3_get_msglevel,
11227 .set_msglevel = tg3_set_msglevel,
11228 .nway_reset = tg3_nway_reset,
11229 .get_link = ethtool_op_get_link,
11230 .get_eeprom_len = tg3_get_eeprom_len,
11231 .get_eeprom = tg3_get_eeprom,
11232 .set_eeprom = tg3_set_eeprom,
11233 .get_ringparam = tg3_get_ringparam,
11234 .set_ringparam = tg3_set_ringparam,
11235 .get_pauseparam = tg3_get_pauseparam,
11236 .set_pauseparam = tg3_set_pauseparam,
11237 .get_rx_csum = tg3_get_rx_csum,
11238 .set_rx_csum = tg3_set_rx_csum,
11239 .set_tx_csum = tg3_set_tx_csum,
11240 .set_sg = ethtool_op_set_sg,
11241 .set_tso = tg3_set_tso,
11242 .self_test = tg3_self_test,
11243 .get_strings = tg3_get_strings,
11244 .phys_id = tg3_phys_id,
11245 .get_ethtool_stats = tg3_get_ethtool_stats,
11246 .get_coalesce = tg3_get_coalesce,
11247 .set_coalesce = tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;
	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;
	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;
		if (val == magic)
			break;
		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
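/* For example, on a 128 KB part the reads at 0x10, 0x20, ... find no magic;
 * the read at offset 0x20000 wraps around to offset 0 and returns the
 * signature, so the loop exits with cursize == 0x20000 (128 KB).
 */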
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
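/* Worked example (little-endian CPU): if the image stores the size 0x0200
 * (512, i.e. 512 KB) at offset 0xf2, the byteswapping read path leaves
 * 0x0002 in the low 16 bits; swab16() restores 0x0200 and the multiply by
 * 1024 yields the size in bytes.
 */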
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
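/* All of the tg3_get_*_nvram_info() variants that follow share one pattern:
 * decode the vendor/part field of NVRAM_CFG1 into a JEDEC vendor id, a page
 * size, and the BUFFERED/FLASH flags, plus a total size where the part
 * encodes one. Only the bit-to-part mappings differ per ASIC generation.
 */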
11438 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11440 u32 nvcfg1, protect = 0;
11442 nvcfg1 = tr32(NVRAM_CFG1);
11444 /* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
11452 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11453 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11454 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11455 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11456 tp->nvram_jedecnum = JEDEC_ATMEL;
11457 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11458 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11459 tp->nvram_pagesize = 264;
11460 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11461 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11462 tp->nvram_size = (protect ? 0x3e200 :
11463 TG3_NVRAM_SIZE_512KB);
11464 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11465 tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
11471 case FLASH_5752VENDOR_ST_M45PE10:
11472 case FLASH_5752VENDOR_ST_M45PE20:
11473 case FLASH_5752VENDOR_ST_M45PE40:
11474 tp->nvram_jedecnum = JEDEC_ST;
11475 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11476 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11477 tp->nvram_pagesize = 256;
11478 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11479 tp->nvram_size = (protect ?
11480 TG3_NVRAM_SIZE_64KB :
11481 TG3_NVRAM_SIZE_128KB);
11482 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11483 tp->nvram_size = (protect ?
11484 TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
11494 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11498 nvcfg1 = tr32(NVRAM_CFG1);
11500 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11501 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11502 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11503 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11504 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11505 tp->nvram_jedecnum = JEDEC_ATMEL;
11506 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11507 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11509 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11510 tw32(NVRAM_CFG1, nvcfg1);
11512 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11513 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11514 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11515 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11516 tp->nvram_jedecnum = JEDEC_ATMEL;
11517 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11518 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11519 tp->nvram_pagesize = 264;
11521 case FLASH_5752VENDOR_ST_M45PE10:
11522 case FLASH_5752VENDOR_ST_M45PE20:
11523 case FLASH_5752VENDOR_ST_M45PE40:
11524 tp->nvram_jedecnum = JEDEC_ST;
11525 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11526 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11527 tp->nvram_pagesize = 256;
11532 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11534 u32 nvcfg1, protect = 0;
11536 nvcfg1 = tr32(NVRAM_CFG1);
11538 /* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
11546 case FLASH_5761VENDOR_ATMEL_ADB021D:
11547 case FLASH_5761VENDOR_ATMEL_ADB041D:
11548 case FLASH_5761VENDOR_ATMEL_ADB081D:
11549 case FLASH_5761VENDOR_ATMEL_ADB161D:
11550 case FLASH_5761VENDOR_ATMEL_MDB021D:
11551 case FLASH_5761VENDOR_ATMEL_MDB041D:
11552 case FLASH_5761VENDOR_ATMEL_MDB081D:
11553 case FLASH_5761VENDOR_ATMEL_MDB161D:
11554 tp->nvram_jedecnum = JEDEC_ATMEL;
11555 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11556 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11557 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11558 tp->nvram_pagesize = 256;
11560 case FLASH_5761VENDOR_ST_A_M45PE20:
11561 case FLASH_5761VENDOR_ST_A_M45PE40:
11562 case FLASH_5761VENDOR_ST_A_M45PE80:
11563 case FLASH_5761VENDOR_ST_A_M45PE16:
11564 case FLASH_5761VENDOR_ST_M_M45PE20:
11565 case FLASH_5761VENDOR_ST_M_M45PE40:
11566 case FLASH_5761VENDOR_ST_M_M45PE80:
11567 case FLASH_5761VENDOR_ST_M_M45PE16:
11568 tp->nvram_jedecnum = JEDEC_ST;
11569 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11570 tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
11607 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11609 tp->nvram_jedecnum = JEDEC_ATMEL;
11610 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11611 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11614 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11618 nvcfg1 = tr32(NVRAM_CFG1);
11620 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11621 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11622 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11623 tp->nvram_jedecnum = JEDEC_ATMEL;
11624 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11625 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11627 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11628 tw32(NVRAM_CFG1, nvcfg1);
11630 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11631 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11632 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11633 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11634 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11635 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11636 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11637 tp->nvram_jedecnum = JEDEC_ATMEL;
11638 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11639 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11641 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11642 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11643 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11644 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11645 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11647 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11648 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11649 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11651 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11652 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11653 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11657 case FLASH_5752VENDOR_ST_M45PE10:
11658 case FLASH_5752VENDOR_ST_M45PE20:
11659 case FLASH_5752VENDOR_ST_M45PE40:
11660 tp->nvram_jedecnum = JEDEC_ST;
11661 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11662 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11664 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11665 case FLASH_5752VENDOR_ST_M45PE10:
11666 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11668 case FLASH_5752VENDOR_ST_M45PE20:
11669 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11671 case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
		return;
	}
11681 tg3_nvram_get_pagesize(tp, nvcfg1);
11682 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11683 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11687 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11691 nvcfg1 = tr32(NVRAM_CFG1);
11693 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11694 case FLASH_5717VENDOR_ATMEL_EEPROM:
11695 case FLASH_5717VENDOR_MICRO_EEPROM:
11696 tp->nvram_jedecnum = JEDEC_ATMEL;
11697 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11698 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11700 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11701 tw32(NVRAM_CFG1, nvcfg1);
11703 case FLASH_5717VENDOR_ATMEL_MDB011D:
11704 case FLASH_5717VENDOR_ATMEL_ADB011B:
11705 case FLASH_5717VENDOR_ATMEL_ADB011D:
11706 case FLASH_5717VENDOR_ATMEL_MDB021D:
11707 case FLASH_5717VENDOR_ATMEL_ADB021B:
11708 case FLASH_5717VENDOR_ATMEL_ADB021D:
11709 case FLASH_5717VENDOR_ATMEL_45USPT:
11710 tp->nvram_jedecnum = JEDEC_ATMEL;
11711 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11712 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11714 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11715 case FLASH_5717VENDOR_ATMEL_MDB021D:
11716 case FLASH_5717VENDOR_ATMEL_ADB021B:
11717 case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
11725 case FLASH_5717VENDOR_ST_M_M25PE10:
11726 case FLASH_5717VENDOR_ST_A_M25PE10:
11727 case FLASH_5717VENDOR_ST_M_M45PE10:
11728 case FLASH_5717VENDOR_ST_A_M45PE10:
11729 case FLASH_5717VENDOR_ST_M_M25PE20:
11730 case FLASH_5717VENDOR_ST_A_M25PE20:
11731 case FLASH_5717VENDOR_ST_M_M45PE20:
11732 case FLASH_5717VENDOR_ST_A_M45PE20:
11733 case FLASH_5717VENDOR_ST_25USPT:
11734 case FLASH_5717VENDOR_ST_45USPT:
11735 tp->nvram_jedecnum = JEDEC_ST;
11736 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11737 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11739 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11740 case FLASH_5717VENDOR_ST_M_M25PE20:
11741 case FLASH_5717VENDOR_ST_A_M25PE20:
11742 case FLASH_5717VENDOR_ST_M_M45PE20:
11743 case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
		return;
	}
11756 tg3_nvram_get_pagesize(tp, nvcfg1);
11757 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11758 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11761 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);
11771 /* Enable seeprom accesses. */
11772 tw32_f(GRC_LOCAL_CTRL,
11773 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11776 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11777 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11778 tp->tg3_flags |= TG3_FLAG_NVRAM;
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);
11787 tp->nvram_size = 0;
11789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11790 tg3_get_5752_nvram_info(tp);
11791 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11792 tg3_get_5755_nvram_info(tp);
11793 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11796 tg3_get_5787_nvram_info(tp);
11797 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11798 tg3_get_5761_nvram_info(tp);
11799 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11800 tg3_get_5906_nvram_info(tp);
11801 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11802 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11803 tg3_get_57780_nvram_info(tp);
11804 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
			tg3_get_5717_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);
11809 if (tp->nvram_size == 0)
11810 tg3_get_nvram_size(tp);
		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	} else {
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;
		memcpy(&data, buf + i, 4);
		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format. We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
11842 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11844 val = tr32(GRC_EEPROM_ADDR);
11845 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			 EEPROM_ADDR_READ);
11849 tw32(GRC_EEPROM_ADDR, val |
11850 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11851 (addr & EEPROM_ADDR_ADDR_MASK) |
11852 EEPROM_ADDR_START |
11853 EEPROM_ADDR_WRITE);
11855 for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);
			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
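/* Note on the poll above: completion is signalled by EEPROM_ADDR_COMPLETE;
 * the loop allows up to 1000 iterations of msleep(1), i.e. roughly a one
 * second budget per 32-bit word before giving up with -EBUSY.
 */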
11871 /* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
					    u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;
11889 phy_addr = offset & ~pagemask;
11891 for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);
11909 offset = offset + (pagesize - page_off);
11911 tg3_enable_nvram_access(tp);
		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
11917 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;
11922 /* Erase the target page */
11923 tw32(NVRAM_ADDR, phy_addr);
11925 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11926 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;
11931 /* Issue another write enable to start the write. */
11932 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;
11937 for (j = 0; j < pagesize; j += 4) {
11940 data = *((__be32 *) (tmp + j));
11942 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11944 tw32(NVRAM_ADDR, phy_addr + j);
			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				    NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;
			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
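/* The unbuffered path above is a classic flash read-modify-write: read the
 * whole page containing the target offset into a bounce buffer, patch the
 * affected bytes, erase the page (write enable + ERASE), then stream the
 * page back one dword at a time with FIRST/LAST framing the burst.
 */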
11969 /* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;
11979 memcpy(&data, buf + i, 4);
11980 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11982 page_off = offset % tp->nvram_pagesize;
11984 phy_addr = tg3_nvram_phys_addr(tp, offset);
11986 tw32(NVRAM_ADDR, phy_addr);
11988 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11990 if ((page_off == 0) || (i == 0))
11991 nvram_cmd |= NVRAM_CMD_FIRST;
11992 if (page_off == (tp->nvram_pagesize - 4))
11993 nvram_cmd |= NVRAM_CMD_LAST;
11995 if (i == (len - 4))
11996 nvram_cmd |= NVRAM_CMD_LAST;
11998 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11999 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
12000 (tp->nvram_jedecnum == JEDEC_ST) &&
12001 (nvram_cmd & NVRAM_CMD_FIRST)) {
			if ((ret = tg3_nvram_exec_cmd(tp,
				      NVRAM_CMD_WREN | NVRAM_CMD_GO |
				      NVRAM_CMD_DONE)))
				break;
		}
12009 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12010 /* We always do complete word writes to eeprom. */
12011 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
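/* In the buffered path each dword is written individually; NVRAM_CMD_FIRST
 * marks the first dword of a page (or of the transfer) and NVRAM_CMD_LAST
 * the final dword of a page or of the transfer, so the controller can open
 * and close its internal page buffer at the right boundaries.
 */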
12020 /* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;
12041 tg3_enable_nvram_access(tp);
12042 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
12043 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
12044 tw32(NVRAM_WRITE1, 0x406);
12046 grc_mode = tr32(GRC_MODE);
12047 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12049 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
12050 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
							     buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
							       buf);
		}
12060 grc_mode = tr32(GRC_MODE);
12061 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12063 tg3_disable_nvram_access(tp);
12064 tg3_nvram_unlock(tp);
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
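/* Ordering sketch for tg3_nvram_write_block(): drop the write-protect GPIO,
 * pick the legacy SEEPROM path or take the NVRAM arbitration lock, gate the
 * actual writes with GRC_MODE_NVRAM_WR_ENABLE around the buffered or
 * unbuffered helper, then undo everything in reverse order either way.
 */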
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
12080 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12081 /* Broadcom boards. */
12082 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12083 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12084 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12085 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12086 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12087 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12088 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12089 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12090 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12091 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12092 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12093 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12094 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12095 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12096 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12097 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12098 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12099 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12100 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12101 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12102 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12103 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12106 { TG3PCI_SUBVENDOR_ID_3COM,
12107 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12108 { TG3PCI_SUBVENDOR_ID_3COM,
12109 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12110 { TG3PCI_SUBVENDOR_ID_3COM,
12111 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12112 { TG3PCI_SUBVENDOR_ID_3COM,
12113 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12114 { TG3PCI_SUBVENDOR_ID_3COM,
12115 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12118 { TG3PCI_SUBVENDOR_ID_DELL,
12119 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12120 { TG3PCI_SUBVENDOR_ID_DELL,
12121 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12122 { TG3PCI_SUBVENDOR_ID_DELL,
12123 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12124 { TG3PCI_SUBVENDOR_ID_DELL,
12125 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12127 /* Compaq boards. */
12128 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12129 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12130 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12131 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12132 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12133 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12134 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12135 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12136 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12137 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12140 { TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;
12148 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12149 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12150 tp->pdev->subsystem_vendor) &&
12151 (subsys_id_to_phy_id[i].subsys_devid ==
12152 tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}

	return NULL;
}
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so we need to make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12167 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12168 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12171 /* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
12174 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12175 tp->misc_host_ctrl);
12177 /* The memory arbiter has to be enabled in order for SRAM accesses
12178 * to succeed. Normally on powerup the tg3 chip firmware will make
12179 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
12182 val = tr32(MEMARB_MODE);
12183 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12185 tp->phy_id = TG3_PHY_ID_INVALID;
12186 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12188 /* Assume an onboard device and WOL capable by default. */
12189 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12191 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12192 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12193 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		val = tr32(VCPU_CFGSHDW);
12197 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12198 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12199 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12200 (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		goto done;
	}
12205 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12206 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12207 u32 nic_cfg, led_cfg;
12208 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12209 int eeprom_phy_serdes = 0;
12211 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12212 tp->nic_sram_data_cfg = nic_cfg;
12214 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12215 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12216 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12217 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12218 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12219 (ver > 0) && (ver < 0x100))
12220 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12222 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12223 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12225 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12226 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12227 eeprom_phy_serdes = 1;
12229 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12230 if (nic_phy_id != 0) {
12231 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12232 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12234 eeprom_phy_id = (id1 >> 16) << 10;
12235 eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
12241 if (eeprom_phy_serdes) {
12242 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
12243 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12244 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12246 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12249 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12250 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12251 SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12258 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12261 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12262 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12265 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12266 tp->led_ctrl = LED_CTRL_MODE_MAC;
12268 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;
12279 case SHASTA_EXT_LED_SHARED:
12280 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12281 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12282 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12283 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12284 LED_CTRL_MODE_PHY_2);
12287 case SHASTA_EXT_LED_MAC:
12288 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12291 case SHASTA_EXT_LED_COMBO:
12292 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12293 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12294 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12295 LED_CTRL_MODE_PHY_2);
12300 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12301 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12302 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12303 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12305 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12306 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12308 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12309 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12310 if ((tp->pdev->subsystem_vendor ==
12311 PCI_VENDOR_ID_ARIMA) &&
12312 (tp->pdev->subsystem_device == 0x205a ||
12313 tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
12320 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12321 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12322 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12323 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12326 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12327 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12328 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12330 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
12331 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12332 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12334 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12335 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12336 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12338 if (cfg2 & (1 << 17))
12339 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
12344 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
12346 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12347 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12348 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12349 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12355 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12356 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12359 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12360 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12361 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12362 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12363 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}

done:
12367 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
	device_set_wakeup_enable(&tp->pdev->dev,
				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
12391 /* Read the gphy configuration from the OTP region of the chip. The gphy
12392 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
12395 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12397 u32 bhalf_otp, thalf_otp;
12399 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;
12404 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;
12409 thalf_otp = tr32(OTP_READ_DATA);
12411 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;
12416 bhalf_otp = tr32(OTP_READ_DATA);
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
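/* Merge illustration: if the first read returns 0xAAAABBBB and the second
 * returns 0xCCCCDDDD, the gphy config straddling the word boundary is
 * (0xBBBB << 16) | 0xCCCC = 0xBBBBCCCC.
 */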
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;
12427 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12428 return tg3_phy_init(tp);
12430 /* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12435 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
12443 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12444 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12446 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12447 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12448 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}
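	/* The id just built packs both MII id registers into one 32-bit
	 * value: MII_PHYSID1 lands at bit 10 upward, the OUI bits of
	 * MII_PHYSID2 (mask 0xfc00) are shifted high, and its low 10
	 * model/revision bits sit at the bottom, mirroring the
	 * eeprom_phy_id layout in tg3_get_eeprom_hw_cfg() above.
	 */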
12453 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12454 tp->phy_id = hw_phy_id;
12455 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12456 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
12461 /* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;
12467 /* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}
12481 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
12482 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12483 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12484 u32 bmsr, adv_reg, tg3_ctrl, mask;
12486 tg3_readphy(tp, MII_BMSR, &bmsr);
12487 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12488 (bmsr & BMSR_LSTATUS))
12489 goto skip_phy_reset;
		err = tg3_phy_reset(tp);
		if (err)
			return err;
12495 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12496 ADVERTISE_100HALF | ADVERTISE_100FULL |
12497 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12499 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
12500 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12501 MII_TG3_CTRL_ADV_1000_FULL);
12502 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12503 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12504 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12505 MII_TG3_CTRL_ENABLE_AS_MASTER);
12508 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12509 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12510 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12511 if (!tg3_copper_is_advertising_all(tp, mask)) {
12512 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12514 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12515 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12517 tg3_writephy(tp, MII_BMCR,
12518 BMCR_ANENABLE | BMCR_ANRESTART);
12520 tg3_phy_set_wirespeed(tp);
12522 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12523 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12524 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}
12536 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
12537 tp->link_config.advertising =
12538 (ADVERTISED_1000baseT_Half |
12539 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
	unsigned int block_end, rosize, len;
	int i = 0;
	u32 magic;
12557 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12558 tg3_nvram_read(tp, 0x0, &magic))
12559 goto out_not_found;
12561 if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
			__be32 tmp;

			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
12569 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12570 goto out_not_found;
			memcpy(&vpd_data[i], &tmp, sizeof(tmp));
		}
	} else {
		ssize_t cnt;
		unsigned int pos = 0;
12578 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   TG3_NVM_VPD_LEN - pos,
					   &vpd_data[pos]);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto out_not_found;
		}
		if (pos != TG3_NVM_VPD_LEN)
			goto out_not_found;
	}
12591 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12592 PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;
12596 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12597 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12598 i += PCI_VPD_LRDT_TAG_SIZE;
12600 if (block_end > TG3_NVM_VPD_LEN)
12601 goto out_not_found;
12603 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12604 PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;
12608 len = pci_vpd_info_field_size(&vpd_data[i]);
12610 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12611 if (len > TG3_BPN_SIZE ||
12612 (len + i) > TG3_NVM_VPD_LEN)
12613 goto out_not_found;
	memcpy(tp->board_part_number, &vpd_data[i], len);

	return;

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12621 strcpy(tp->board_part_number, "BCM95906");
12622 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12623 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12624 strcpy(tp->board_part_number, "BCM57780");
12625 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12626 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12627 strcpy(tp->board_part_number, "BCM57760");
12628 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12629 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12630 strcpy(tp->board_part_number, "BCM57790");
12631 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12632 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12633 strcpy(tp->board_part_number, "BCM57788");
12634 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12635 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12636 strcpy(tp->board_part_number, "BCM57761");
12637 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12638 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12639 strcpy(tp->board_part_number, "BCM57765");
12640 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12641 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12642 strcpy(tp->board_part_number, "BCM57781");
12643 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12645 strcpy(tp->board_part_number, "BCM57785");
12646 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12648 strcpy(tp->board_part_number, "BCM57791");
12649 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12651 strcpy(tp->board_part_number, "BCM57795");
12653 strcpy(tp->board_part_number, "none");
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;
	return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;
12679 offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read(tp, offset, &val))
		return;
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	if (newver) {
		if (tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;
12696 offset = offset + ver_offset - start;
12697 for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;
12710 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12711 TG3_NVM_BCVER_MAJSFT;
12712 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
	}
}
12717 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12719 u32 val, major, minor;
12721 /* Use native endian representation */
12722 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12725 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12726 TG3_NVM_HWSB_CFG1_MAJSFT;
12727 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12728 TG3_NVM_HWSB_CFG1_MINSFT;
12730 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12733 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12735 u32 offset, major, minor, build;
12737 tp->fw_ver[0] = 's';
12738 tp->fw_ver[1] = 'b';
12739 tp->fw_ver[2] = '\0';
	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;
12767 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12768 TG3_EEPROM_SB_EDH_BLD_SHFT;
12769 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12770 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12771 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
	if (minor > 99 || build > 26)
		return;

	snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);

	if (build > 0) {
		tp->fw_ver[8] = 'a' + build - 1;
		tp->fw_ver[9] = '\0';
	}
}
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
12790 offset < TG3_NVM_DIR_END;
12791 offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;
12802 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12803 start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;
	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;
12812 offset += val - start;
12814 vlen = strlen(tp->fw_ver);
12816 tp->fw_ver[vlen++] = ',';
12817 tp->fw_ver[vlen++] = ' ';
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;
12845 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;
12849 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;
12853 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12855 vlen = strlen(tp->fw_ver);
12857 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12858 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12859 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12860 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12861 (apedata & APE_FW_VERSION_BLDMSK));
12864 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12868 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12869 tp->fw_ver[0] = 's';
12870 tp->fw_ver[1] = 'b';
		tp->fw_ver[2] = '\0';
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;
12879 if (val == TG3_EEPROM_MAGIC)
12880 tg3_read_bc_ver(tp);
12881 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12882 tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		goto done;

	tg3_read_mgmtfw_ver(tp);

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
12897 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12899 static int __devinit tg3_get_invariants(struct tg3 *tp)
12901 static struct pci_device_id write_reorder_chipsets[] = {
12902 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12903 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12904 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12905 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
			     PCI_DEVICE_ID_VIA_8385_0) },
		{ },
	};
12911 u32 pci_state_reg, grc_misc_cfg;
	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time so it never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
12923 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12924 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12925 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12927 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12928 * has the register indirect write enable bit set before
12929 * we try to access any of the MMIO registers. It is also
12930 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
12937 MISC_HOST_CTRL_CHIPREV_SHIFT);
12938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12939 u32 prod_id_asic_rev;
12941 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12942 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12943 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
12944 pci_read_config_dword(tp->pdev,
12945 TG3PCI_GEN2_PRODID_ASICREV,
12946 &prod_id_asic_rev);
12947 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12948 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12949 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12950 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12951 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12952 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12953 pci_read_config_dword(tp->pdev,
12954 TG3PCI_GEN15_PRODID_ASICREV,
12955 &prod_id_asic_rev);
12957 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12958 &prod_id_asic_rev);
12960 tp->pci_chip_rev_id = prod_id_asic_rev;
12963 /* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
12966 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12967 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12969 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12970 * we need to disable memory and use config. cycles
12971 * only to access all registers. The 5702/03 chips
12972 * can mistakenly decode the special cycles from the
12973 * ICH chipsets as memory write cycles, causing corruption
12974 * of register and memory space. Only certain ICH bridges
12975 * will drive special cycles with non-zero data during the
12976 * address phase which can fall within the 5703's address
12977 * range. This is not an ICH bug as the PCI spec allows
12978 * non-zero address during special cycles. However, only
12979 * these ICH bridges are known to drive non-zero addresses
12980 * during special cycles.
12982 * Since special cycles do not cross PCI bridges, we only
12983 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
12986 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12987 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12988 static struct tg3_dev_id {
12992 } ich_chipsets[] = {
12993 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12995 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12997 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12999 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13003 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13004 struct pci_dev *bridge = NULL;
13006 while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
13013 if (pci_id->rev != PCI_ANY_ID) {
13014 if (bridge->revision > pci_id->rev)
13017 if (bridge->subordinate &&
13018 (bridge->subordinate->number ==
13019 tp->pdev->bus->number)) {
				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
				pci_dev_put(bridge);
				break;
			}
		}
	}
13028 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13029 static struct tg3_dev_id {
13032 } bridge_chipsets[] = {
13033 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13034 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13037 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13038 struct pci_dev *bridge = NULL;
13040 while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
13048 if (bridge->subordinate &&
13049 (bridge->subordinate->number <=
13050 tp->pdev->bus->number) &&
13051 (bridge->subordinate->subordinate >=
13052 tp->pdev->bus->number)) {
				tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		}
	}
13060 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13061 * DMA addresses > 40-bit. This bridge may have other additional
13062 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
13066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13068 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13069 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13077 PCI_DEVICE_ID_SERVERWORKS_EPB,
13079 if (bridge && bridge->subordinate &&
13080 (bridge->subordinate->number <=
13081 tp->pdev->bus->number) &&
13082 (bridge->subordinate->subordinate >=
13083 tp->pdev->bus->number)) {
				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}
13091 /* Initialize misc host control in PCI block. */
13092 tp->misc_host_ctrl |= (misc_ctrl_reg &
13093 MISC_HOST_CTRL_CHIPREV);
13094 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13095 tp->misc_host_ctrl);
13097 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13099 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13100 tp->pdev_peer = tg3_find_peer(tp);
13102 /* Intentionally exclude ASIC_REV_5906 */
13103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13108 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13110 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13111 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13114 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13115 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13116 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13117 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13118 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13120 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13121 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13122 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13124 /* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
	else {
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
		tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
			tp->dev->features |= NETIF_F_IPV6_CSUM;
	}
13136 /* Determine TSO capabilities */
13137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13139 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13140 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13141 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13142 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13143 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13144 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13146 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13147 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13148 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13149 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13150 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13151 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13153 tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}
13160 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13161 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13162 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13163 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13164 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13165 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13166 tp->pdev_peer == tp->pdev))
13167 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13169 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13170 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13171 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13176 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13177 tp->irq_max = TG3_IRQ_MAX_VECS;
13181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13183 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13184 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13185 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13186 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13189 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13190 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13191 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13193 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13194 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13195 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13196 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
13201 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (tp->pcie_cap != 0) {
		u16 lnkctl;

		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13207 pcie_set_readrq(tp->pdev, 4096);
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13214 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13217 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13218 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13219 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13220 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13221 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13223 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13224 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13225 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13226 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13227 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13228 if (!tp->pcix_cap) {
13229 dev_err(&tp->pdev->dev,
13230 "Cannot find PCI-X capability, aborting\n");
13234 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13235 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13238 /* If we have an AMD 762 or VIA K8T800 chipset, reordering of
13239 * writes to the mailbox registers by the host controller can
13240 * cause serious problems. We read back from every mailbox
13241 * register write to force the writes to be posted to the chip
13242 * in order.
13244 if (pci_dev_present(write_reorder_chipsets) &&
13245 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13246 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
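/* PCI memory writes are posted: the host bridge may buffer them and,
 * on the chipsets above, reorder them. A read from the same device
 * cannot complete until every write ahead of it has been flushed, so
 * pairing each mailbox write with a read back restores ordering at
 * the cost of one extra PCI read per write.
 */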
13248 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13249 &tp->pci_cacheline_sz);
13250 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13251 &tp->pci_lat_timer);
13252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13253 tp->pci_lat_timer < 64) {
13254 tp->pci_lat_timer = 64;
13255 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13256 tp->pci_lat_timer);
13259 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13260 /* 5700 BX chips need to have their TX producer index
13261 * mailboxes written twice to work around a bug.
13263 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13265 /* If we are in PCI-X mode, enable the register write workaround.
13267 * The workaround is to use indirect register accesses
13268 * for all chip writes that are not destined for mailbox registers.
13270 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13273 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
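/* tg3_write_indirect_reg32(), installed further below, bypasses the
 * memory-mapped BAR and goes through PCI config space instead,
 * roughly (a sketch of the helper defined earlier in this file):
 *
 *	spin_lock_irqsave(&tp->indirect_lock, flags);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
 *	spin_unlock_irqrestore(&tp->indirect_lock, flags);
 *
 * Config cycles are not affected by the PCI-X target bug.
 */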
13275 /* The chip can have its power management PCI config
13276 * space registers clobbered due to this bug.
13277 * So explicitly force the chip into D0 here.
13279 pci_read_config_dword(tp->pdev,
13280 tp->pm_cap + PCI_PM_CTRL,
13282 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13283 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13284 pci_write_config_dword(tp->pdev,
13285 tp->pm_cap + PCI_PM_CTRL,
13288 /* Also, force SERR#/PERR# in PCI command. */
13289 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13290 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13291 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13295 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13296 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13297 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13298 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13300 /* Chip-specific fixup from Broadcom driver */
13301 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13302 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13303 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13304 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13307 /* Default fast path register access methods */
13308 tp->read32 = tg3_read32;
13309 tp->write32 = tg3_write32;
13310 tp->read32_mbox = tg3_read32;
13311 tp->write32_mbox = tg3_write32;
13312 tp->write32_tx_mbox = tg3_write32;
13313 tp->write32_rx_mbox = tg3_write32;
13315 /* Various workaround register access methods */
13316 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13317 tp->write32 = tg3_write_indirect_reg32;
13318 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13319 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13320 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13322 * Back-to-back register writes can cause problems on these
13323 * chips; the workaround is to read back all register writes
13324 * except those to mailbox registers.
13326 * See tg3_write_indirect_reg32().
13328 tp->write32 = tg3_write_flush_reg32;
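/* The flush variant is essentially (a sketch of the helper defined
 * earlier in this file):
 *
 *	writel(val, tp->regs + off);
 *	readl(tp->regs + off);		(stalls until the write lands)
 *
 * Mailbox writes keep the plain, faster tg3_write32().
 */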
13331 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13332 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13333 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13334 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13335 tp->write32_rx_mbox = tg3_write_flush_reg32;
13338 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13339 tp->read32 = tg3_read_indirect_reg32;
13340 tp->write32 = tg3_write_indirect_reg32;
13341 tp->read32_mbox = tg3_read_indirect_mbox;
13342 tp->write32_mbox = tg3_write_indirect_mbox;
13343 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13344 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13349 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13350 pci_cmd &= ~PCI_COMMAND_MEMORY;
13351 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13353 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13354 tp->read32_mbox = tg3_read32_mbox_5906;
13355 tp->write32_mbox = tg3_write32_mbox_5906;
13356 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13357 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13360 if (tp->write32 == tg3_write_indirect_reg32 ||
13361 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13362 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13363 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13364 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13366 /* Get eeprom hw config before calling tg3_set_power_state().
13367 * In particular, the TG3_FLG2_IS_NIC flag must be
13368 * determined before calling tg3_set_power_state() so that
13369 * we know whether or not to switch out of Vaux power.
13370 * When the flag is set, it means that GPIO1 is used for eeprom
13371 * write protect and also implies that it is a LOM where GPIOs
13372 * are not used to switch power.
13374 tg3_get_eeprom_hw_cfg(tp);
13376 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13377 /* Allow reads and writes to the
13378 * APE register and memory space.
13380 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13381 PCISTATE_ALLOW_APE_SHMEM_WR;
13382 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13386 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13387 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13390 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13392 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13394 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13395 * GPIO1 driven high will bring 5700's external PHY out of reset.
13396 * It is also used as eeprom write protect on LOMs.
13398 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13399 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13400 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13401 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13402 GRC_LCLCTRL_GPIO_OUTPUT1);
13403 /* Unused GPIO3 must be driven as output on 5752 because there
13404 * are no pull-up resistors on unused GPIO pins.
13406 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13407 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13412 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13414 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13415 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13416 /* Turn off the debug UART. */
13417 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13418 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13419 /* Keep VMain power. */
13420 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13421 GRC_LCLCTRL_GPIO_OUTPUT0;
13424 /* Force the chip into D0. */
13425 err = tg3_set_power_state(tp, PCI_D0);
13427 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13431 /* Derive initial jumbo mode from MTU assigned in
13432 * ether_setup() via the alloc_etherdev() call
13434 if (tp->dev->mtu > ETH_DATA_LEN &&
13435 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13436 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13438 /* Determine WakeOnLan speed to use. */
13439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13440 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13441 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13442 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13443 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13445 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13449 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
13451 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
13452 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13453 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13454 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13455 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13456 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
13457 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
13458 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
13460 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13461 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13462 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
13463 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13464 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
13466 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13467 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
13468 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13469 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13470 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
13471 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
13472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13476 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13477 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13478 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
13479 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13480 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
13482 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
13485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13486 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13487 tp->phy_otp = tg3_read_otp_phycfg(tp);
13488 if (tp->phy_otp == 0)
13489 tp->phy_otp = TG3_OTP_DEFAULT;
13492 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13493 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13495 tp->mi_mode = MAC_MI_MODE_BASE;
13497 tp->coalesce_mode = 0;
13498 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13499 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13500 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13502 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13503 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13504 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13506 err = tg3_mdio_init(tp);
13510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13511 (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
13512 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
13515 /* Initialize data/descriptor byte/word swapping. */
13516 val = tr32(GRC_MODE);
13517 val &= GRC_MODE_HOST_STACKUP;
13518 tw32(GRC_MODE, val | tp->grc_mode);
13520 tg3_switch_clocks(tp);
13522 /* Clear this out for sanity. */
13523 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13525 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13527 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13528 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13529 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13531 if (chiprevid == CHIPREV_ID_5701_A0 ||
13532 chiprevid == CHIPREV_ID_5701_B0 ||
13533 chiprevid == CHIPREV_ID_5701_B2 ||
13534 chiprevid == CHIPREV_ID_5701_B5) {
13535 void __iomem *sram_base;
13537 /* Write some dummy words into the SRAM status block
13538 * area and see if they read back correctly. If the return
13539 * value is bad, force-enable the PCIX workaround.
13541 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13543 writel(0x00000000, sram_base);
13544 writel(0x00000000, sram_base + 4);
13545 writel(0xffffffff, sram_base + 4);
13546 if (readl(sram_base) != 0x00000000)
13547 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
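/* If the controller mis-targeted the two word-1 writes (the PCI-X
 * target bug), the 0xffffffff intended for word 1 lands on word 0
 * instead, the read back above returns nonzero, and the
 * indirect-access workaround is force-enabled.
 */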
13552 tg3_nvram_init(tp);
13554 grc_misc_cfg = tr32(GRC_MISC_CFG);
13555 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13558 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13559 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13560 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13562 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13563 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13564 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13565 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13566 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13567 HOSTCC_MODE_CLRTICK_TXBD);
13569 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13570 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13571 tp->misc_host_ctrl);
13574 /* Preserve the APE MAC_MODE bits */
13575 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13576 tp->mac_mode = tr32(MAC_MODE) |
13577 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13579 tp->mac_mode = TG3_DEF_MAC_MODE;
13581 /* these are limited to 10/100 only */
13582 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13583 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13584 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13585 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13586 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13587 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13588 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13589 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13590 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13591 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13592 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13593 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13594 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13595 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13596 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13597 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13599 err = tg3_phy_probe(tp);
13601 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13602 /* ... but do not return immediately ... */
13606 tg3_read_partno(tp);
13607 tg3_read_fw_ver(tp);
13609 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13610 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13613 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13615 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13618 /* 5700 {AX,BX} chips have a broken status block link
13619 * change bit implementation, so we must use the
13620 * status register in those cases.
13622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13623 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13625 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13627 /* The led_ctrl is set during tg3_phy_probe; here we might
13628 * have to force the link status polling mechanism based
13629 * upon subsystem IDs.
13631 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13632 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13633 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13634 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13635 TG3_FLAG_USE_LINKCHG_REG);
13638 /* For all SERDES we poll the MAC status register. */
13639 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13640 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13642 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
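/* NET_IP_ALIGN is normally 2: reserving two bytes in front of the
 * 14-byte Ethernet header leaves the IP header that follows 4-byte
 * aligned. The 5701 in PCI-X mode cannot DMA to a buffer at such an
 * offset, hence the special case below forcing the offset back down.
 */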
13644 tp->rx_offset = NET_IP_ALIGN;
13645 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13646 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
13649 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13651 /* Increment the rx prod index on the rx std ring by at most
13652 * 8 for these chips to work around hw errata.
13654 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13656 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13657 tp->rx_std_max_post = 8;
13659 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13660 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13661 PCIE_PWR_MGMT_L1_THRESH_MSK;
13666 #ifdef CONFIG_SPARC
13667 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13669 struct net_device *dev = tp->dev;
13670 struct pci_dev *pdev = tp->pdev;
13671 struct device_node *dp = pci_device_to_OF_node(pdev);
13672 const unsigned char *addr;
13675 addr = of_get_property(dp, "local-mac-address", &len);
13676 if (addr && len == 6) {
13677 memcpy(dev->dev_addr, addr, 6);
13678 memcpy(dev->perm_addr, dev->dev_addr, 6);
13684 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13686 struct net_device *dev = tp->dev;
13688 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13689 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13694 static int __devinit tg3_get_device_address(struct tg3 *tp)
13696 struct net_device *dev = tp->dev;
13697 u32 hi, lo, mac_offset;
13700 #ifdef CONFIG_SPARC
13701 if (!tg3_get_macaddr_sparc(tp))
13706 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13707 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13708 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13710 if (tg3_nvram_lock(tp))
13711 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13713 tg3_nvram_unlock(tp);
13714 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13715 if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
13717 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13720 /* First try to get it from the MAC address mailbox. */
13721 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
13722 if ((hi >> 16) == 0x484b) {
13723 dev->dev_addr[0] = (hi >> 8) & 0xff;
13724 dev->dev_addr[1] = (hi >> 0) & 0xff;
13726 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13727 dev->dev_addr[2] = (lo >> 24) & 0xff;
13728 dev->dev_addr[3] = (lo >> 16) & 0xff;
13729 dev->dev_addr[4] = (lo >> 8) & 0xff;
13730 dev->dev_addr[5] = (lo >> 0) & 0xff;
13732 /* Some old bootcode may report a 0 MAC address in SRAM */
13733 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
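/* Example (illustrative values only): hi = 0x484b0a1b and
 * lo = 0x2c3d4e5f decode to the MAC address 0a:1b:2c:3d:4e:5f.
 * The 0x484b in the top 16 bits is apparently an ASCII "HK" marker
 * that bootcode leaves to flag a valid address in SRAM.
 */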
13736 /* Next, try NVRAM. */
13737 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13738 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13739 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13740 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13741 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13743 /* Finally just fetch it out of the MAC control regs. */
13745 hi = tr32(MAC_ADDR_0_HIGH);
13746 lo = tr32(MAC_ADDR_0_LOW);
13748 dev->dev_addr[5] = lo & 0xff;
13749 dev->dev_addr[4] = (lo >> 8) & 0xff;
13750 dev->dev_addr[3] = (lo >> 16) & 0xff;
13751 dev->dev_addr[2] = (lo >> 24) & 0xff;
13752 dev->dev_addr[1] = hi & 0xff;
13753 dev->dev_addr[0] = (hi >> 8) & 0xff;
13757 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13758 #ifdef CONFIG_SPARC
13759 if (!tg3_get_default_macaddr_sparc(tp))
13764 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13768 #define BOUNDARY_SINGLE_CACHELINE 1
13769 #define BOUNDARY_MULTI_CACHELINE 2
13771 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13773 int cacheline_size;
13777 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13779 cacheline_size = 1024;
13781 cacheline_size = (int) byte * 4;
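/* PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence the
 * multiply by 4; e.g. a register value of 0x10 means a 64-byte cache
 * line. A value of 0 means firmware never programmed it, so the
 * worst-case 1024-byte boundary is assumed above.
 */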
13783 /* On 5703 and later chips, the boundary bits have no effect. */
13786 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13787 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13788 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13791 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13792 goal = BOUNDARY_MULTI_CACHELINE;
13794 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13795 goal = BOUNDARY_SINGLE_CACHELINE;
13801 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13802 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13803 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13810 /* PCI controllers on most RISC systems tend to disconnect
13811 * when a device tries to burst across a cache-line boundary.
13812 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13814 * Unfortunately, for PCI-E there are only limited
13815 * write-side controls for this, and thus for reads
13816 * we will still get the disconnects. We'll also waste
13817 * these PCI cycles for both read and write for chips
13818 * other than 5700 and 5701 which do not implement the
 * boundary bits.
13821 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13822 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13823 switch (cacheline_size) {
13828 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13829 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13830 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13832 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13833 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13838 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13839 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13843 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13844 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13847 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13848 switch (cacheline_size) {
13852 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13853 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13854 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13860 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13861 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13865 switch (cacheline_size) {
13867 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13868 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13869 DMA_RWCTRL_WRITE_BNDRY_16);
13874 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13875 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13876 DMA_RWCTRL_WRITE_BNDRY_32);
13881 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13882 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13883 DMA_RWCTRL_WRITE_BNDRY_64);
13888 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13889 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13890 DMA_RWCTRL_WRITE_BNDRY_128);
13895 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13896 DMA_RWCTRL_WRITE_BNDRY_256);
13899 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13900 DMA_RWCTRL_WRITE_BNDRY_512);
13904 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13905 DMA_RWCTRL_WRITE_BNDRY_1024);
13914 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13916 struct tg3_internal_buffer_desc test_desc;
13917 u32 sram_dma_descs;
13920 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13922 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13923 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13924 tw32(RDMAC_STATUS, 0);
13925 tw32(WDMAC_STATUS, 0);
13927 tw32(BUFMGR_MODE, 0);
13928 tw32(FTQ_RESET, 0);
13930 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13931 test_desc.addr_lo = buf_dma & 0xffffffff;
13932 test_desc.nic_mbuf = 0x00002100;
13933 test_desc.len = size;
13936 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13937 * the *second* time the tg3 driver was getting loaded after an
 * initial scan.
13940 * Broadcom tells me:
13941 * ...the DMA engine is connected to the GRC block and a DMA
13942 * reset may affect the GRC block in some unpredictable way...
13943 * The behavior of resets to individual blocks has not been tested.
13945 * Broadcom noted the GRC reset will also reset all sub-components.
13948 test_desc.cqid_sqid = (13 << 8) | 2;
13950 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13953 test_desc.cqid_sqid = (16 << 8) | 7;
13955 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13958 test_desc.flags = 0x00000005;
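/* The values above target the test at NIC SRAM offset 0x2100
 * (test_desc.nic_mbuf); tg3_test_dma() later reads that same region
 * back via tg3_read_mem() to verify the transfer. The cqid_sqid and
 * flags values select the read- or write-DMA queues and are
 * presumably dictated by internal firmware conventions.
 */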
13960 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13963 val = *(((u32 *)&test_desc) + i);
13964 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13965 sram_dma_descs + (i * sizeof(u32)));
13966 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13968 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13971 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13973 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13977 for (i = 0; i < 40; i++) {
13981 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13983 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13984 if ((val & 0xffff) == sram_dma_descs) {
13995 #define TEST_BUFFER_SIZE 0x2000
13997 static int __devinit tg3_test_dma(struct tg3 *tp)
13999 dma_addr_t buf_dma;
14000 u32 *buf, saved_dma_rwctrl;
14003 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
14009 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14010 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14012 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14018 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14019 /* DMA read watermark not used on PCIE */
14020 tp->dma_rwctrl |= 0x00180000;
14021 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
14022 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14024 tp->dma_rwctrl |= 0x003f0000;
14026 tp->dma_rwctrl |= 0x003f000f;
14028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14030 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14031 u32 read_water = 0x7;
14033 /* If the 5704 is behind the EPB bridge, we can
14034 * do the less restrictive ONE_DMA workaround for
14035 * better performance.
14037 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
14038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14039 tp->dma_rwctrl |= 0x8000;
14040 else if (ccval == 0x6 || ccval == 0x7)
14041 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14045 /* Set bit 23 to enable PCIX hw bug fix */
14047 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14048 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14050 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14051 /* 5780 always in PCIX mode */
14052 tp->dma_rwctrl |= 0x00144000;
14053 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14054 /* 5714 always in PCIX mode */
14055 tp->dma_rwctrl |= 0x00148000;
14057 tp->dma_rwctrl |= 0x001b000f;
14061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14063 tp->dma_rwctrl &= 0xfffffff0;
14065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14066 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14067 /* Remove this if it causes problems for some boards. */
14068 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14070 /* On 5700/5701 chips, we need to set this bit.
14071 * Otherwise the chip will issue cacheline transactions
14072 * to streamable DMA memory without all of the byte
14073 * enables turned on. This is an error on several
14074 * RISC PCI controllers, in particular sparc64.
14076 * On 5703/5704 chips, this bit has been reassigned
14077 * a different meaning. In particular, it is used
14078 * on those chips to enable a PCI-X workaround.
14080 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14083 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14086 /* Unneeded, already done by tg3_get_invariants. */
14087 tg3_switch_clocks(tp);
14090 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14091 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14094 /* It is best to perform DMA test with maximum write burst size
14095 * to expose the 5700/5701 write DMA bug.
14097 saved_dma_rwctrl = tp->dma_rwctrl;
14098 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14099 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14104 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
p[i] = i;
14107 /* Send the buffer to the chip. */
14108 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14110 dev_err(&tp->pdev->dev,
14111 "%s: Buffer write failed. err = %d\n",
14117 /* Validate that the data reached card RAM correctly. */
14118 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14120 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14121 if (le32_to_cpu(val) != p[i]) {
14122 dev_err(&tp->pdev->dev,
14123 "%s: Buffer corrupted on device! "
14124 "(%d != %d)\n", __func__, val, i);
14125 /* ret = -ENODEV here? */
14130 /* Now read it back. */
14131 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14133 dev_err(&tp->pdev->dev,
14134 "%s: Buffer read failed. err = %d\n",
14140 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14144 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14145 DMA_RWCTRL_WRITE_BNDRY_16) {
14146 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14147 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14148 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14151 dev_err(&tp->pdev->dev,
14152 "%s: Buffer corrupted on read back! "
14153 "(%d != %d)\n", __func__, p[i], i);
14159 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14165 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14166 DMA_RWCTRL_WRITE_BNDRY_16) {
14167 static struct pci_device_id dma_wait_state_chipsets[] = {
14168 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
14169 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14173 /* DMA test passed without adjusting the DMA boundary;
14174 * now look for chipsets that are known to expose the
14175 * DMA bug without failing the test.
14177 if (pci_dev_present(dma_wait_state_chipsets)) {
14178 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14179 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14182 /* Safe to use the calculated DMA boundary. */
14183 tp->dma_rwctrl = saved_dma_rwctrl;
14185 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14189 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
14194 static void __devinit tg3_init_link_config(struct tg3 *tp)
14196 tp->link_config.advertising =
14197 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14198 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14199 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14200 ADVERTISED_Autoneg | ADVERTISED_MII);
14201 tp->link_config.speed = SPEED_INVALID;
14202 tp->link_config.duplex = DUPLEX_INVALID;
14203 tp->link_config.autoneg = AUTONEG_ENABLE;
14204 tp->link_config.active_speed = SPEED_INVALID;
14205 tp->link_config.active_duplex = DUPLEX_INVALID;
14206 tp->link_config.phy_is_low_power = 0;
14207 tp->link_config.orig_speed = SPEED_INVALID;
14208 tp->link_config.orig_duplex = DUPLEX_INVALID;
14209 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14212 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14214 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14216 tp->bufmgr_config.mbuf_read_dma_low_water =
14217 DEFAULT_MB_RDMA_LOW_WATER_5705;
14218 tp->bufmgr_config.mbuf_mac_rx_low_water =
14219 DEFAULT_MB_MACRX_LOW_WATER_57765;
14220 tp->bufmgr_config.mbuf_high_water =
14221 DEFAULT_MB_HIGH_WATER_57765;
14223 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14224 DEFAULT_MB_RDMA_LOW_WATER_5705;
14225 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14226 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14227 tp->bufmgr_config.mbuf_high_water_jumbo =
14228 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14229 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14230 tp->bufmgr_config.mbuf_read_dma_low_water =
14231 DEFAULT_MB_RDMA_LOW_WATER_5705;
14232 tp->bufmgr_config.mbuf_mac_rx_low_water =
14233 DEFAULT_MB_MACRX_LOW_WATER_5705;
14234 tp->bufmgr_config.mbuf_high_water =
14235 DEFAULT_MB_HIGH_WATER_5705;
14236 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14237 tp->bufmgr_config.mbuf_mac_rx_low_water =
14238 DEFAULT_MB_MACRX_LOW_WATER_5906;
14239 tp->bufmgr_config.mbuf_high_water =
14240 DEFAULT_MB_HIGH_WATER_5906;
14243 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14244 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14245 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14246 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14247 tp->bufmgr_config.mbuf_high_water_jumbo =
14248 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14250 tp->bufmgr_config.mbuf_read_dma_low_water =
14251 DEFAULT_MB_RDMA_LOW_WATER;
14252 tp->bufmgr_config.mbuf_mac_rx_low_water =
14253 DEFAULT_MB_MACRX_LOW_WATER;
14254 tp->bufmgr_config.mbuf_high_water =
14255 DEFAULT_MB_HIGH_WATER;
14257 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14258 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14259 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14260 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14261 tp->bufmgr_config.mbuf_high_water_jumbo =
14262 DEFAULT_MB_HIGH_WATER_JUMBO;
14265 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14266 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14269 static char * __devinit tg3_phy_string(struct tg3 *tp)
14271 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14272 case TG3_PHY_ID_BCM5400: return "5400";
14273 case TG3_PHY_ID_BCM5401: return "5401";
14274 case TG3_PHY_ID_BCM5411: return "5411";
14275 case TG3_PHY_ID_BCM5701: return "5701";
14276 case TG3_PHY_ID_BCM5703: return "5703";
14277 case TG3_PHY_ID_BCM5704: return "5704";
14278 case TG3_PHY_ID_BCM5705: return "5705";
14279 case TG3_PHY_ID_BCM5750: return "5750";
14280 case TG3_PHY_ID_BCM5752: return "5752";
14281 case TG3_PHY_ID_BCM5714: return "5714";
14282 case TG3_PHY_ID_BCM5780: return "5780";
14283 case TG3_PHY_ID_BCM5755: return "5755";
14284 case TG3_PHY_ID_BCM5787: return "5787";
14285 case TG3_PHY_ID_BCM5784: return "5784";
14286 case TG3_PHY_ID_BCM5756: return "5722/5756";
14287 case TG3_PHY_ID_BCM5906: return "5906";
14288 case TG3_PHY_ID_BCM5761: return "5761";
14289 case TG3_PHY_ID_BCM5718C: return "5718C";
14290 case TG3_PHY_ID_BCM5718S: return "5718S";
14291 case TG3_PHY_ID_BCM57765: return "57765";
14292 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14293 case 0: return "serdes";
14294 default: return "unknown";
14298 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14300 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14301 strcpy(str, "PCI Express");
14303 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14304 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14306 strcpy(str, "PCIX:");
14308 if ((clock_ctrl == 7) ||
14309 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14310 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14311 strcat(str, "133MHz");
14312 else if (clock_ctrl == 0)
14313 strcat(str, "33MHz");
14314 else if (clock_ctrl == 2)
14315 strcat(str, "50MHz");
14316 else if (clock_ctrl == 4)
14317 strcat(str, "66MHz");
14318 else if (clock_ctrl == 6)
14319 strcat(str, "100MHz");
14321 strcpy(str, "PCI:");
14322 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14323 strcat(str, "66MHz");
14325 strcat(str, "33MHz");
14327 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14328 strcat(str, ":32-bit");
14330 strcat(str, ":64-bit");
14334 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14336 struct pci_dev *peer;
14337 unsigned int func, devnr = tp->pdev->devfn & ~7;
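/* devfn packs the PCI slot and function as (slot << 3) | func, so
 * masking with ~7 and OR-ing in func walks all eight functions of
 * this slot; the peer is whichever function is not tp->pdev itself.
 */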
14339 for (func = 0; func < 8; func++) {
14340 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14341 if (peer && peer != tp->pdev)
14345 /* 5704 can be configured in single-port mode, set peer to
14346 * tp->pdev in that case.
14354 * We don't need to keep the refcount elevated; there's no way
14355 * to remove one half of this device without removing the other.
14362 static void __devinit tg3_init_coal(struct tg3 *tp)
14364 struct ethtool_coalesce *ec = &tp->coal;
14366 memset(ec, 0, sizeof(*ec));
14367 ec->cmd = ETHTOOL_GCOALESCE;
14368 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14369 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14370 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14371 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14372 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14373 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14374 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14375 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14376 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14378 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14379 HOSTCC_MODE_CLRTICK_TXBD)) {
14380 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14381 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14382 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14383 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14386 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14387 ec->rx_coalesce_usecs_irq = 0;
14388 ec->tx_coalesce_usecs_irq = 0;
14389 ec->stats_block_coalesce_usecs = 0;
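/* These defaults remain adjustable at runtime through the standard
 * ethtool coalescing interface, e.g. (hypothetical interface name):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * which reaches tg3's ethtool_ops as an ETHTOOL_SCOALESCE request.
 */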
14393 static const struct net_device_ops tg3_netdev_ops = {
14394 .ndo_open = tg3_open,
14395 .ndo_stop = tg3_close,
14396 .ndo_start_xmit = tg3_start_xmit,
14397 .ndo_get_stats = tg3_get_stats,
14398 .ndo_validate_addr = eth_validate_addr,
14399 .ndo_set_multicast_list = tg3_set_rx_mode,
14400 .ndo_set_mac_address = tg3_set_mac_addr,
14401 .ndo_do_ioctl = tg3_ioctl,
14402 .ndo_tx_timeout = tg3_tx_timeout,
14403 .ndo_change_mtu = tg3_change_mtu,
14404 #if TG3_VLAN_TAG_USED
14405 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14407 #ifdef CONFIG_NET_POLL_CONTROLLER
14408 .ndo_poll_controller = tg3_poll_controller,
14412 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14413 .ndo_open = tg3_open,
14414 .ndo_stop = tg3_close,
14415 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14416 .ndo_get_stats = tg3_get_stats,
14417 .ndo_validate_addr = eth_validate_addr,
14418 .ndo_set_multicast_list = tg3_set_rx_mode,
14419 .ndo_set_mac_address = tg3_set_mac_addr,
14420 .ndo_do_ioctl = tg3_ioctl,
14421 .ndo_tx_timeout = tg3_tx_timeout,
14422 .ndo_change_mtu = tg3_change_mtu,
14423 #if TG3_VLAN_TAG_USED
14424 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14426 #ifdef CONFIG_NET_POLL_CONTROLLER
14427 .ndo_poll_controller = tg3_poll_controller,
14431 static int __devinit tg3_init_one(struct pci_dev *pdev,
14432 const struct pci_device_id *ent)
14434 struct net_device *dev;
14436 int i, err, pm_cap;
14437 u32 sndmbx, rcvmbx, intmbx;
14439 u64 dma_mask, persist_dma_mask;
14441 printk_once(KERN_INFO "%s\n", version);
14443 err = pci_enable_device(pdev);
14445 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14449 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14451 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14452 goto err_out_disable_pdev;
14455 pci_set_master(pdev);
14457 /* Find power-management capability. */
14458 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14460 dev_err(&pdev->dev,
14461 "Cannot find Power Management capability, aborting\n");
14463 goto err_out_free_res;
14466 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14468 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14470 goto err_out_free_res;
14473 SET_NETDEV_DEV(dev, &pdev->dev);
14475 #if TG3_VLAN_TAG_USED
14476 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14479 tp = netdev_priv(dev);
14482 tp->pm_cap = pm_cap;
14483 tp->rx_mode = TG3_DEF_RX_MODE;
14484 tp->tx_mode = TG3_DEF_TX_MODE;
14487 tp->msg_enable = tg3_debug;
14489 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14491 /* The word/byte swap controls here control register access byte
14492 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14495 tp->misc_host_ctrl =
14496 MISC_HOST_CTRL_MASK_PCI_INT |
14497 MISC_HOST_CTRL_WORD_SWAP |
14498 MISC_HOST_CTRL_INDIR_ACCESS |
14499 MISC_HOST_CTRL_PCISTATE_RW;
14501 /* The NONFRM (non-frame) byte/word swap controls take effect
14502 * on descriptor entries, anything which isn't packet data.
14504 * The StrongARM chips on the board (one for tx, one for rx)
14505 * are running in big-endian mode.
14507 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14508 GRC_MODE_WSWAP_NONFRM_DATA);
14509 #ifdef __BIG_ENDIAN
14510 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14512 spin_lock_init(&tp->lock);
14513 spin_lock_init(&tp->indirect_lock);
14514 INIT_WORK(&tp->reset_task, tg3_reset_task);
14516 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14518 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14520 goto err_out_free_dev;
14523 tg3_init_link_config(tp);
14525 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14526 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14528 dev->ethtool_ops = &tg3_ethtool_ops;
14529 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14530 dev->irq = pdev->irq;
14532 err = tg3_get_invariants(tp);
14534 dev_err(&pdev->dev,
14535 "Problem fetching invariants of chip, aborting\n");
14536 goto err_out_iounmap;
14539 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14540 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
14541 dev->netdev_ops = &tg3_netdev_ops;
14543 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14546 /* The EPB bridge inside 5714, 5715, and 5780 and any
14547 * device behind the EPB cannot support DMA addresses > 40-bit.
14548 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14549 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14550 * do DMA address check in tg3_start_xmit().
14552 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14553 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14554 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14555 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14556 #ifdef CONFIG_HIGHMEM
14557 dma_mask = DMA_BIT_MASK(64);
14560 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
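/* dma_mask gates streaming mappings, while persist_dma_mask is what
 * pci_set_consistent_dma_mask() below applies to coherent
 * allocations. With CONFIG_HIGHMEM the streaming mask stays 64-bit
 * on 40-bit-limited parts and tg3_start_xmit() bounces any buffer
 * mapped above the 40-bit line, per the comment above.
 */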
14562 /* Configure DMA attributes. */
14563 if (dma_mask > DMA_BIT_MASK(32)) {
14564 err = pci_set_dma_mask(pdev, dma_mask);
14566 dev->features |= NETIF_F_HIGHDMA;
14567 err = pci_set_consistent_dma_mask(pdev,
14570 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14571 "DMA for consistent allocations\n");
14572 goto err_out_iounmap;
14576 if (err || dma_mask == DMA_BIT_MASK(32)) {
14577 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14579 dev_err(&pdev->dev,
14580 "No usable DMA configuration, aborting\n");
14581 goto err_out_iounmap;
14585 tg3_init_bufmgr_config(tp);
14587 /* Selectively allow TSO based on operating conditions */
14588 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14589 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14590 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14592 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14593 tp->fw_needed = NULL;
14596 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14597 tp->fw_needed = FIRMWARE_TG3;
14599 /* TSO is on by default on chips that support hardware TSO.
14600 * Firmware TSO on older chips gives lower performance, so it
14601 * is off by default, but can be enabled using ethtool.
14603 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14604 (dev->features & NETIF_F_IP_CSUM))
14605 dev->features |= NETIF_F_TSO;
14607 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14608 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14609 if (dev->features & NETIF_F_IPV6_CSUM)
14610 dev->features |= NETIF_F_TSO6;
14611 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14613 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14614 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14616 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14617 dev->features |= NETIF_F_TSO_ECN;
14620 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14621 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14622 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14623 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14624 tp->rx_pending = 63;
14627 err = tg3_get_device_address(tp);
14629 dev_err(&pdev->dev,
14630 "Could not obtain valid ethernet address, aborting\n");
14631 goto err_out_iounmap;
14634 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14635 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14636 if (!tp->aperegs) {
14637 dev_err(&pdev->dev,
14638 "Cannot map APE registers, aborting\n");
14640 goto err_out_iounmap;
14643 tg3_ape_lock_init(tp);
14645 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14646 tg3_read_dash_ver(tp);
14650 * Reset the chip in case a UNDI or EFI driver did not shut it
14651 * down cleanly; the DMA self test will enable the WDMAC, and we
14652 * would see (spurious) pending DMA on the PCI bus at that point.
14654 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14655 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14656 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14657 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14660 err = tg3_test_dma(tp);
14662 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14663 goto err_out_apeunmap;
14666 /* Flow control autonegotiation is the default behavior. */
14667 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14668 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14670 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14671 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14672 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14673 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14674 struct tg3_napi *tnapi = &tp->napi[i];
14677 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14679 tnapi->int_mbox = intmbx;
14685 tnapi->consmbox = rcvmbx;
14686 tnapi->prodmbox = sndmbx;
14689 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14690 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14692 tnapi->coal_now = HOSTCC_MODE_NOW;
14693 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14696 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14700 * If we support MSIX, we'll be using RSS. If we're using
14701 * RSS, the first vector only handles link interrupts and the
14702 * remaining vectors handle rx and tx interrupts. Reuse the
14703 * mailbox values for the next iteration. The values we set up
14704 * above are still useful for the single vectored mode.
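/* Concretely: with n vectors enabled, vector 0 carries only link and
 * error events while vectors 1..n-1 each service one rx/tx ring pair,
 * using the int_mbox/consmbox/prodmbox values assigned per-tnapi in
 * the loop above.
 */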
14719 pci_set_drvdata(pdev, dev);
14721 err = register_netdev(dev);
14723 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14724 goto err_out_apeunmap;
14727 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14728 tp->board_part_number,
14729 tp->pci_chip_rev_id,
14730 tg3_bus_string(tp, str),
14733 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14734 struct phy_device *phydev;
14735 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14736 netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14737 phydev->drv->name, dev_name(&phydev->dev));
14739 netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14740 tg3_phy_string(tp),
14741 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14742 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14743 "10/100/1000Base-T")),
14744 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14746 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14747 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14748 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14749 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14750 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14751 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14752 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14754 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14755 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14761 iounmap(tp->aperegs);
14762 tp->aperegs = NULL;
14775 pci_release_regions(pdev);
14777 err_out_disable_pdev:
14778 pci_disable_device(pdev);
14779 pci_set_drvdata(pdev, NULL);
14783 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14785 struct net_device *dev = pci_get_drvdata(pdev);
14788 struct tg3 *tp = netdev_priv(dev);
14791 release_firmware(tp->fw);
14793 flush_scheduled_work();
14795 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14800 unregister_netdev(dev);
14802 iounmap(tp->aperegs);
14803 tp->aperegs = NULL;
14810 pci_release_regions(pdev);
14811 pci_disable_device(pdev);
14812 pci_set_drvdata(pdev, NULL);
14816 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14818 struct net_device *dev = pci_get_drvdata(pdev);
14819 struct tg3 *tp = netdev_priv(dev);
14820 pci_power_t target_state;
14823 /* PCI register 4 needs to be saved whether netif_running() or not.
14824 * MSI address and data need to be saved if using MSI and
 * netif_running().
14827 pci_save_state(pdev);
14829 if (!netif_running(dev))
14832 flush_scheduled_work();
14834 tg3_netif_stop(tp);
14836 del_timer_sync(&tp->timer);
14838 tg3_full_lock(tp, 1);
14839 tg3_disable_ints(tp);
14840 tg3_full_unlock(tp);
14842 netif_device_detach(dev);
14844 tg3_full_lock(tp, 0);
14845 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14846 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14847 tg3_full_unlock(tp);
14849 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14851 err = tg3_set_power_state(tp, target_state);
14855 tg3_full_lock(tp, 0);
14857 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14858 err2 = tg3_restart_hw(tp, 1);
14862 tp->timer.expires = jiffies + tp->timer_offset;
14863 add_timer(&tp->timer);
14865 netif_device_attach(dev);
14866 tg3_netif_start(tp);
14869 tg3_full_unlock(tp);
14878 static int tg3_resume(struct pci_dev *pdev)
14880 struct net_device *dev = pci_get_drvdata(pdev);
14881 struct tg3 *tp = netdev_priv(dev);
14884 pci_restore_state(tp->pdev);
14886 if (!netif_running(dev))
14889 err = tg3_set_power_state(tp, PCI_D0);
14893 netif_device_attach(dev);
14895 tg3_full_lock(tp, 0);
14897 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14898 err = tg3_restart_hw(tp, 1);
14902 tp->timer.expires = jiffies + tp->timer_offset;
14903 add_timer(&tp->timer);
14905 tg3_netif_start(tp);
14908 tg3_full_unlock(tp);
14916 static struct pci_driver tg3_driver = {
14917 .name = DRV_MODULE_NAME,
14918 .id_table = tg3_pci_tbl,
14919 .probe = tg3_init_one,
14920 .remove = __devexit_p(tg3_remove_one),
14921 .suspend = tg3_suspend,
14922 .resume = tg3_resume
14925 static int __init tg3_init(void)
14927 return pci_register_driver(&tg3_driver);
14930 static void __exit tg3_cleanup(void)
14932 pci_unregister_driver(&tg3_driver);
14935 module_init(tg3_init);
14936 module_exit(tg3_cleanup);