* tg3.c: Broadcom Tigon3 ethernet driver.
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
* Copyright (C) 2005-2010 Broadcom Corporation.
* Derived from proprietary unpublished source code,
* Copyright (C) 2000-2003 Broadcom Corporation.
* Permission is hereby granted for the distribution of this firmware
* data in hexadecimal or equivalent format, provided this copyright
* notice is accompanying it.
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <net/checksum.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/idprom.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#define TG3_VLAN_TAG_USED 0
#define DRV_MODULE_NAME "tg3"
#define DRV_MODULE_VERSION "3.109"
#define DRV_MODULE_RELDATE "April 2, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
#define TG3_DEF_TX_MODE 0
#define TG3_DEF_MSG_ENABLE \
/* length of time before we decide the hardware is borked,
* and dev->tx_timeout() should be called to fix the problem
#define TG3_TX_TIMEOUT (5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU 60
#define TG3_MAX_MTU(tp) \
((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
* You can't change the ring sizes, but you can change where you place
* them in the NIC onboard memory.
#define TG3_RX_RING_SIZE 512
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JUMBO_RING_SIZE 256
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
* al. operations are done with shifts and masks instead of with
* hw multiply/modulo instructions. Another solution would be to
* replace things like '% foo' with '& (foo - 1)'.
#define TG3_RX_RCB_RING_SIZE(tp) \
(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
#define TG3_TX_RING_SIZE 512
#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
#define TG3_DMA_BYTE_ENAB 64
#define TG3_RX_STD_DMA_SZ 1536
#define TG3_RX_JMB_DMA_SZ 9046
#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
#define TG3_RX_STD_BUFF_RING_SIZE \
(sizeof(struct ring_info) * TG3_RX_RING_SIZE)
#define TG3_RX_JMB_BUFF_RING_SIZE \
(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
#define TG3_RSS_MIN_NUM_MSIX_VECS 2
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
#define TG3_RAW_IP_ALIGN 2
/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
#define TG3_NUM_TEST 6
#define TG3_FW_UPDATE_TIMEOUT_SEC 5
#define FIRMWARE_TG3 "tigon/tg3.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
{ "rx_ucast_packets" },
{ "rx_mcast_packets" },
{ "rx_bcast_packets" },
{ "rx_align_errors" },
{ "rx_xon_pause_rcvd" },
{ "rx_xoff_pause_rcvd" },
{ "rx_mac_ctrl_rcvd" },
{ "rx_xoff_entered" },
{ "rx_frame_too_long_errors" },
{ "rx_undersize_packets" },
{ "rx_in_length_errors" },
{ "rx_out_length_errors" },
{ "rx_64_or_less_octet_packets" },
{ "rx_65_to_127_octet_packets" },
{ "rx_128_to_255_octet_packets" },
{ "rx_256_to_511_octet_packets" },
{ "rx_512_to_1023_octet_packets" },
{ "rx_1024_to_1522_octet_packets" },
{ "rx_1523_to_2047_octet_packets" },
{ "rx_2048_to_4095_octet_packets" },
{ "rx_4096_to_8191_octet_packets" },
{ "rx_8192_to_9022_octet_packets" },
{ "tx_flow_control" },
{ "tx_single_collisions" },
{ "tx_mult_collisions" },
{ "tx_excessive_collisions" },
{ "tx_late_collisions" },
{ "tx_collide_2times" },
{ "tx_collide_3times" },
{ "tx_collide_4times" },
{ "tx_collide_5times" },
{ "tx_collide_6times" },
{ "tx_collide_7times" },
{ "tx_collide_8times" },
{ "tx_collide_9times" },
{ "tx_collide_10times" },
{ "tx_collide_11times" },
{ "tx_collide_12times" },
{ "tx_collide_13times" },
{ "tx_collide_14times" },
{ "tx_collide_15times" },
{ "tx_ucast_packets" },
{ "tx_mcast_packets" },
{ "tx_bcast_packets" },
{ "tx_carrier_sense_errors" },
{ "dma_writeq_full" },
{ "dma_write_prioq_full" },
{ "rx_threshold_hit" },
{ "dma_readq_full" },
{ "dma_read_prioq_full" },
{ "tx_comp_queue_full" },
{ "ring_set_send_prod_index" },
{ "ring_status_update" },
{ "nic_avoided_irqs" },
{ "nic_tx_threshold_hit" }
static const struct {
const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
{ "nvram test (online) " },
{ "link test (online) " },
{ "register test (offline)" },
{ "memory test (offline)" },
{ "loopback test (offline)" },
{ "interrupt test (offline)" },
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
writel(val, tp->regs + off);
static u32 tg3_read32(struct tg3 *tp, u32 off)
return (readl(tp->regs + off));
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
writel(val, tp->aperegs + off);
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
return (readl(tp->aperegs + off));
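/* Indirect register accessors: the target register offset is written to
* the TG3PCI_REG_BASE_ADDR PCI config word and the data is transferred
* through TG3PCI_REG_DATA, all under indirect_lock.
*/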
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
spin_lock_irqsave(&tp->indirect_lock, flags);
pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
writel(val, tp->regs + off);
readl(tp->regs + off);
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
spin_lock_irqsave(&tp->indirect_lock, flags);
pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
TG3_64BIT_REG_LOW, val);
if (off == TG3_RX_STD_PROD_IDX_REG) {
pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
TG3_64BIT_REG_LOW, val);
spin_lock_irqsave(&tp->indirect_lock, flags);
pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* In indirect mode when disabling interrupts, we also need
* to clear the interrupt bit in the GRC local ctrl register.
if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
spin_lock_irqsave(&tp->indirect_lock, flags);
pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* usec_wait specifies the wait time in usec when writing to certain registers
* where it is unsafe to read back the register without some delay.
* GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
* TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
/* Non-posted methods */
tp->write32(tp, off, val);
tg3_write32(tp, off, val);
/* Wait again after the read for the posted method to guarantee that
* the wait time is met.
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
tp->write32_mbox(tp, off, val);
if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
!(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
tp->read32_mbox(tp, off);
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
void __iomem *mbox = tp->regs + off;
if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
return (readl(tp->regs + off + GRCMBOX_BASE));
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
writel(val, tp->regs + off + GRCMBOX_BASE);
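/* Convenience wrappers that dispatch register and mailbox accesses
* through the per-device accessor function pointers in struct tg3.
*/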
#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
#define tw32(reg, val) tp->write32(tp, reg, val)
#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg) tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
spin_lock_irqsave(&tp->indirect_lock, flags);
if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
/* Always leave this as zero. */
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
tw32_f(TG3PCI_MEM_WIN_DATA, val);
/* Always leave this as zero. */
tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
spin_lock_irqsave(&tp->indirect_lock, flags);
if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
/* Always leave this as zero. */
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
*val = tr32(TG3PCI_MEM_WIN_DATA);
/* Always leave this as zero. */
tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
static void tg3_ape_lock_init(struct tg3 *tp)
/* Make sure the driver hasn't any stale locks. */
for (i = 0; i < 8; i++)
tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
APE_LOCK_GRANT_DRIVER);
static int tg3_ape_lock(struct tg3 *tp, int locknum)
if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
/* Wait for up to 1 millisecond to acquire lock. */
for (i = 0; i < 100; i++) {
status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
if (status == APE_LOCK_GRANT_DRIVER)
if (status != APE_LOCK_GRANT_DRIVER) {
/* Revoke the lock request. */
tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
APE_LOCK_GRANT_DRIVER);
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
static void tg3_disable_ints(struct tg3 *tp)
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
for (i = 0; i < tp->irq_max; i++)
tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
static void tg3_enable_ints(struct tg3 *tp)
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
tp->coal_now |= tnapi->coal_now;
/* Force an initial interrupt */
if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
(tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
tw32(HOSTCC_MODE, tp->coal_now);
tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
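/* Report whether the status block shows pending link, RX or TX work
* for this NAPI instance.
*/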
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
struct tg3 *tp = tnapi->tp;
struct tg3_hw_status *sblk = tnapi->hw_status;
unsigned int work_exists = 0;
/* check for phy events */
if (!(tp->tg3_flags &
(TG3_FLAG_USE_LINKCHG_REG |
TG3_FLAG_POLL_SERDES))) {
if (sblk->status & SD_STATUS_LINK_CHG)
/* check for RX/TX work to do */
if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
* similar to tg3_enable_ints, but it accurately determines whether there
* is new work pending and can return without flushing the PIO write
* which reenables interrupts
static void tg3_int_reenable(struct tg3_napi *tnapi)
struct tg3 *tp = tnapi->tp;
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
/* When doing tagged status, this work check is unnecessary.
* The last_tag we write above tells the chip which piece of
* work we've completed.
if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
tw32(HOSTCC_MODE, tp->coalesce_mode |
HOSTCC_MODE_ENABLE | tnapi->coal_now);
static void tg3_napi_disable(struct tg3 *tp)
for (i = tp->irq_cnt - 1; i >= 0; i--)
napi_disable(&tp->napi[i].napi);
static void tg3_napi_enable(struct tg3 *tp)
for (i = 0; i < tp->irq_cnt; i++)
napi_enable(&tp->napi[i].napi);
static inline void tg3_netif_stop(struct tg3 *tp)
tp->dev->trans_start = jiffies; /* prevent tx timeout */
tg3_napi_disable(tp);
netif_tx_disable(tp->dev);
static inline void tg3_netif_start(struct tg3 *tp)
/* NOTE: unconditional netif_tx_wake_all_queues is only
* appropriate so long as all callers are assured to
* have free tx slots (such as after tg3_init_hw)
netif_tx_wake_all_queues(tp->dev);
tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
static void tg3_switch_clocks(struct tg3 *tp)
if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
orig_clock_ctrl = clock_ctrl;
clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
CLOCK_CTRL_CLKRUN_OENABLE |
tp->pci_clock_ctrl = clock_ctrl;
if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
tw32_wait_f(TG3PCI_CLOCK_CTRL,
clock_ctrl | CLOCK_CTRL_625_CORE, 40);
} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
tw32_wait_f(TG3PCI_CLOCK_CTRL,
(CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
tw32_wait_f(TG3PCI_CLOCK_CTRL,
clock_ctrl | (CLOCK_CTRL_ALTCLK),
tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
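/* tg3_readphy()/tg3_writephy() issue MII management frames through
* MAC_MI_COM and poll MI_COM_BUSY for completion, temporarily pausing
* autopolling while the frame is on the wire.
*/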
#define PHY_BUSY_LOOPS 5000
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
(tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
frame_val |= (MI_COM_CMD_READ | MI_COM_START);
tw32_f(MAC_MI_COM, frame_val);
loops = PHY_BUSY_LOOPS;
frame_val = tr32(MAC_MI_COM);
if ((frame_val & MI_COM_BUSY) == 0) {
frame_val = tr32(MAC_MI_COM);
*val = frame_val & MI_COM_DATA_MASK;
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32_f(MAC_MI_MODE, tp->mi_mode);
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
(reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
(tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
frame_val |= (val & MI_COM_DATA_MASK);
frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
tw32_f(MAC_MI_COM, frame_val);
loops = PHY_BUSY_LOOPS;
frame_val = tr32(MAC_MI_COM);
if ((frame_val & MI_COM_BUSY) == 0) {
frame_val = tr32(MAC_MI_COM);
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32_f(MAC_MI_MODE, tp->mi_mode);
static int tg3_bmcr_reset(struct tg3 *tp)
/* OK, reset it, and poll the BMCR_RESET bit until it
* clears or we time out.
phy_control = BMCR_RESET;
err = tg3_writephy(tp, MII_BMCR, phy_control);
err = tg3_readphy(tp, MII_BMCR, &phy_control);
if ((phy_control & BMCR_RESET) == 0) {
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
struct tg3 *tp = bp->priv;
spin_lock_bh(&tp->lock);
if (tg3_readphy(tp, reg, &val))
spin_unlock_bh(&tp->lock);
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
struct tg3 *tp = bp->priv;
spin_lock_bh(&tp->lock);
if (tg3_writephy(tp, reg, val))
spin_unlock_bh(&tp->lock);
static int tg3_mdio_reset(struct mii_bus *bp)
static void tg3_mdio_config_5785(struct tg3 *tp)
struct phy_device *phydev;
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
val = MAC_PHYCFG2_50610_LED_MODES;
case PHY_ID_BCMAC131:
val = MAC_PHYCFG2_AC131_LED_MODES;
case PHY_ID_RTL8211C:
val = MAC_PHYCFG2_RTL8211C_LED_MODES;
case PHY_ID_RTL8201E:
val = MAC_PHYCFG2_RTL8201E_LED_MODES;
if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
tw32(MAC_PHYCFG2, val);
val = tr32(MAC_PHYCFG1);
val &= ~(MAC_PHYCFG1_RGMII_INT |
MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
tw32(MAC_PHYCFG1, val);
if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
val |= MAC_PHYCFG2_EMODE_MASK_MASK |
MAC_PHYCFG2_FMODE_MASK_MASK |
MAC_PHYCFG2_GMODE_MASK_MASK |
MAC_PHYCFG2_ACT_MASK_MASK |
MAC_PHYCFG2_QUAL_MASK_MASK |
MAC_PHYCFG2_INBAND_ENABLE;
tw32(MAC_PHYCFG2, val);
val = tr32(MAC_PHYCFG1);
val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
tw32(MAC_PHYCFG1, val);
val = tr32(MAC_EXT_RGMII_MODE);
val &= ~(MAC_RGMII_MODE_RX_INT_B |
MAC_RGMII_MODE_RX_QUALITY |
MAC_RGMII_MODE_RX_ACTIVITY |
MAC_RGMII_MODE_RX_ENG_DET |
MAC_RGMII_MODE_TX_ENABLE |
MAC_RGMII_MODE_TX_LOWPWR |
MAC_RGMII_MODE_TX_RESET);
if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
val |= MAC_RGMII_MODE_RX_INT_B |
MAC_RGMII_MODE_RX_QUALITY |
MAC_RGMII_MODE_RX_ACTIVITY |
MAC_RGMII_MODE_RX_ENG_DET;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
val |= MAC_RGMII_MODE_TX_ENABLE |
MAC_RGMII_MODE_TX_LOWPWR |
MAC_RGMII_MODE_TX_RESET;
tw32(MAC_EXT_RGMII_MODE, val);
static void tg3_mdio_start(struct tg3 *tp)
tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
static int tg3_mdio_init(struct tg3 *tp)
struct phy_device *phydev;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
u32 funcnum, is_serdes;
funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
TG3_CPMU_PHY_STRAP_IS_SERDES;
tp->phy_addr = TG3_PHY_MII_ADDR;
if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
(tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
tp->mdio_bus = mdiobus_alloc();
if (tp->mdio_bus == NULL)
tp->mdio_bus->name = "tg3 mdio bus";
snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
(tp->pdev->bus->number << 8) | tp->pdev->devfn);
tp->mdio_bus->priv = tp;
tp->mdio_bus->parent = &tp->pdev->dev;
tp->mdio_bus->read = &tg3_mdio_read;
tp->mdio_bus->write = &tg3_mdio_write;
tp->mdio_bus->reset = &tg3_mdio_reset;
tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
tp->mdio_bus->irq = &tp->mdio_irq[0];
for (i = 0; i < PHY_MAX_ADDR; i++)
tp->mdio_bus->irq[i] = PHY_POLL;
/* The bus registration will look for all the PHYs on the mdio bus.
* Unfortunately, it does not ensure the PHY is powered up before
* accessing the PHY ID registers. A chip reset is the
* quickest way to bring the device back to an operational state..
if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
i = mdiobus_register(tp->mdio_bus);
dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
dev_warn(&tp->pdev->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_BCM57780:
phydev->interface = PHY_INTERFACE_MODE_GMII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
PHY_BRCM_RX_REFCLK_UNUSED |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_AUTO_PWRDWN_ENABLE;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
case PHY_ID_RTL8201E:
case PHY_ID_BCMAC131:
phydev->interface = PHY_INTERFACE_MODE_MII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
static void tg3_mdio_fini(struct tg3 *tp)
if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
val = tr32(GRC_RX_CPU_EVENT);
val |= GRC_RX_CPU_DRIVER_EVENT;
tw32_f(GRC_RX_CPU_EVENT, val);
tp->last_event_jiffies = jiffies;
#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
unsigned int delay_cnt;
/* If enough time has passed, no wait is necessary. */
time_remain = (long)(tp->last_event_jiffies + 1 +
usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
if (time_remain < 0)
/* Check if we can shorten the wait time. */
delay_cnt = jiffies_to_usecs(time_remain);
if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
delay_cnt = (delay_cnt >> 3) + 1;
for (i = 0; i < delay_cnt; i++) {
if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
tg3_wait_for_event_ack(tp);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
if (!tg3_readphy(tp, MII_BMCR, &reg))
if (!tg3_readphy(tp, MII_BMSR, &reg))
val |= (reg & 0xffff);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
if (!tg3_readphy(tp, MII_LPA, &reg))
val |= (reg & 0xffff);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
if (!tg3_readphy(tp, MII_CTRL1000, &reg))
if (!tg3_readphy(tp, MII_STAT1000, &reg))
val |= (reg & 0xffff);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
if (!tg3_readphy(tp, MII_PHYADDR, &reg))
tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
tg3_generate_fw_event(tp);
static void tg3_link_report(struct tg3 *tp)
if (!netif_carrier_ok(tp->dev)) {
netif_info(tp, link, tp->dev, "Link is down\n");
tg3_ump_link_report(tp);
} else if (netif_msg_link(tp)) {
netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
(tp->link_config.active_speed == SPEED_1000 ?
(tp->link_config.active_speed == SPEED_100 ?
(tp->link_config.active_duplex == DUPLEX_FULL ?
netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
(tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
(tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
tg3_ump_link_report(tp);
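/* Translate the driver's FLOW_CTRL_* settings into MII pause
* advertisement bits for copper (1000T) and 1000BASE-X/SerDes (1000X)
* links.
*/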
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
miireg = ADVERTISE_PAUSE_CAP;
else if (flow_ctrl & FLOW_CTRL_TX)
miireg = ADVERTISE_PAUSE_ASYM;
else if (flow_ctrl & FLOW_CTRL_RX)
miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
miireg = ADVERTISE_1000XPAUSE;
else if (flow_ctrl & FLOW_CTRL_TX)
miireg = ADVERTISE_1000XPSE_ASYM;
else if (flow_ctrl & FLOW_CTRL_RX)
miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
if (lcladv & ADVERTISE_1000XPAUSE) {
if (lcladv & ADVERTISE_1000XPSE_ASYM) {
if (rmtadv & LPA_1000XPAUSE)
cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
else if (rmtadv & LPA_1000XPAUSE_ASYM)
if (rmtadv & LPA_1000XPAUSE)
cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
u32 old_rx_mode = tp->rx_mode;
u32 old_tx_mode = tp->tx_mode;
if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
autoneg = tp->link_config.autoneg;
if (autoneg == AUTONEG_ENABLE &&
(tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
flowctrl = tp->link_config.flowctrl;
tp->link_config.active_flowctrl = flowctrl;
if (flowctrl & FLOW_CTRL_RX)
tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
if (old_rx_mode != tp->rx_mode)
tw32_f(MAC_RX_MODE, tp->rx_mode);
if (flowctrl & FLOW_CTRL_TX)
tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
if (old_tx_mode != tp->tx_mode)
tw32_f(MAC_TX_MODE, tp->tx_mode);
static void tg3_adjust_link(struct net_device *dev)
u8 oldflowctrl, linkmesg = 0;
u32 mac_mode, lcl_adv, rmt_adv;
struct tg3 *tp = netdev_priv(dev);
struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
spin_lock_bh(&tp->lock);
mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
MAC_MODE_HALF_DUPLEX);
oldflowctrl = tp->link_config.active_flowctrl;
if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
mac_mode |= MAC_MODE_PORT_MODE_MII;
else if (phydev->speed == SPEED_1000 ||
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
mac_mode |= MAC_MODE_PORT_MODE_GMII;
mac_mode |= MAC_MODE_PORT_MODE_MII;
if (phydev->duplex == DUPLEX_HALF)
mac_mode |= MAC_MODE_HALF_DUPLEX;
lcl_adv = tg3_advert_flowctrl_1000T(
tp->link_config.flowctrl);
rmt_adv = LPA_PAUSE_CAP;
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
mac_mode |= MAC_MODE_PORT_MODE_GMII;
if (mac_mode != tp->mac_mode) {
tp->mac_mode = mac_mode;
tw32_f(MAC_MODE, tp->mac_mode);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
if (phydev->speed == SPEED_10)
MAC_MI_STAT_10MBPS_MODE |
MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
tw32(MAC_TX_LENGTHS,
((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
(6 << TX_LENGTHS_IPG_SHIFT) |
(0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
tw32(MAC_TX_LENGTHS,
((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
(6 << TX_LENGTHS_IPG_SHIFT) |
(32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
(!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
phydev->speed != tp->link_config.active_speed ||
phydev->duplex != tp->link_config.active_duplex ||
oldflowctrl != tp->link_config.active_flowctrl)
tp->link_config.active_speed = phydev->speed;
tp->link_config.active_duplex = phydev->duplex;
spin_unlock_bh(&tp->lock);
tg3_link_report(tp);
static int tg3_phy_init(struct tg3 *tp)
struct phy_device *phydev;
if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
/* Bring the PHY back to a known state. */
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Attach the MAC to the PHY. */
phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
phydev->dev_flags, phydev->interface);
if (IS_ERR(phydev)) {
dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
/* Mask with MAC supported features. */
switch (phydev->interface) {
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
phydev->supported &= (PHY_GBIT_FEATURES |
SUPPORTED_Asym_Pause);
case PHY_INTERFACE_MODE_MII:
phydev->supported &= (PHY_BASIC_FEATURES |
SUPPORTED_Asym_Pause);
phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
phydev->advertising = phydev->supported;
static void tg3_phy_start(struct tg3 *tp)
struct phy_device *phydev;
if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (tp->link_config.phy_is_low_power) {
tp->link_config.phy_is_low_power = 0;
phydev->speed = tp->link_config.orig_speed;
phydev->duplex = tp->link_config.orig_duplex;
phydev->autoneg = tp->link_config.orig_autoneg;
phydev->advertising = tp->link_config.orig_advertising;
phy_start_aneg(phydev);
static void tg3_phy_stop(struct tg3 *tp)
if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
static void tg3_phy_fini(struct tg3 *tp)
if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
tg3_writephy(tp, MII_TG3_FET_TEST,
phytest | MII_TG3_FET_SHADOW_EN);
if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
tg3_phy_fet_toggle_apd(tp, enable);
reg = MII_TG3_MISC_SHDW_WREN |
MII_TG3_MISC_SHDW_SCR5_SEL |
MII_TG3_MISC_SHDW_SCR5_LPED |
MII_TG3_MISC_SHDW_SCR5_DLPTLM |
MII_TG3_MISC_SHDW_SCR5_SDTL |
MII_TG3_MISC_SHDW_SCR5_C125OE;
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
reg = MII_TG3_MISC_SHDW_WREN |
MII_TG3_MISC_SHDW_APD_SEL |
MII_TG3_MISC_SHDW_APD_WKTM_84MS;
reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
tg3_writephy(tp, MII_TG3_FET_TEST,
ephy | MII_TG3_FET_SHADOW_EN);
if (!tg3_readphy(tp, reg, &phy)) {
phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
tg3_writephy(tp, reg, phy);
tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
MII_TG3_AUXCTL_SHDWSEL_MISC;
if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
!tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
phy |= MII_TG3_AUXCTL_MISC_WREN;
tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
static void tg3_phy_set_wirespeed(struct tg3 *tp)
if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
!tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
tg3_writephy(tp, MII_TG3_AUX_CTRL,
(val | (1 << 15) | (1 << 4)));
static void tg3_phy_apply_otp(struct tg3 *tp)
/* Enable SM_DSP clock and tx 6dB coding. */
phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
MII_TG3_AUXCTL_ACTL_TX_6DB;
tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
/* Turn off SM_DSP clock. */
phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
MII_TG3_AUXCTL_ACTL_TX_6DB;
tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
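/* Poll PHY register 0x16 until the DSP macro-busy bit (0x1000) clears. */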
static int tg3_wait_macro_done(struct tg3 *tp)
if (!tg3_readphy(tp, 0x16, &tmp32)) {
if ((tmp32 & 0x1000) == 0)
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
static const u32 test_pat[4][6] = {
{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
for (chan = 0; chan < 4; chan++) {
tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
(chan * 0x2000) | 0x0200);
tg3_writephy(tp, 0x16, 0x0002);
for (i = 0; i < 6; i++)
tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
tg3_writephy(tp, 0x16, 0x0202);
if (tg3_wait_macro_done(tp)) {
tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
(chan * 0x2000) | 0x0200);
tg3_writephy(tp, 0x16, 0x0082);
if (tg3_wait_macro_done(tp)) {
tg3_writephy(tp, 0x16, 0x0802);
if (tg3_wait_macro_done(tp)) {
for (i = 0; i < 6; i += 2) {
if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
tg3_wait_macro_done(tp)) {
if (low != test_pat[chan][i] ||
high != test_pat[chan][i+1]) {
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
static int tg3_phy_reset_chanpat(struct tg3 *tp)
for (chan = 0; chan < 4; chan++) {
tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
(chan * 0x2000) | 0x0200);
tg3_writephy(tp, 0x16, 0x0002);
for (i = 0; i < 6; i++)
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
tg3_writephy(tp, 0x16, 0x0202);
if (tg3_wait_macro_done(tp))
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
u32 reg32, phy9_orig;
int retries, do_phy_reset, err;
err = tg3_bmcr_reset(tp);
/* Disable transmitter and interrupt. */
if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* Set full-duplex, 1000 mbps. */
tg3_writephy(tp, MII_BMCR,
BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
/* Set to master mode. */
if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
tg3_writephy(tp, MII_TG3_CTRL,
(MII_TG3_CTRL_AS_MASTER |
MII_TG3_CTRL_ENABLE_AS_MASTER));
/* Enable SM_DSP_CLOCK and 6dB. */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
/* Block the PHY control access. */
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
} while (--retries);
err = tg3_phy_reset_chanpat(tp);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
tg3_writephy(tp, 0x16, 0x0000);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
/* Set Extended packet length bit for jumbo frames */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* This will reset the tigon3 PHY if there is no valid
* link unless the FORCE argument is non-zero.
static int tg3_phy_reset(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
err = tg3_readphy(tp, MII_BMSR, &phy_status);
err |= tg3_readphy(tp, MII_BMSR, &phy_status);
if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
netif_carrier_off(tp->dev);
tg3_link_report(tp);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
err = tg3_phy_reset_5703_4_5(tp);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
cpmuctrl = tr32(TG3_CPMU_CTRL);
if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
err = tg3_bmcr_reset(tp);
if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
tw32(TG3_CPMU_CTRL, cpmuctrl);
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
CPMU_LSPD_1000MB_MACCLK_12_5) {
val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
(tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
tg3_phy_apply_otp(tp);
if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
tg3_phy_toggle_apd(tp, true);
tg3_phy_toggle_apd(tp, false);
if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
tg3_writephy(tp, 0x1c, 0x8d68);
tg3_writephy(tp, 0x1c, 0x8d68);
if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
tg3_writephy(tp, MII_TG3_TEST1,
MII_TG3_TEST1_TRIM_EN | 0x4);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
/* Set Extended packet length bit (bit 14) on all chips that */
/* support jumbo frames */
if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
/* Cannot do read-modify-write on 5401 */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
/* Set bit 14 with read-modify-write to preserve other bits */
if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
!tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
/* Set phy register 0x10 bit 0 to high fifo elasticity to support
* jumbo frames transmission.
if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
tg3_writephy(tp, MII_TG3_EXT_CTRL,
phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
/* adjust output voltage */
tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
tg3_phy_toggle_automdix(tp, 1);
tg3_phy_set_wirespeed(tp);
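/* Configure the GRC GPIOs that control auxiliary power, coordinating
* with the peer device on dual-port chips (5704/5714/5717) so that
* power stays up whenever either port needs WOL or ASF.
*/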
2025 static void tg3_frob_aux_power(struct tg3 *tp)
2027 struct tg3 *tp_peer = tp;
2029 /* The GPIOs do something completely different on 57765. */
2030 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2031 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2037 struct net_device *dev_peer;
2039 dev_peer = pci_get_drvdata(tp->pdev_peer);
2040 /* remove_one() may have been run on the peer. */
2044 tp_peer = netdev_priv(dev_peer);
2047 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2048 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2049 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2050 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2053 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2054 (GRC_LCLCTRL_GPIO_OE0 |
2055 GRC_LCLCTRL_GPIO_OE1 |
2056 GRC_LCLCTRL_GPIO_OE2 |
2057 GRC_LCLCTRL_GPIO_OUTPUT0 |
2058 GRC_LCLCTRL_GPIO_OUTPUT1),
2060 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2061 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2062 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2063 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2064 GRC_LCLCTRL_GPIO_OE1 |
2065 GRC_LCLCTRL_GPIO_OE2 |
2066 GRC_LCLCTRL_GPIO_OUTPUT0 |
2067 GRC_LCLCTRL_GPIO_OUTPUT1 |
2069 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2071 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2072 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2074 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2075 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2078 u32 grc_local_ctrl = 0;
2080 if (tp_peer != tp &&
2081 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2084 /* Workaround to prevent overdrawing Amps. */
2085 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2087 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2088 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2089 grc_local_ctrl, 100);
2092 /* On 5753 and variants, GPIO2 cannot be used. */
2093 no_gpio2 = tp->nic_sram_data_cfg &
2094 NIC_SRAM_DATA_CFG_NO_GPIO2;
2096 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2097 GRC_LCLCTRL_GPIO_OE1 |
2098 GRC_LCLCTRL_GPIO_OE2 |
2099 GRC_LCLCTRL_GPIO_OUTPUT1 |
2100 GRC_LCLCTRL_GPIO_OUTPUT2;
2102 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2103 GRC_LCLCTRL_GPIO_OUTPUT2);
2105 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2106 grc_local_ctrl, 100);
2108 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2110 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2111 grc_local_ctrl, 100);
2114 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2115 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2116 grc_local_ctrl, 100);
2120 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2121 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2122 if (tp_peer != tp &&
2123 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2126 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2127 (GRC_LCLCTRL_GPIO_OE1 |
2128 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2130 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2131 GRC_LCLCTRL_GPIO_OE1, 100);
2133 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2134 (GRC_LCLCTRL_GPIO_OE1 |
2135 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2140 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2142 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2144 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2145 if (speed != SPEED_10)
2147 } else if (speed == SPEED_10)
2153 static int tg3_setup_phy(struct tg3 *, int);
2155 #define RESET_KIND_SHUTDOWN 0
2156 #define RESET_KIND_INIT 1
2157 #define RESET_KIND_SUSPEND 2
2159 static void tg3_write_sig_post_reset(struct tg3 *, int);
2160 static int tg3_halt_cpu(struct tg3 *, u32);
2162 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2166 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2168 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2169 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2172 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2173 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2174 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2181 val = tr32(GRC_MISC_CFG);
2182 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2185 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2187 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2190 tg3_writephy(tp, MII_ADVERTISE, 0);
2191 tg3_writephy(tp, MII_BMCR,
2192 BMCR_ANENABLE | BMCR_ANRESTART);
2194 tg3_writephy(tp, MII_TG3_FET_TEST,
2195 phytest | MII_TG3_FET_SHADOW_EN);
2196 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2197 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2199 MII_TG3_FET_SHDW_AUXMODE4,
2202 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2205 } else if (do_low_power) {
2206 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2207 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2209 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2210 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2211 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2212 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2213 MII_TG3_AUXCTL_PCTL_VREG_11V);
2216 /* The PHY should not be powered down on some chips because of bugs. */
2219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2221 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2222 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2225 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2226 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2227 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2228 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2229 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2230 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2233 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
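	/* BMCR_PDOWN is the standard MII power-down bit; control only
	 * reaches this write after the chip checks above have decided
	 * that powering the PHY down is actually safe on this device.
	 */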
2236 /* tp->lock is held. */
2237 static int tg3_nvram_lock(struct tg3 *tp)
2239 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2242 if (tp->nvram_lock_cnt == 0) {
2243 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2244 for (i = 0; i < 8000; i++) {
2245 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2250 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2254 tp->nvram_lock_cnt++;
2259 /* tp->lock is held. */
2260 static void tg3_nvram_unlock(struct tg3 *tp)
2262 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2263 if (tp->nvram_lock_cnt > 0)
2264 tp->nvram_lock_cnt--;
2265 if (tp->nvram_lock_cnt == 0)
2266 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
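/* NVRAM_SWARB is a hardware semaphore: tg3_nvram_lock() posts
 * SWARB_REQ_SET1 and spins until SWARB_GNT1 is granted, and
 * tg3_nvram_unlock() drops the request with SWARB_REQ_CLR1, with
 * nvram_lock_cnt allowing the pair to nest.  A typical caller looks
 * roughly like:
 *
 *	tg3_nvram_lock(tp);
 *	tg3_enable_nvram_access(tp);
 *	... NVRAM reads/writes ...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 */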
2270 /* tp->lock is held. */
2271 static void tg3_enable_nvram_access(struct tg3 *tp)
2273 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2274 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2275 u32 nvaccess = tr32(NVRAM_ACCESS);
2277 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2281 /* tp->lock is held. */
2282 static void tg3_disable_nvram_access(struct tg3 *tp)
2284 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2285 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2286 u32 nvaccess = tr32(NVRAM_ACCESS);
2288 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2292 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2293 u32 offset, u32 *val)
2298 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2301 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2302 EEPROM_ADDR_DEVID_MASK |
2304 tw32(GRC_EEPROM_ADDR,
2306 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2307 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2308 EEPROM_ADDR_ADDR_MASK) |
2309 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2311 for (i = 0; i < 1000; i++) {
2312 tmp = tr32(GRC_EEPROM_ADDR);
2314 if (tmp & EEPROM_ADDR_COMPLETE)
2318 if (!(tmp & EEPROM_ADDR_COMPLETE))
2321 tmp = tr32(GRC_EEPROM_DATA);
2324 /* The data will always be opposite the native endian
2325 * format. Perform a blind byteswap to compensate. */
2332 #define NVRAM_CMD_TIMEOUT 10000
2334 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2338 tw32(NVRAM_CMD, nvram_cmd);
2339 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2341 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2347 if (i == NVRAM_CMD_TIMEOUT)
2353 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2355 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2356 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2357 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2358 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2359 (tp->nvram_jedecnum == JEDEC_ATMEL))
2361 addr = ((addr / tp->nvram_pagesize) <<
2362 ATMEL_AT45DB0X1B_PAGE_POS) +
2363 (addr % tp->nvram_pagesize);
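	/* Worked example of the translation above, assuming a 264-byte
	 * Atmel page and ATMEL_AT45DB0X1B_PAGE_POS == 9: linear offset
	 * 1000 is page 1000 / 264 = 3, byte 1000 % 264 = 208, so the
	 * physical address becomes (3 << 9) + 208 = 1744.
	 * tg3_nvram_logical_addr() below performs the inverse mapping.
	 */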
2368 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2370 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2371 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2372 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2373 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2374 (tp->nvram_jedecnum == JEDEC_ATMEL))
2376 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2377 tp->nvram_pagesize) +
2378 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2383 /* NOTE: Data read in from NVRAM is byteswapped according to
2384 * the byteswapping settings for all other register accesses.
2385 * tg3 devices are BE devices, so on a BE machine, the data
2386 * returned will be exactly as it is seen in NVRAM. On a LE
2387 * machine, the 32-bit value will be byteswapped. */
2389 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2393 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2394 return tg3_nvram_read_using_eeprom(tp, offset, val);
2396 offset = tg3_nvram_phys_addr(tp, offset);
2398 if (offset > NVRAM_ADDR_MSK)
2401 ret = tg3_nvram_lock(tp);
2405 tg3_enable_nvram_access(tp);
2407 tw32(NVRAM_ADDR, offset);
2408 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2409 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2412 *val = tr32(NVRAM_RDDATA);
2414 tg3_disable_nvram_access(tp);
2416 tg3_nvram_unlock(tp);
2421 /* Ensures NVRAM data is in bytestream format. */
2422 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2425 int res = tg3_nvram_read(tp, offset, &v);
2427 *val = cpu_to_be32(v);
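/* tg3_nvram_read() returns the word with the first NVRAM byte in the
 * most-significant position (see the NOTE above tg3_nvram_read), so
 * cpu_to_be32() stores that byte first in memory on any host, i.e. the
 * caller ends up with the raw NVRAM byte stream.
 */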
2431 /* tp->lock is held. */
2432 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2434 u32 addr_high, addr_low;
2437 addr_high = ((tp->dev->dev_addr[0] << 8) |
2438 tp->dev->dev_addr[1]);
2439 addr_low = ((tp->dev->dev_addr[2] << 24) |
2440 (tp->dev->dev_addr[3] << 16) |
2441 (tp->dev->dev_addr[4] << 8) |
2442 (tp->dev->dev_addr[5] << 0));
2443 for (i = 0; i < 4; i++) {
2444 if (i == 1 && skip_mac_1)
2446 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2447 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2450 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2452 for (i = 0; i < 12; i++) {
2453 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2454 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2458 addr_high = (tp->dev->dev_addr[0] +
2459 tp->dev->dev_addr[1] +
2460 tp->dev->dev_addr[2] +
2461 tp->dev->dev_addr[3] +
2462 tp->dev->dev_addr[4] +
2463 tp->dev->dev_addr[5]) &
2464 TX_BACKOFF_SEED_MASK;
2465 tw32(MAC_TX_BACKOFF_SEED, addr_high);
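/* Address layout used above: bytes 0-1 of the station address form the
 * 16-bit "high" word and bytes 2-5 the 32-bit "low" word, copied into
 * several exact-match slots (four on all chips, twelve more on
 * 5703/5704).  E.g. 00:10:18:aa:bb:cc is written as addr_high = 0x0010
 * and addr_low = 0x18aabbcc.  The TX backoff seed is simply the byte
 * sum of the address masked with TX_BACKOFF_SEED_MASK.
 */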
2468 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2471 bool device_should_wake, do_low_power;
2473 /* Make sure register accesses (indirect or otherwise)
2474 * will function correctly.
2476 pci_write_config_dword(tp->pdev,
2477 TG3PCI_MISC_HOST_CTRL,
2478 tp->misc_host_ctrl);
2482 pci_enable_wake(tp->pdev, state, false);
2483 pci_set_power_state(tp->pdev, PCI_D0);
2485 /* Switch out of Vaux if it is a NIC */
2486 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2487 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2497 netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2502 /* Restore the CLKREQ setting. */
2503 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2506 pci_read_config_word(tp->pdev,
2507 tp->pcie_cap + PCI_EXP_LNKCTL,
2509 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2510 pci_write_config_word(tp->pdev,
2511 tp->pcie_cap + PCI_EXP_LNKCTL,
2515 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2516 tw32(TG3PCI_MISC_HOST_CTRL,
2517 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2519 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2520 device_may_wakeup(&tp->pdev->dev) &&
2521 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2523 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2524 do_low_power = false;
2525 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2526 !tp->link_config.phy_is_low_power) {
2527 struct phy_device *phydev;
2528 u32 phyid, advertising;
2530 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2532 tp->link_config.phy_is_low_power = 1;
2534 tp->link_config.orig_speed = phydev->speed;
2535 tp->link_config.orig_duplex = phydev->duplex;
2536 tp->link_config.orig_autoneg = phydev->autoneg;
2537 tp->link_config.orig_advertising = phydev->advertising;
2539 advertising = ADVERTISED_TP |
2541 ADVERTISED_Autoneg |
2542 ADVERTISED_10baseT_Half;
2544 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2545 device_should_wake) {
2546 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2548 ADVERTISED_100baseT_Half |
2549 ADVERTISED_100baseT_Full |
2550 ADVERTISED_10baseT_Full;
2552 advertising |= ADVERTISED_10baseT_Full;
2555 phydev->advertising = advertising;
2557 phy_start_aneg(phydev);
2559 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2560 if (phyid != PHY_ID_BCMAC131) {
2561 phyid &= PHY_BCM_OUI_MASK;
2562 if (phyid == PHY_BCM_OUI_1 ||
2563 phyid == PHY_BCM_OUI_2 ||
2564 phyid == PHY_BCM_OUI_3)
2565 do_low_power = true;
2569 do_low_power = true;
2571 if (tp->link_config.phy_is_low_power == 0) {
2572 tp->link_config.phy_is_low_power = 1;
2573 tp->link_config.orig_speed = tp->link_config.speed;
2574 tp->link_config.orig_duplex = tp->link_config.duplex;
2575 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2578 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2579 tp->link_config.speed = SPEED_10;
2580 tp->link_config.duplex = DUPLEX_HALF;
2581 tp->link_config.autoneg = AUTONEG_ENABLE;
2582 tg3_setup_phy(tp, 0);
2586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2589 val = tr32(GRC_VCPU_EXT_CTRL);
2590 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2591 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2595 for (i = 0; i < 200; i++) {
2596 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2597 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2602 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2603 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2604 WOL_DRV_STATE_SHUTDOWN |
2608 if (device_should_wake) {
2611 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2613 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2617 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2618 mac_mode = MAC_MODE_PORT_MODE_GMII;
2620 mac_mode = MAC_MODE_PORT_MODE_MII;
2622 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2623 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2625 u32 speed = (tp->tg3_flags &
2626 TG3_FLAG_WOL_SPEED_100MB) ?
2627 SPEED_100 : SPEED_10;
2628 if (tg3_5700_link_polarity(tp, speed))
2629 mac_mode |= MAC_MODE_LINK_POLARITY;
2631 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2634 mac_mode = MAC_MODE_PORT_MODE_TBI;
2637 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2638 tw32(MAC_LED_CTRL, tp->led_ctrl);
2640 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2641 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2642 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2643 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2644 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2645 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2647 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2648 mac_mode |= tp->mac_mode &
2649 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2650 if (mac_mode & MAC_MODE_APE_TX_EN)
2651 mac_mode |= MAC_MODE_TDE_ENABLE;
2654 tw32_f(MAC_MODE, mac_mode);
2657 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2661 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2662 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2663 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2666 base_val = tp->pci_clock_ctrl;
2667 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2668 CLOCK_CTRL_TXCLK_DISABLE);
2670 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2671 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2672 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2673 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2674 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2676 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2677 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2678 u32 newbits1, newbits2;
2680 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2682 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2683 CLOCK_CTRL_TXCLK_DISABLE |
2685 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2686 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2687 newbits1 = CLOCK_CTRL_625_CORE;
2688 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2690 newbits1 = CLOCK_CTRL_ALTCLK;
2691 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2694 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2697 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2700 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2703 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2705 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2706 CLOCK_CTRL_TXCLK_DISABLE |
2707 CLOCK_CTRL_44MHZ_CORE);
2709 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2712 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2713 tp->pci_clock_ctrl | newbits3, 40);
2717 if (!(device_should_wake) &&
2718 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2719 tg3_power_down_phy(tp, do_low_power);
2721 tg3_frob_aux_power(tp);
2723 /* Workaround for unstable PLL clock */
2724 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2725 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2726 u32 val = tr32(0x7d00);
2728 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2730 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2733 err = tg3_nvram_lock(tp);
2734 tg3_halt_cpu(tp, RX_CPU_BASE);
2736 tg3_nvram_unlock(tp);
2740 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2742 if (device_should_wake)
2743 pci_enable_wake(tp->pdev, state, true);
2745 /* Finally, set the new power state. */
2746 pci_set_power_state(tp->pdev, state);
2751 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2753 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2754 case MII_TG3_AUX_STAT_10HALF:
2756 *duplex = DUPLEX_HALF;
2759 case MII_TG3_AUX_STAT_10FULL:
2761 *duplex = DUPLEX_FULL;
2764 case MII_TG3_AUX_STAT_100HALF:
2766 *duplex = DUPLEX_HALF;
2769 case MII_TG3_AUX_STAT_100FULL:
2771 *duplex = DUPLEX_FULL;
2774 case MII_TG3_AUX_STAT_1000HALF:
2775 *speed = SPEED_1000;
2776 *duplex = DUPLEX_HALF;
2779 case MII_TG3_AUX_STAT_1000FULL:
2780 *speed = SPEED_1000;
2781 *duplex = DUPLEX_FULL;
2785 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2786 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2788 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2792 *speed = SPEED_INVALID;
2793 *duplex = DUPLEX_INVALID;
2798 static void tg3_phy_copper_begin(struct tg3 *tp)
2803 if (tp->link_config.phy_is_low_power) {
2804 /* Entering low power mode. Disable gigabit and
2805 * 100baseT advertisements.
2807 tg3_writephy(tp, MII_TG3_CTRL, 0);
2809 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2810 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2811 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2812 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2814 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2815 } else if (tp->link_config.speed == SPEED_INVALID) {
2816 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2817 tp->link_config.advertising &=
2818 ~(ADVERTISED_1000baseT_Half |
2819 ADVERTISED_1000baseT_Full);
2821 new_adv = ADVERTISE_CSMA;
2822 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2823 new_adv |= ADVERTISE_10HALF;
2824 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2825 new_adv |= ADVERTISE_10FULL;
2826 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2827 new_adv |= ADVERTISE_100HALF;
2828 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2829 new_adv |= ADVERTISE_100FULL;
2831 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2833 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2835 if (tp->link_config.advertising &
2836 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2838 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2839 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2840 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2841 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2842 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2843 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2844 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2845 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2846 MII_TG3_CTRL_ENABLE_AS_MASTER);
2847 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2849 tg3_writephy(tp, MII_TG3_CTRL, 0);
2852 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2853 new_adv |= ADVERTISE_CSMA;
2855 /* Asking for a specific link mode. */
2856 if (tp->link_config.speed == SPEED_1000) {
2857 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2859 if (tp->link_config.duplex == DUPLEX_FULL)
2860 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2862 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2863 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2864 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2865 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2866 MII_TG3_CTRL_ENABLE_AS_MASTER);
2868 if (tp->link_config.speed == SPEED_100) {
2869 if (tp->link_config.duplex == DUPLEX_FULL)
2870 new_adv |= ADVERTISE_100FULL;
2872 new_adv |= ADVERTISE_100HALF;
2874 if (tp->link_config.duplex == DUPLEX_FULL)
2875 new_adv |= ADVERTISE_10FULL;
2877 new_adv |= ADVERTISE_10HALF;
2879 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2884 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2887 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2888 tp->link_config.speed != SPEED_INVALID) {
2889 u32 bmcr, orig_bmcr;
2891 tp->link_config.active_speed = tp->link_config.speed;
2892 tp->link_config.active_duplex = tp->link_config.duplex;
2895 switch (tp->link_config.speed) {
2901 bmcr |= BMCR_SPEED100;
2905 bmcr |= TG3_BMCR_SPEED1000;
2909 if (tp->link_config.duplex == DUPLEX_FULL)
2910 bmcr |= BMCR_FULLDPLX;
2912 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2913 (bmcr != orig_bmcr)) {
2914 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2915 for (i = 0; i < 1500; i++) {
2919 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2920 tg3_readphy(tp, MII_BMSR, &tmp))
2922 if (!(tmp & BMSR_LSTATUS)) {
2927 tg3_writephy(tp, MII_BMCR, bmcr);
2931 tg3_writephy(tp, MII_BMCR,
2932 BMCR_ANENABLE | BMCR_ANRESTART);
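/* In the forced-speed branch above, MII_BMSR is read twice per loop
 * iteration because the link-status bit is latched-low per the MII
 * spec: the first read clears a stale "link went down" indication and
 * the second reflects the current state.  The loop waits for the old
 * link to drop (helped along by BMCR_LOOPBACK) before the final BMCR
 * value is written.
 */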
2936 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2940 /* Turn off tap power management. */
2941 /* Set Extended packet length bit */
2942 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2944 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2945 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2947 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2948 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2950 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2951 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2953 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2954 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2956 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2957 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
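/* The DSP registers above are reached indirectly: the target register
 * number goes into MII_TG3_DSP_ADDRESS and the value into
 * MII_TG3_DSP_RW_PORT, one pair per access.  The constants themselves
 * are chip-specific 5401 DSP coefficients; only the address/data access
 * pattern is generic.
 */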
2964 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2966 u32 adv_reg, all_mask = 0;
2968 if (mask & ADVERTISED_10baseT_Half)
2969 all_mask |= ADVERTISE_10HALF;
2970 if (mask & ADVERTISED_10baseT_Full)
2971 all_mask |= ADVERTISE_10FULL;
2972 if (mask & ADVERTISED_100baseT_Half)
2973 all_mask |= ADVERTISE_100HALF;
2974 if (mask & ADVERTISED_100baseT_Full)
2975 all_mask |= ADVERTISE_100FULL;
2977 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2980 if ((adv_reg & all_mask) != all_mask)
2982 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2986 if (mask & ADVERTISED_1000baseT_Half)
2987 all_mask |= ADVERTISE_1000HALF;
2988 if (mask & ADVERTISED_1000baseT_Full)
2989 all_mask |= ADVERTISE_1000FULL;
2991 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2994 if ((tg3_ctrl & all_mask) != all_mask)
3000 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3004 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3007 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3008 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3010 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3011 if (curadv != reqadv)
3014 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3015 tg3_readphy(tp, MII_LPA, rmtadv);
3017 /* Reprogram the advertisement register, even if it
3018 * does not affect the current link. If the link
3019 * gets renegotiated in the future, we can save an
3020 * additional renegotiation cycle by advertising
3021 * it correctly in the first place.
3023 if (curadv != reqadv) {
3024 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3025 ADVERTISE_PAUSE_ASYM);
3026 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3033 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3035 int current_link_up;
3037 u32 lcl_adv, rmt_adv;
3045 (MAC_STATUS_SYNC_CHANGED |
3046 MAC_STATUS_CFG_CHANGED |
3047 MAC_STATUS_MI_COMPLETION |
3048 MAC_STATUS_LNKSTATE_CHANGED));
3051 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3053 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3057 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3059 /* Some third-party PHYs need to be reset on link going down. */
3062 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3065 netif_carrier_ok(tp->dev)) {
3066 tg3_readphy(tp, MII_BMSR, &bmsr);
3067 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3068 !(bmsr & BMSR_LSTATUS))
3074 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3075 tg3_readphy(tp, MII_BMSR, &bmsr);
3076 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3077 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3080 if (!(bmsr & BMSR_LSTATUS)) {
3081 err = tg3_init_5401phy_dsp(tp);
3085 tg3_readphy(tp, MII_BMSR, &bmsr);
3086 for (i = 0; i < 1000; i++) {
3088 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3089 (bmsr & BMSR_LSTATUS)) {
3095 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3096 TG3_PHY_REV_BCM5401_B0 &&
3097 !(bmsr & BMSR_LSTATUS) &&
3098 tp->link_config.active_speed == SPEED_1000) {
3099 err = tg3_phy_reset(tp);
3101 err = tg3_init_5401phy_dsp(tp);
3106 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3107 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3108 /* 5701 {A0,B0} CRC bug workaround */
3109 tg3_writephy(tp, 0x15, 0x0a75);
3110 tg3_writephy(tp, 0x1c, 0x8c68);
3111 tg3_writephy(tp, 0x1c, 0x8d68);
3112 tg3_writephy(tp, 0x1c, 0x8c68);
3115 /* Clear pending interrupts... */
3116 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3117 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3119 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3120 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3121 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3122 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3124 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3125 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3126 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3127 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3128 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3130 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3133 current_link_up = 0;
3134 current_speed = SPEED_INVALID;
3135 current_duplex = DUPLEX_INVALID;
3137 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3140 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3141 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3142 if (!(val & (1 << 10))) {
3144 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3150 for (i = 0; i < 100; i++) {
3151 tg3_readphy(tp, MII_BMSR, &bmsr);
3152 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3153 (bmsr & BMSR_LSTATUS))
3158 if (bmsr & BMSR_LSTATUS) {
3161 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3162 for (i = 0; i < 2000; i++) {
3164 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3169 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3174 for (i = 0; i < 200; i++) {
3175 tg3_readphy(tp, MII_BMCR, &bmcr);
3176 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3178 if (bmcr && bmcr != 0x7fff)
3186 tp->link_config.active_speed = current_speed;
3187 tp->link_config.active_duplex = current_duplex;
3189 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3190 if ((bmcr & BMCR_ANENABLE) &&
3191 tg3_copper_is_advertising_all(tp,
3192 tp->link_config.advertising)) {
3193 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3195 current_link_up = 1;
3198 if (!(bmcr & BMCR_ANENABLE) &&
3199 tp->link_config.speed == current_speed &&
3200 tp->link_config.duplex == current_duplex &&
3201 tp->link_config.flowctrl ==
3202 tp->link_config.active_flowctrl) {
3203 current_link_up = 1;
3207 if (current_link_up == 1 &&
3208 tp->link_config.active_duplex == DUPLEX_FULL)
3209 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3213 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3216 tg3_phy_copper_begin(tp);
3218 tg3_readphy(tp, MII_BMSR, &tmp);
3219 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3220 (tmp & BMSR_LSTATUS))
3221 current_link_up = 1;
3224 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3225 if (current_link_up == 1) {
3226 if (tp->link_config.active_speed == SPEED_100 ||
3227 tp->link_config.active_speed == SPEED_10)
3228 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3230 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3231 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3232 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3234 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3236 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3237 if (tp->link_config.active_duplex == DUPLEX_HALF)
3238 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3241 if (current_link_up == 1 &&
3242 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3243 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3245 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3248 /* ??? Without this setting Netgear GA302T PHY does not
3249 * ??? send/receive packets...
3251 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3252 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3253 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3254 tw32_f(MAC_MI_MODE, tp->mi_mode);
3258 tw32_f(MAC_MODE, tp->mac_mode);
3261 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3262 /* Polled via timer. */
3263 tw32_f(MAC_EVENT, 0);
3265 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3269 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3270 current_link_up == 1 &&
3271 tp->link_config.active_speed == SPEED_1000 &&
3272 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3273 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3276 (MAC_STATUS_SYNC_CHANGED |
3277 MAC_STATUS_CFG_CHANGED));
3280 NIC_SRAM_FIRMWARE_MBOX,
3281 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3284 /* Prevent send BD corruption. */
3285 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3286 u16 oldlnkctl, newlnkctl;
3288 pci_read_config_word(tp->pdev,
3289 tp->pcie_cap + PCI_EXP_LNKCTL,
3291 if (tp->link_config.active_speed == SPEED_100 ||
3292 tp->link_config.active_speed == SPEED_10)
3293 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3295 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3296 if (newlnkctl != oldlnkctl)
3297 pci_write_config_word(tp->pdev,
3298 tp->pcie_cap + PCI_EXP_LNKCTL,
3302 if (current_link_up != netif_carrier_ok(tp->dev)) {
3303 if (current_link_up)
3304 netif_carrier_on(tp->dev);
3306 netif_carrier_off(tp->dev);
3307 tg3_link_report(tp);
3313 struct tg3_fiber_aneginfo {
3315 #define ANEG_STATE_UNKNOWN 0
3316 #define ANEG_STATE_AN_ENABLE 1
3317 #define ANEG_STATE_RESTART_INIT 2
3318 #define ANEG_STATE_RESTART 3
3319 #define ANEG_STATE_DISABLE_LINK_OK 4
3320 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3321 #define ANEG_STATE_ABILITY_DETECT 6
3322 #define ANEG_STATE_ACK_DETECT_INIT 7
3323 #define ANEG_STATE_ACK_DETECT 8
3324 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3325 #define ANEG_STATE_COMPLETE_ACK 10
3326 #define ANEG_STATE_IDLE_DETECT_INIT 11
3327 #define ANEG_STATE_IDLE_DETECT 12
3328 #define ANEG_STATE_LINK_OK 13
3329 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3330 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3333 #define MR_AN_ENABLE 0x00000001
3334 #define MR_RESTART_AN 0x00000002
3335 #define MR_AN_COMPLETE 0x00000004
3336 #define MR_PAGE_RX 0x00000008
3337 #define MR_NP_LOADED 0x00000010
3338 #define MR_TOGGLE_TX 0x00000020
3339 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3340 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3341 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3342 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3343 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3344 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3345 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3346 #define MR_TOGGLE_RX 0x00002000
3347 #define MR_NP_RX 0x00004000
3349 #define MR_LINK_OK 0x80000000
3351 unsigned long link_time, cur_time;
3353 u32 ability_match_cfg;
3354 int ability_match_count;
3356 char ability_match, idle_match, ack_match;
3358 u32 txconfig, rxconfig;
3359 #define ANEG_CFG_NP 0x00000080
3360 #define ANEG_CFG_ACK 0x00000040
3361 #define ANEG_CFG_RF2 0x00000020
3362 #define ANEG_CFG_RF1 0x00000010
3363 #define ANEG_CFG_PS2 0x00000001
3364 #define ANEG_CFG_PS1 0x00008000
3365 #define ANEG_CFG_HD 0x00004000
3366 #define ANEG_CFG_FD 0x00002000
3367 #define ANEG_CFG_INVAL 0x00001f06
3372 #define ANEG_TIMER_ENAB 2
3373 #define ANEG_FAILED -1
3375 #define ANEG_STATE_SETTLE_TIME 10000
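/* tg3_fiber_aneg_smachine() below is a software implementation of the
 * IEEE 802.3 Clause 37 (1000BASE-X) auto-negotiation arbitration state
 * machine.  It is cranked repeatedly from fiber_autoneg(); ap->cur_time
 * counts those iterations (roughly one per microsecond), so
 * ANEG_STATE_SETTLE_TIME corresponds to an ~10 ms link_timer-style
 * settle window.
 */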
3377 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3378 struct tg3_fiber_aneginfo *ap)
3381 unsigned long delta;
3385 if (ap->state == ANEG_STATE_UNKNOWN) {
3389 ap->ability_match_cfg = 0;
3390 ap->ability_match_count = 0;
3391 ap->ability_match = 0;
3397 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3398 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3400 if (rx_cfg_reg != ap->ability_match_cfg) {
3401 ap->ability_match_cfg = rx_cfg_reg;
3402 ap->ability_match = 0;
3403 ap->ability_match_count = 0;
3405 if (++ap->ability_match_count > 1) {
3406 ap->ability_match = 1;
3407 ap->ability_match_cfg = rx_cfg_reg;
3410 if (rx_cfg_reg & ANEG_CFG_ACK)
3418 ap->ability_match_cfg = 0;
3419 ap->ability_match_count = 0;
3420 ap->ability_match = 0;
3426 ap->rxconfig = rx_cfg_reg;
3429 switch (ap->state) {
3430 case ANEG_STATE_UNKNOWN:
3431 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3432 ap->state = ANEG_STATE_AN_ENABLE;
3435 case ANEG_STATE_AN_ENABLE:
3436 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3437 if (ap->flags & MR_AN_ENABLE) {
3440 ap->ability_match_cfg = 0;
3441 ap->ability_match_count = 0;
3442 ap->ability_match = 0;
3446 ap->state = ANEG_STATE_RESTART_INIT;
3448 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3452 case ANEG_STATE_RESTART_INIT:
3453 ap->link_time = ap->cur_time;
3454 ap->flags &= ~(MR_NP_LOADED);
3456 tw32(MAC_TX_AUTO_NEG, 0);
3457 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3458 tw32_f(MAC_MODE, tp->mac_mode);
3461 ret = ANEG_TIMER_ENAB;
3462 ap->state = ANEG_STATE_RESTART;
3465 case ANEG_STATE_RESTART:
3466 delta = ap->cur_time - ap->link_time;
3467 if (delta > ANEG_STATE_SETTLE_TIME)
3468 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3470 ret = ANEG_TIMER_ENAB;
3473 case ANEG_STATE_DISABLE_LINK_OK:
3477 case ANEG_STATE_ABILITY_DETECT_INIT:
3478 ap->flags &= ~(MR_TOGGLE_TX);
3479 ap->txconfig = ANEG_CFG_FD;
3480 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3481 if (flowctrl & ADVERTISE_1000XPAUSE)
3482 ap->txconfig |= ANEG_CFG_PS1;
3483 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3484 ap->txconfig |= ANEG_CFG_PS2;
3485 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3486 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3487 tw32_f(MAC_MODE, tp->mac_mode);
3490 ap->state = ANEG_STATE_ABILITY_DETECT;
3493 case ANEG_STATE_ABILITY_DETECT:
3494 if (ap->ability_match != 0 && ap->rxconfig != 0)
3495 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3498 case ANEG_STATE_ACK_DETECT_INIT:
3499 ap->txconfig |= ANEG_CFG_ACK;
3500 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3501 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3502 tw32_f(MAC_MODE, tp->mac_mode);
3505 ap->state = ANEG_STATE_ACK_DETECT;
3508 case ANEG_STATE_ACK_DETECT:
3509 if (ap->ack_match != 0) {
3510 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3511 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3512 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3514 ap->state = ANEG_STATE_AN_ENABLE;
3516 } else if (ap->ability_match != 0 &&
3517 ap->rxconfig == 0) {
3518 ap->state = ANEG_STATE_AN_ENABLE;
3522 case ANEG_STATE_COMPLETE_ACK_INIT:
3523 if (ap->rxconfig & ANEG_CFG_INVAL) {
3527 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3528 MR_LP_ADV_HALF_DUPLEX |
3529 MR_LP_ADV_SYM_PAUSE |
3530 MR_LP_ADV_ASYM_PAUSE |
3531 MR_LP_ADV_REMOTE_FAULT1 |
3532 MR_LP_ADV_REMOTE_FAULT2 |
3533 MR_LP_ADV_NEXT_PAGE |
3536 if (ap->rxconfig & ANEG_CFG_FD)
3537 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3538 if (ap->rxconfig & ANEG_CFG_HD)
3539 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3540 if (ap->rxconfig & ANEG_CFG_PS1)
3541 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3542 if (ap->rxconfig & ANEG_CFG_PS2)
3543 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3544 if (ap->rxconfig & ANEG_CFG_RF1)
3545 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3546 if (ap->rxconfig & ANEG_CFG_RF2)
3547 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3548 if (ap->rxconfig & ANEG_CFG_NP)
3549 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3551 ap->link_time = ap->cur_time;
3553 ap->flags ^= (MR_TOGGLE_TX);
3554 if (ap->rxconfig & 0x0008)
3555 ap->flags |= MR_TOGGLE_RX;
3556 if (ap->rxconfig & ANEG_CFG_NP)
3557 ap->flags |= MR_NP_RX;
3558 ap->flags |= MR_PAGE_RX;
3560 ap->state = ANEG_STATE_COMPLETE_ACK;
3561 ret = ANEG_TIMER_ENAB;
3564 case ANEG_STATE_COMPLETE_ACK:
3565 if (ap->ability_match != 0 &&
3566 ap->rxconfig == 0) {
3567 ap->state = ANEG_STATE_AN_ENABLE;
3570 delta = ap->cur_time - ap->link_time;
3571 if (delta > ANEG_STATE_SETTLE_TIME) {
3572 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3573 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3575 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3576 !(ap->flags & MR_NP_RX)) {
3577 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3585 case ANEG_STATE_IDLE_DETECT_INIT:
3586 ap->link_time = ap->cur_time;
3587 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3588 tw32_f(MAC_MODE, tp->mac_mode);
3591 ap->state = ANEG_STATE_IDLE_DETECT;
3592 ret = ANEG_TIMER_ENAB;
3595 case ANEG_STATE_IDLE_DETECT:
3596 if (ap->ability_match != 0 &&
3597 ap->rxconfig == 0) {
3598 ap->state = ANEG_STATE_AN_ENABLE;
3601 delta = ap->cur_time - ap->link_time;
3602 if (delta > ANEG_STATE_SETTLE_TIME) {
3603 /* XXX another gem from the Broadcom driver :( */
3604 ap->state = ANEG_STATE_LINK_OK;
3608 case ANEG_STATE_LINK_OK:
3609 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3613 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3614 /* ??? unimplemented */
3617 case ANEG_STATE_NEXT_PAGE_WAIT:
3618 /* ??? unimplemented */
3629 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3632 struct tg3_fiber_aneginfo aninfo;
3633 int status = ANEG_FAILED;
3637 tw32_f(MAC_TX_AUTO_NEG, 0);
3639 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3640 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3643 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3646 memset(&aninfo, 0, sizeof(aninfo));
3647 aninfo.flags |= MR_AN_ENABLE;
3648 aninfo.state = ANEG_STATE_UNKNOWN;
3649 aninfo.cur_time = 0;
3651 while (++tick < 195000) {
3652 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3653 if (status == ANEG_DONE || status == ANEG_FAILED)
3659 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3660 tw32_f(MAC_MODE, tp->mac_mode);
3663 *txflags = aninfo.txconfig;
3664 *rxflags = aninfo.flags;
3666 if (status == ANEG_DONE &&
3667 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3668 MR_LP_ADV_FULL_DUPLEX)))
3674 static void tg3_init_bcm8002(struct tg3 *tp)
3676 u32 mac_status = tr32(MAC_STATUS);
3679 /* Reset when initting first time or we have a link. */
3680 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3681 !(mac_status & MAC_STATUS_PCS_SYNCED))
3684 /* Set PLL lock range. */
3685 tg3_writephy(tp, 0x16, 0x8007);
3688 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3690 /* Wait for reset to complete. */
3691 /* XXX schedule_timeout() ... */
3692 for (i = 0; i < 500; i++)
3695 /* Config mode; select PMA/Ch 1 regs. */
3696 tg3_writephy(tp, 0x10, 0x8411);
3698 /* Enable auto-lock and comdet, select txclk for tx. */
3699 tg3_writephy(tp, 0x11, 0x0a10);
3701 tg3_writephy(tp, 0x18, 0x00a0);
3702 tg3_writephy(tp, 0x16, 0x41ff);
3704 /* Assert and deassert POR. */
3705 tg3_writephy(tp, 0x13, 0x0400);
3707 tg3_writephy(tp, 0x13, 0x0000);
3709 tg3_writephy(tp, 0x11, 0x0a50);
3711 tg3_writephy(tp, 0x11, 0x0a10);
3713 /* Wait for signal to stabilize */
3714 /* XXX schedule_timeout() ... */
3715 for (i = 0; i < 15000; i++)
3718 /* Deselect the channel register so we can read the PHYID later. */
3721 tg3_writephy(tp, 0x10, 0x8011);
3724 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3727 u32 sg_dig_ctrl, sg_dig_status;
3728 u32 serdes_cfg, expected_sg_dig_ctrl;
3729 int workaround, port_a;
3730 int current_link_up;
3733 expected_sg_dig_ctrl = 0;
3736 current_link_up = 0;
3738 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3739 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3741 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3744 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3745 /* preserve bits 20-23 for voltage regulator */
3746 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3749 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3751 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3752 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3754 u32 val = serdes_cfg;
3760 tw32_f(MAC_SERDES_CFG, val);
3763 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3765 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3766 tg3_setup_flow_control(tp, 0, 0);
3767 current_link_up = 1;
3772 /* Want auto-negotiation. */
3773 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3775 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3776 if (flowctrl & ADVERTISE_1000XPAUSE)
3777 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3778 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3779 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3781 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3782 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3783 tp->serdes_counter &&
3784 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3785 MAC_STATUS_RCVD_CFG)) ==
3786 MAC_STATUS_PCS_SYNCED)) {
3787 tp->serdes_counter--;
3788 current_link_up = 1;
3793 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3794 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3796 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3798 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3799 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3800 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3801 MAC_STATUS_SIGNAL_DET)) {
3802 sg_dig_status = tr32(SG_DIG_STATUS);
3803 mac_status = tr32(MAC_STATUS);
3805 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3806 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3807 u32 local_adv = 0, remote_adv = 0;
3809 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3810 local_adv |= ADVERTISE_1000XPAUSE;
3811 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3812 local_adv |= ADVERTISE_1000XPSE_ASYM;
3814 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3815 remote_adv |= LPA_1000XPAUSE;
3816 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3817 remote_adv |= LPA_1000XPAUSE_ASYM;
3819 tg3_setup_flow_control(tp, local_adv, remote_adv);
3820 current_link_up = 1;
3821 tp->serdes_counter = 0;
3822 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3823 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3824 if (tp->serdes_counter)
3825 tp->serdes_counter--;
3828 u32 val = serdes_cfg;
3835 tw32_f(MAC_SERDES_CFG, val);
3838 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3841 /* Link parallel detection - link is up */
3842 /* only if we have PCS_SYNC and not */
3843 /* receiving config code words */
3844 mac_status = tr32(MAC_STATUS);
3845 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3846 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3847 tg3_setup_flow_control(tp, 0, 0);
3848 current_link_up = 1;
3850 TG3_FLG2_PARALLEL_DETECT;
3851 tp->serdes_counter =
3852 SERDES_PARALLEL_DET_TIMEOUT;
3854 goto restart_autoneg;
3858 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3859 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3863 return current_link_up;
3866 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3868 int current_link_up = 0;
3870 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3873 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3874 u32 txflags, rxflags;
3877 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3878 u32 local_adv = 0, remote_adv = 0;
3880 if (txflags & ANEG_CFG_PS1)
3881 local_adv |= ADVERTISE_1000XPAUSE;
3882 if (txflags & ANEG_CFG_PS2)
3883 local_adv |= ADVERTISE_1000XPSE_ASYM;
3885 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3886 remote_adv |= LPA_1000XPAUSE;
3887 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3888 remote_adv |= LPA_1000XPAUSE_ASYM;
3890 tg3_setup_flow_control(tp, local_adv, remote_adv);
3892 current_link_up = 1;
3894 for (i = 0; i < 30; i++) {
3897 (MAC_STATUS_SYNC_CHANGED |
3898 MAC_STATUS_CFG_CHANGED));
3900 if ((tr32(MAC_STATUS) &
3901 (MAC_STATUS_SYNC_CHANGED |
3902 MAC_STATUS_CFG_CHANGED)) == 0)
3906 mac_status = tr32(MAC_STATUS);
3907 if (current_link_up == 0 &&
3908 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3909 !(mac_status & MAC_STATUS_RCVD_CFG))
3910 current_link_up = 1;
3912 tg3_setup_flow_control(tp, 0, 0);
3914 /* Forcing 1000FD link up. */
3915 current_link_up = 1;
3917 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3920 tw32_f(MAC_MODE, tp->mac_mode);
3925 return current_link_up;
3928 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3931 u16 orig_active_speed;
3932 u8 orig_active_duplex;
3934 int current_link_up;
3937 orig_pause_cfg = tp->link_config.active_flowctrl;
3938 orig_active_speed = tp->link_config.active_speed;
3939 orig_active_duplex = tp->link_config.active_duplex;
3941 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3942 netif_carrier_ok(tp->dev) &&
3943 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3944 mac_status = tr32(MAC_STATUS);
3945 mac_status &= (MAC_STATUS_PCS_SYNCED |
3946 MAC_STATUS_SIGNAL_DET |
3947 MAC_STATUS_CFG_CHANGED |
3948 MAC_STATUS_RCVD_CFG);
3949 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3950 MAC_STATUS_SIGNAL_DET)) {
3951 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3952 MAC_STATUS_CFG_CHANGED));
3957 tw32_f(MAC_TX_AUTO_NEG, 0);
3959 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3960 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3961 tw32_f(MAC_MODE, tp->mac_mode);
3964 if (tp->phy_id == TG3_PHY_ID_BCM8002)
3965 tg3_init_bcm8002(tp);
3967 /* Enable link change event even when serdes polling. */
3968 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3971 current_link_up = 0;
3972 mac_status = tr32(MAC_STATUS);
3974 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3975 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3977 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3979 tp->napi[0].hw_status->status =
3980 (SD_STATUS_UPDATED |
3981 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3983 for (i = 0; i < 100; i++) {
3984 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3985 MAC_STATUS_CFG_CHANGED));
3987 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3988 MAC_STATUS_CFG_CHANGED |
3989 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3993 mac_status = tr32(MAC_STATUS);
3994 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3995 current_link_up = 0;
3996 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3997 tp->serdes_counter == 0) {
3998 tw32_f(MAC_MODE, (tp->mac_mode |
3999 MAC_MODE_SEND_CONFIGS));
4001 tw32_f(MAC_MODE, tp->mac_mode);
4005 if (current_link_up == 1) {
4006 tp->link_config.active_speed = SPEED_1000;
4007 tp->link_config.active_duplex = DUPLEX_FULL;
4008 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4009 LED_CTRL_LNKLED_OVERRIDE |
4010 LED_CTRL_1000MBPS_ON));
4012 tp->link_config.active_speed = SPEED_INVALID;
4013 tp->link_config.active_duplex = DUPLEX_INVALID;
4014 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4015 LED_CTRL_LNKLED_OVERRIDE |
4016 LED_CTRL_TRAFFIC_OVERRIDE));
4019 if (current_link_up != netif_carrier_ok(tp->dev)) {
4020 if (current_link_up)
4021 netif_carrier_on(tp->dev);
4023 netif_carrier_off(tp->dev);
4024 tg3_link_report(tp);
4026 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4027 if (orig_pause_cfg != now_pause_cfg ||
4028 orig_active_speed != tp->link_config.active_speed ||
4029 orig_active_duplex != tp->link_config.active_duplex)
4030 tg3_link_report(tp);
4036 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4038 int current_link_up, err = 0;
4042 u32 local_adv, remote_adv;
4044 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4045 tw32_f(MAC_MODE, tp->mac_mode);
4051 (MAC_STATUS_SYNC_CHANGED |
4052 MAC_STATUS_CFG_CHANGED |
4053 MAC_STATUS_MI_COMPLETION |
4054 MAC_STATUS_LNKSTATE_CHANGED));
4060 current_link_up = 0;
4061 current_speed = SPEED_INVALID;
4062 current_duplex = DUPLEX_INVALID;
4064 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4065 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4067 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4068 bmsr |= BMSR_LSTATUS;
4070 bmsr &= ~BMSR_LSTATUS;
4073 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4075 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4076 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4077 /* do nothing, just check for link up at the end */
4078 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4081 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4082 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4083 ADVERTISE_1000XPAUSE |
4084 ADVERTISE_1000XPSE_ASYM |
4087 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4089 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4090 new_adv |= ADVERTISE_1000XHALF;
4091 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4092 new_adv |= ADVERTISE_1000XFULL;
4094 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4095 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4096 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4097 tg3_writephy(tp, MII_BMCR, bmcr);
4099 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4100 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4101 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4108 bmcr &= ~BMCR_SPEED1000;
4109 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4111 if (tp->link_config.duplex == DUPLEX_FULL)
4112 new_bmcr |= BMCR_FULLDPLX;
4114 if (new_bmcr != bmcr) {
4115 /* BMCR_SPEED1000 is a reserved bit that needs
4116 * to be set on write.
4118 new_bmcr |= BMCR_SPEED1000;
4120 /* Force a linkdown */
4121 if (netif_carrier_ok(tp->dev)) {
4124 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4125 adv &= ~(ADVERTISE_1000XFULL |
4126 ADVERTISE_1000XHALF |
4128 tg3_writephy(tp, MII_ADVERTISE, adv);
4129 tg3_writephy(tp, MII_BMCR, bmcr |
4133 netif_carrier_off(tp->dev);
4135 tg3_writephy(tp, MII_BMCR, new_bmcr);
4137 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4138 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4139 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4141 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4142 bmsr |= BMSR_LSTATUS;
4144 bmsr &= ~BMSR_LSTATUS;
4146 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4150 if (bmsr & BMSR_LSTATUS) {
4151 current_speed = SPEED_1000;
4152 current_link_up = 1;
4153 if (bmcr & BMCR_FULLDPLX)
4154 current_duplex = DUPLEX_FULL;
4156 current_duplex = DUPLEX_HALF;
4161 if (bmcr & BMCR_ANENABLE) {
4164 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4165 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4166 common = local_adv & remote_adv;
4167 if (common & (ADVERTISE_1000XHALF |
4168 ADVERTISE_1000XFULL)) {
4169 if (common & ADVERTISE_1000XFULL)
4170 current_duplex = DUPLEX_FULL;
4172 current_duplex = DUPLEX_HALF;
4174 current_link_up = 0;
4179 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4180 tg3_setup_flow_control(tp, local_adv, remote_adv);
4182 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4183 if (tp->link_config.active_duplex == DUPLEX_HALF)
4184 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4186 tw32_f(MAC_MODE, tp->mac_mode);
4189 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4191 tp->link_config.active_speed = current_speed;
4192 tp->link_config.active_duplex = current_duplex;
4194 if (current_link_up != netif_carrier_ok(tp->dev)) {
4195 if (current_link_up)
4196 netif_carrier_on(tp->dev);
4198 netif_carrier_off(tp->dev);
4199 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4201 tg3_link_report(tp);
4206 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4208 if (tp->serdes_counter) {
4209 /* Give autoneg time to complete. */
4210 tp->serdes_counter--;
4214 if (!netif_carrier_ok(tp->dev) &&
4215 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4218 tg3_readphy(tp, MII_BMCR, &bmcr);
4219 if (bmcr & BMCR_ANENABLE) {
4222 /* Select shadow register 0x1f */
4223 tg3_writephy(tp, 0x1c, 0x7c00);
4224 tg3_readphy(tp, 0x1c, &phy1);
4226 /* Select expansion interrupt status register */
4227 tg3_writephy(tp, 0x17, 0x0f01);
4228 tg3_readphy(tp, 0x15, &phy2);
4229 tg3_readphy(tp, 0x15, &phy2);
4231 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4232 /* We have signal detect and not receiving
4233 * config code words, link is up by parallel detection. */
4237 bmcr &= ~BMCR_ANENABLE;
4238 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4239 tg3_writephy(tp, MII_BMCR, bmcr);
4240 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4243 } else if (netif_carrier_ok(tp->dev) &&
4244 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4245 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4248 /* Select expansion interrupt status register */
4249 tg3_writephy(tp, 0x17, 0x0f01);
4250 tg3_readphy(tp, 0x15, &phy2);
4254 /* Config code words received, turn on autoneg. */
4255 tg3_readphy(tp, MII_BMCR, &bmcr);
4256 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4258 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4264 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4268 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
4269 err = tg3_setup_fiber_phy(tp, force_reset);
4270 else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
4271 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4273 err = tg3_setup_copper_phy(tp, force_reset);
4275 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4278 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4279 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4281 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4286 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4287 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4288 tw32(GRC_MISC_CFG, val);
4291 if (tp->link_config.active_speed == SPEED_1000 &&
4292 tp->link_config.active_duplex == DUPLEX_HALF)
4293 tw32(MAC_TX_LENGTHS,
4294 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4295 (6 << TX_LENGTHS_IPG_SHIFT) |
4296 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4298 tw32(MAC_TX_LENGTHS,
4299 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4300 (6 << TX_LENGTHS_IPG_SHIFT) |
4301 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4303 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4304 if (netif_carrier_ok(tp->dev)) {
4305 tw32(HOSTCC_STAT_COAL_TICKS,
4306 tp->coal.stats_block_coalesce_usecs);
4308 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4312 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4313 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4314 if (!netif_carrier_ok(tp->dev))
4315 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4318 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4319 tw32(PCIE_PWR_MGMT_THRESH, val);
4325 /* This is called whenever we suspect that the system chipset is re-
4326 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4327 * is bogus tx completions. We try to recover by setting the
4328 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later. */
4331 static void tg3_tx_recover(struct tg3 *tp)
4333 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4334 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4336 netdev_warn(tp->dev,
4337 "The system may be re-ordering memory-mapped I/O "
4338 "cycles to the network device, attempting to recover. "
4339 "Please report the problem to the driver maintainer "
4340 "and include system chipset information.\n");
4342 spin_lock(&tp->lock);
4343 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4344 spin_unlock(&tp->lock);
4347 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4350 return tnapi->tx_pending -
4351 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
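/* tx_prod and tx_cons always stay within the ring (see NEXT_TX), and
 * the subtract-and-mask handles wrap-around.  E.g. with a 512-entry
 * ring, tx_prod == 10 and tx_cons == 500 gives (10 - 500) & 511 == 22
 * descriptors in flight, leaving tx_pending - 22 slots for
 * tg3_start_xmit().
 */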
4354 /* Tigon3 never reports partial packet sends. So we do not
4355 * need special logic to handle SKBs that have not had all
4356 * of their frags sent yet, like SunGEM does.
4358 static void tg3_tx(struct tg3_napi *tnapi)
4360 struct tg3 *tp = tnapi->tp;
4361 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4362 u32 sw_idx = tnapi->tx_cons;
4363 struct netdev_queue *txq;
4364 int index = tnapi - tp->napi;
4366 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4369 txq = netdev_get_tx_queue(tp->dev, index);
4371 while (sw_idx != hw_idx) {
4372 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4373 struct sk_buff *skb = ri->skb;
4376 if (unlikely(skb == NULL)) {
4381 pci_unmap_single(tp->pdev,
4382 pci_unmap_addr(ri, mapping),
4388 sw_idx = NEXT_TX(sw_idx);
4390 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4391 ri = &tnapi->tx_buffers[sw_idx];
4392 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4395 pci_unmap_page(tp->pdev,
4396 pci_unmap_addr(ri, mapping),
4397 skb_shinfo(skb)->frags[i].size,
4399 sw_idx = NEXT_TX(sw_idx);
4404 if (unlikely(tx_bug)) {
4410 tnapi->tx_cons = sw_idx;
4412 /* Need to make the tx_cons update visible to tg3_start_xmit()
4413 * before checking for netif_queue_stopped(). Without the
4414 * memory barrier, there is a small possibility that tg3_start_xmit()
4415 * will miss it and cause the queue to be stopped forever.
4419 if (unlikely(netif_tx_queue_stopped(txq) &&
4420 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4421 __netif_tx_lock(txq, smp_processor_id());
4422 if (netif_tx_queue_stopped(txq) &&
4423 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4424 netif_tx_wake_queue(txq);
4425 __netif_tx_unlock(txq);
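/* The wake-up above follows the usual lockless stop/wake pattern: test
 * netif_tx_queue_stopped() without the lock, then re-test it under
 * __netif_tx_lock() before waking, so the wake cannot race with
 * tg3_start_xmit() stopping the queue and leave it stranded.
 */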
4429 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4434 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4435 map_sz, PCI_DMA_FROMDEVICE);
4436 dev_kfree_skb_any(ri->skb);
4440 /* Returns size of skb allocated or < 0 on error.
4442 * We only need to fill in the address because the other members
4443 * of the RX descriptor are invariant, see tg3_init_rings.
4445 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4446 * posting buffers we only dirty the first cache line of the RX
4447 * descriptor (containing the address). Whereas for the RX status
4448 * buffers the cpu only reads the last cacheline of the RX descriptor
4449 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4451 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4452 u32 opaque_key, u32 dest_idx_unmasked)
4454 struct tg3_rx_buffer_desc *desc;
4455 struct ring_info *map, *src_map;
4456 struct sk_buff *skb;
4458 int skb_size, dest_idx;
4461 switch (opaque_key) {
4462 case RXD_OPAQUE_RING_STD:
4463 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4464 desc = &tpr->rx_std[dest_idx];
4465 map = &tpr->rx_std_buffers[dest_idx];
4466 skb_size = tp->rx_pkt_map_sz;
4469 case RXD_OPAQUE_RING_JUMBO:
4470 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4471 desc = &tpr->rx_jmb[dest_idx].std;
4472 map = &tpr->rx_jmb_buffers[dest_idx];
4473 skb_size = TG3_RX_JMB_MAP_SZ;
4480 /* Do not overwrite any of the map or rp information
4481 * until we are sure we can commit to a new buffer.
4483 * Callers depend upon this behavior and assume that
4484 * we leave everything unchanged if we fail.
4486 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4490 skb_reserve(skb, tp->rx_offset);
4492 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4493 PCI_DMA_FROMDEVICE);
4494 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4500 pci_unmap_addr_set(map, mapping, mapping);
4502 desc->addr_hi = ((u64)mapping >> 32);
4503 desc->addr_lo = ((u64)mapping & 0xffffffff);
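	/* The 64-bit DMA address is split across the two 32-bit
	 * descriptor words addr_hi/addr_lo; per the comment above this
	 * function these are the only fields that need refreshing when a
	 * new buffer is posted, the rest was set up in tg3_init_rings().
	 */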
4508 /* We only need to move over in the address because the other
4509 * members of the RX descriptor are invariant. See notes above
4510 * tg3_alloc_rx_skb for full details.
4512 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4513 struct tg3_rx_prodring_set *dpr,
4514 u32 opaque_key, int src_idx,
4515 u32 dest_idx_unmasked)
4517 struct tg3 *tp = tnapi->tp;
4518 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4519 struct ring_info *src_map, *dest_map;
4520 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4523 switch (opaque_key) {
4524 case RXD_OPAQUE_RING_STD:
4525 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4526 dest_desc = &dpr->rx_std[dest_idx];
4527 dest_map = &dpr->rx_std_buffers[dest_idx];
4528 src_desc = &spr->rx_std[src_idx];
4529 src_map = &spr->rx_std_buffers[src_idx];
4532 case RXD_OPAQUE_RING_JUMBO:
4533 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4534 dest_desc = &dpr->rx_jmb[dest_idx].std;
4535 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4536 src_desc = &spr->rx_jmb[src_idx].std;
4537 src_map = &spr->rx_jmb_buffers[src_idx];
4544 dest_map->skb = src_map->skb;
4545 pci_unmap_addr_set(dest_map, mapping,
4546 pci_unmap_addr(src_map, mapping));
4547 dest_desc->addr_hi = src_desc->addr_hi;
4548 dest_desc->addr_lo = src_desc->addr_lo;
4550 /* Ensure that the update to the skb happens after the physical
4551 * addresses have been transferred to the new BD location.
4555 src_map->skb = NULL;
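/* Illustrative sketch, not part of the driver: the ordering the comment above
 * asks for.  The descriptor address fields must be visible before the skb
 * pointer is published.  This standalone model uses C11 release/acquire
 * atomics as a stand-in for the kernel's smp_wmb()/smp_rmb() pairing; all
 * names below are hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

struct example_slot {
	uint32_t addr_hi;		/* payload: descriptor address halves */
	uint32_t addr_lo;
	_Atomic(void *) skb;		/* publication field: owning buffer */
};

static void example_publish(struct example_slot *dst, uint32_t hi, uint32_t lo,
			    void *skb)
{
	dst->addr_hi = hi;
	dst->addr_lo = lo;
	/* Release store: the address writes above cannot be reordered past
	 * the point where the skb pointer becomes visible to a consumer.
	 */
	atomic_store_explicit(&dst->skb, skb, memory_order_release);
}

static void *example_consume(struct example_slot *src, uint32_t *hi,
			     uint32_t *lo)
{
	/* Acquire load pairs with the release store in example_publish(). */
	void *skb = atomic_load_explicit(&src->skb, memory_order_acquire);

	if (skb) {
		*hi = src->addr_hi;
		*lo = src->addr_lo;
	}
	return skb;
}
#endif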
4558 /* The RX ring scheme is composed of multiple rings which post fresh
4559 * buffers to the chip, and one special ring the chip uses to report
4560 * status back to the host.
4562 * The special ring reports the status of received packets to the
4563 * host. The chip does not write into the original descriptor the
4564 * RX buffer was obtained from. The chip simply takes the original
4565 * descriptor as provided by the host, updates the status and length
4566 * field, then writes this into the next status ring entry.
4568 * Each ring the host uses to post buffers to the chip is described
4569 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4570 * it is first placed into the on-chip ram. When the packet's length
4571 * is known, it walks down the TG3_BDINFO entries to select the ring.
4572 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4573 * which is within the range of the new packet's length is chosen.
4575 * The "separate ring for rx status" scheme may sound queer, but it makes
4576 * sense from a cache coherency perspective. If only the host writes
4577 * to the buffer post rings, and only the chip writes to the rx status
4578 * rings, then cache lines never move beyond shared-modified state.
4579 * If both the host and chip were to write into the same ring, cache line
4580 * eviction could occur since both entities want it in an exclusive state.
4582 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4584 struct tg3 *tp = tnapi->tp;
4585 u32 work_mask, rx_std_posted = 0;
4586 u32 std_prod_idx, jmb_prod_idx;
4587 u32 sw_idx = tnapi->rx_rcb_ptr;
4590 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4592 hw_idx = *(tnapi->rx_rcb_prod_idx);
4594 * We need to order the read of hw_idx and the read of
4595 * the opaque cookie.
4600 std_prod_idx = tpr->rx_std_prod_idx;
4601 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4602 while (sw_idx != hw_idx && budget > 0) {
4603 struct ring_info *ri;
4604 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4606 struct sk_buff *skb;
4607 dma_addr_t dma_addr;
4608 u32 opaque_key, desc_idx, *post_ptr;
4610 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4611 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4612 if (opaque_key == RXD_OPAQUE_RING_STD) {
4613 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4614 dma_addr = pci_unmap_addr(ri, mapping);
4616 post_ptr = &std_prod_idx;
4618 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4619 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4620 dma_addr = pci_unmap_addr(ri, mapping);
4622 post_ptr = &jmb_prod_idx;
4624 goto next_pkt_nopost;
4626 work_mask |= opaque_key;
4628 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4629 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4631 tg3_recycle_rx(tnapi, tpr, opaque_key,
4632 desc_idx, *post_ptr);
4634 /* Other statistics are kept track of by the card. */
4635 tp->net_stats.rx_dropped++;
4639 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4642 if (len > RX_COPY_THRESHOLD &&
4643 tp->rx_offset == NET_IP_ALIGN) {
4644 /* rx_offset will likely not equal NET_IP_ALIGN
4645 * if this is a 5701 card running in PCI-X mode
4646 * [see tg3_get_invariants()]
4650 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4655 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4656 PCI_DMA_FROMDEVICE);
4658 /* Ensure that the update to the skb happens
4659 * after the usage of the old DMA mapping.
4667 struct sk_buff *copy_skb;
4669 tg3_recycle_rx(tnapi, tpr, opaque_key,
4670 desc_idx, *post_ptr);
4672 copy_skb = netdev_alloc_skb(tp->dev,
4673 len + TG3_RAW_IP_ALIGN);
4674 if (copy_skb == NULL)
4675 goto drop_it_no_recycle;
4677 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4678 skb_put(copy_skb, len);
4679 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4680 skb_copy_from_linear_data(skb, copy_skb->data, len);
4681 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4683 /* We'll reuse the original ring buffer. */
4687 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4688 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4689 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4690 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4691 skb->ip_summed = CHECKSUM_UNNECESSARY;
4693 skb->ip_summed = CHECKSUM_NONE;
4695 skb->protocol = eth_type_trans(skb, tp->dev);
4697 if (len > (tp->dev->mtu + ETH_HLEN) &&
4698 skb->protocol != htons(ETH_P_8021Q)) {
4703 #if TG3_VLAN_TAG_USED
4704 if (tp->vlgrp != NULL &&
4705 desc->type_flags & RXD_FLAG_VLAN) {
4706 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4707 desc->err_vlan & RXD_VLAN_MASK, skb);
4710 napi_gro_receive(&tnapi->napi, skb);
4718 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4719 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4720 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4721 tpr->rx_std_prod_idx);
4722 work_mask &= ~RXD_OPAQUE_RING_STD;
4727 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4729 /* Refresh hw_idx to see if there is new work */
4730 if (sw_idx == hw_idx) {
4731 hw_idx = *(tnapi->rx_rcb_prod_idx);
4736 /* ACK the status ring. */
4737 tnapi->rx_rcb_ptr = sw_idx;
4738 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4740 /* Refill RX ring(s). */
4741 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4742 if (work_mask & RXD_OPAQUE_RING_STD) {
4743 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4744 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4745 tpr->rx_std_prod_idx);
4747 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4748 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4749 TG3_RX_JUMBO_RING_SIZE;
4750 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4751 tpr->rx_jmb_prod_idx);
4754 } else if (work_mask) {
4755 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4756 * updated before the producer indices can be updated.
4760 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4761 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4763 if (tnapi != &tp->napi[1])
4764 napi_schedule(&tp->napi[1].napi);
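/* Illustrative sketch, not part of the driver: the loop above advances sw_idx
 * with "& (ring_size - 1)" while the producer indices use "%".  For a
 * power-of-two ring size the two are equivalent, and the mask form avoids a
 * hardware divide.  Standalone example with a hypothetical ring size.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define EXAMPLE_RING_SIZE 512	/* must be a power of two for the mask form */

int main(void)
{
	uint32_t idx;

	for (idx = 0; idx < 4 * EXAMPLE_RING_SIZE; idx++)
		assert((idx & (EXAMPLE_RING_SIZE - 1)) ==
		       (idx % EXAMPLE_RING_SIZE));
	return 0;
}
#endif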
4770 static void tg3_poll_link(struct tg3 *tp)
4772 /* handle link change and other phy events */
4773 if (!(tp->tg3_flags &
4774 (TG3_FLAG_USE_LINKCHG_REG |
4775 TG3_FLAG_POLL_SERDES))) {
4776 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4778 if (sblk->status & SD_STATUS_LINK_CHG) {
4779 sblk->status = SD_STATUS_UPDATED |
4780 (sblk->status & ~SD_STATUS_LINK_CHG);
4781 spin_lock(&tp->lock);
4782 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4784 (MAC_STATUS_SYNC_CHANGED |
4785 MAC_STATUS_CFG_CHANGED |
4786 MAC_STATUS_MI_COMPLETION |
4787 MAC_STATUS_LNKSTATE_CHANGED));
4790 tg3_setup_phy(tp, 0);
4791 spin_unlock(&tp->lock);
4796 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4797 struct tg3_rx_prodring_set *dpr,
4798 struct tg3_rx_prodring_set *spr)
4800 u32 si, di, cpycnt, src_prod_idx;
4804 src_prod_idx = spr->rx_std_prod_idx;
4806 /* Make sure updates to the rx_std_buffers[] entries and the
4807 * standard producer index are seen in the correct order.
4811 if (spr->rx_std_cons_idx == src_prod_idx)
4814 if (spr->rx_std_cons_idx < src_prod_idx)
4815 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4817 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4819 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4821 si = spr->rx_std_cons_idx;
4822 di = dpr->rx_std_prod_idx;
4824 for (i = di; i < di + cpycnt; i++) {
4825 if (dpr->rx_std_buffers[i].skb) {
4835 /* Ensure that updates to the rx_std_buffers ring and the
4836 * shadowed hardware producer ring from tg3_recycle_rx() are
4837 * ordered correctly WRT the skb check above.
4841 memcpy(&dpr->rx_std_buffers[di],
4842 &spr->rx_std_buffers[si],
4843 cpycnt * sizeof(struct ring_info));
4845 for (i = 0; i < cpycnt; i++, di++, si++) {
4846 struct tg3_rx_buffer_desc *sbd, *dbd;
4847 sbd = &spr->rx_std[si];
4848 dbd = &dpr->rx_std[di];
4849 dbd->addr_hi = sbd->addr_hi;
4850 dbd->addr_lo = sbd->addr_lo;
4853 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4855 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4860 src_prod_idx = spr->rx_jmb_prod_idx;
4862 /* Make sure updates to the rx_jmb_buffers[] entries and
4863 * the jumbo producer index are seen in the correct order.
4867 if (spr->rx_jmb_cons_idx == src_prod_idx)
4870 if (spr->rx_jmb_cons_idx < src_prod_idx)
4871 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4873 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4875 cpycnt = min(cpycnt,
4876 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4878 si = spr->rx_jmb_cons_idx;
4879 di = dpr->rx_jmb_prod_idx;
4881 for (i = di; i < di + cpycnt; i++) {
4882 if (dpr->rx_jmb_buffers[i].skb) {
4892 /* Ensure that updates to the rx_jmb_buffers ring and the
4893 * shadowed hardware producer ring from tg3_recycle_rx() are
4894 * ordered correctly WRT the skb check above.
4898 memcpy(&dpr->rx_jmb_buffers[di],
4899 &spr->rx_jmb_buffers[si],
4900 cpycnt * sizeof(struct ring_info));
4902 for (i = 0; i < cpycnt; i++, di++, si++) {
4903 struct tg3_rx_buffer_desc *sbd, *dbd;
4904 sbd = &spr->rx_jmb[si].std;
4905 dbd = &dpr->rx_jmb[di].std;
4906 dbd->addr_hi = sbd->addr_hi;
4907 dbd->addr_lo = sbd->addr_lo;
4910 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4911 TG3_RX_JUMBO_RING_SIZE;
4912 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4913 TG3_RX_JUMBO_RING_SIZE;
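/* Illustrative sketch, not part of the driver: how the copy count above is
 * chosen.  Entries are moved in contiguous runs, so the count is bounded by
 * the distance to the source producer (or to the end of the source ring when
 * the data wraps) and by the room left before the destination ring wraps.
 * Standalone helper with hypothetical names; ring_size is a power of two.
 */
#if 0
#include <stdint.h>

static uint32_t example_min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

static uint32_t example_copy_count(uint32_t src_cons, uint32_t src_prod,
				   uint32_t dst_prod, uint32_t ring_size)
{
	uint32_t cpycnt;

	if (src_cons == src_prod)
		return 0;			/* nothing to transfer */

	if (src_cons < src_prod)
		cpycnt = src_prod - src_cons;	/* straight run */
	else
		cpycnt = ring_size - src_cons;	/* run up to the wrap point */

	/* Never write past the end of the destination ring in one memcpy(). */
	return example_min_u32(cpycnt, ring_size - dst_prod);
}
#endif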
4919 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4921 struct tg3 *tp = tnapi->tp;
4923 /* run TX completion thread */
4924 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4926 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4930 /* run RX thread, within the bounds set by NAPI.
4931 * All RX "locking" is done by ensuring outside
4932 * code synchronizes with tg3->napi.poll()
4934 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4935 work_done += tg3_rx(tnapi, budget - work_done);
4937 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4938 struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
4940 u32 std_prod_idx = dpr->rx_std_prod_idx;
4941 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4943 for (i = 1; i < tp->irq_cnt; i++)
4944 err |= tg3_rx_prodring_xfer(tp, dpr,
4945 tp->napi[i].prodring);
4949 if (std_prod_idx != dpr->rx_std_prod_idx)
4950 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4951 dpr->rx_std_prod_idx);
4953 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4954 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4955 dpr->rx_jmb_prod_idx);
4960 tw32_f(HOSTCC_MODE, tp->coal_now);
4966 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4968 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4969 struct tg3 *tp = tnapi->tp;
4971 struct tg3_hw_status *sblk = tnapi->hw_status;
4974 work_done = tg3_poll_work(tnapi, work_done, budget);
4976 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4979 if (unlikely(work_done >= budget))
4982 /* tp->last_tag is used in tg3_int_reenable() below
4983 * to tell the hw how much work has been processed,
4984 * so we must read it before checking for more work.
4986 tnapi->last_tag = sblk->status_tag;
4987 tnapi->last_irq_tag = tnapi->last_tag;
4990 /* check for RX/TX work to do */
4991 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4992 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
4993 napi_complete(napi);
4994 /* Reenable interrupts. */
4995 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5004 /* work_done is guaranteed to be less than budget. */
5005 napi_complete(napi);
5006 schedule_work(&tp->reset_task);
5010 static int tg3_poll(struct napi_struct *napi, int budget)
5012 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5013 struct tg3 *tp = tnapi->tp;
5015 struct tg3_hw_status *sblk = tnapi->hw_status;
5020 work_done = tg3_poll_work(tnapi, work_done, budget);
5022 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5025 if (unlikely(work_done >= budget))
5028 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5029 /* tp->last_tag is used in tg3_int_reenable() below
5030 * to tell the hw how much work has been processed,
5031 * so we must read it before checking for more work.
5033 tnapi->last_tag = sblk->status_tag;
5034 tnapi->last_irq_tag = tnapi->last_tag;
5037 sblk->status &= ~SD_STATUS_UPDATED;
5039 if (likely(!tg3_has_work(tnapi))) {
5040 napi_complete(napi);
5041 tg3_int_reenable(tnapi);
5049 /* work_done is guaranteed to be less than budget. */
5050 napi_complete(napi);
5051 schedule_work(&tp->reset_task);
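/* Illustrative sketch, not part of the driver: the NAPI contract tg3_poll()
 * follows.  Work is bounded by the budget; only when the budget is not
 * exhausted may the poller complete and re-enable the device interrupt.
 * Everything below is a hypothetical standalone model, not kernel code.
 */
#if 0
#include <stdio.h>

static int example_pending = 23;		/* packets waiting in the ring */

static int example_process_one(void)		/* handle a single packet */
{
	if (!example_pending)
		return 0;
	example_pending--;
	return 1;
}

static int example_poll(int budget)
{
	int work_done = 0;

	while (work_done < budget && example_process_one())
		work_done++;

	if (work_done < budget) {
		/* Ring drained: stop polling and unmask the interrupt. */
		printf("complete, re-enable irq (work_done=%d)\n", work_done);
	}
	/* Otherwise stay in polling mode; the core will call us again. */
	return work_done;
}

int main(void)
{
	while (example_poll(16) == 16)
		;
	return 0;
}
#endif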
5055 static void tg3_irq_quiesce(struct tg3 *tp)
5059 BUG_ON(tp->irq_sync);
5064 for (i = 0; i < tp->irq_cnt; i++)
5065 synchronize_irq(tp->napi[i].irq_vec);
5068 static inline int tg3_irq_sync(struct tg3 *tp)
5070 return tp->irq_sync;
5073 /* Fully shut down all tg3 driver activity elsewhere in the system.
5074 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5075 * with as well. Most of the time, this is not necessary except when
5076 * shutting down the device.
5078 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5080 spin_lock_bh(&tp->lock);
5082 tg3_irq_quiesce(tp);
5085 static inline void tg3_full_unlock(struct tg3 *tp)
5087 spin_unlock_bh(&tp->lock);
5090 /* One-shot MSI handler - Chip automatically disables interrupt
5091 * after sending MSI so driver doesn't have to do it.
5093 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5095 struct tg3_napi *tnapi = dev_id;
5096 struct tg3 *tp = tnapi->tp;
5098 prefetch(tnapi->hw_status);
5100 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5102 if (likely(!tg3_irq_sync(tp)))
5103 napi_schedule(&tnapi->napi);
5108 /* MSI ISR - No need to check for interrupt sharing and no need to
5109 * flush status block and interrupt mailbox. PCI ordering rules
5110 * guarantee that MSI will arrive after the status block.
5112 static irqreturn_t tg3_msi(int irq, void *dev_id)
5114 struct tg3_napi *tnapi = dev_id;
5115 struct tg3 *tp = tnapi->tp;
5117 prefetch(tnapi->hw_status);
5119 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5121 * Writing any value to intr-mbox-0 clears PCI INTA# and
5122 * chip-internal interrupt pending events.
5123 * Writing non-zero to intr-mbox-0 additionally tells the
5124 * NIC to stop sending us irqs, engaging "in-intr-handler"
5127 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5128 if (likely(!tg3_irq_sync(tp)))
5129 napi_schedule(&tnapi->napi);
5131 return IRQ_RETVAL(1);
5134 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5136 struct tg3_napi *tnapi = dev_id;
5137 struct tg3 *tp = tnapi->tp;
5138 struct tg3_hw_status *sblk = tnapi->hw_status;
5139 unsigned int handled = 1;
5141 /* In INTx mode, it is possible for the interrupt to arrive at
5142 * the CPU before the status block, posted prior to the interrupt, is visible.
5143 * Reading the PCI State register will confirm whether the
5144 * interrupt is ours and will flush the status block.
5146 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5147 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5148 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5155 * Writing any value to intr-mbox-0 clears PCI INTA# and
5156 * chip-internal interrupt pending events.
5157 * Writing non-zero to intr-mbox-0 additionally tells the
5158 * NIC to stop sending us irqs, engaging "in-intr-handler"
5161 * Flush the mailbox to de-assert the IRQ immediately to prevent
5162 * spurious interrupts. The flush impacts performance but
5163 * excessive spurious interrupts can be worse in some cases.
5165 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5166 if (tg3_irq_sync(tp))
5168 sblk->status &= ~SD_STATUS_UPDATED;
5169 if (likely(tg3_has_work(tnapi))) {
5170 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5171 napi_schedule(&tnapi->napi);
5173 /* No work, shared interrupt perhaps? re-enable
5174 * interrupts, and flush that PCI write
5176 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5180 return IRQ_RETVAL(handled);
5183 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5185 struct tg3_napi *tnapi = dev_id;
5186 struct tg3 *tp = tnapi->tp;
5187 struct tg3_hw_status *sblk = tnapi->hw_status;
5188 unsigned int handled = 1;
5190 /* In INTx mode, it is possible for the interrupt to arrive at
5191 * the CPU before the status block, posted prior to the interrupt, is visible.
5192 * Reading the PCI State register will confirm whether the
5193 * interrupt is ours and will flush the status block.
5195 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5196 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5197 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5204 * writing any value to intr-mbox-0 clears PCI INTA# and
5205 * chip-internal interrupt pending events.
5206 * writing non-zero to intr-mbox-0 additionally tells the
5207 * NIC to stop sending us irqs, engaging "in-intr-handler"
5210 * Flush the mailbox to de-assert the IRQ immediately to prevent
5211 * spurious interrupts. The flush impacts performance but
5212 * excessive spurious interrupts can be worse in some cases.
5214 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5217 * In a shared interrupt configuration, sometimes other devices'
5218 * interrupts will scream. We record the current status tag here
5219 * so that the above check can report that the screaming interrupts
5220 * are unhandled. Eventually they will be silenced.
5222 tnapi->last_irq_tag = sblk->status_tag;
5224 if (tg3_irq_sync(tp))
5227 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5229 napi_schedule(&tnapi->napi);
5232 return IRQ_RETVAL(handled);
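/* Illustrative sketch, not part of the driver: the tagged-status idea used
 * above, deliberately simplified.  The chip bumps a tag in the status block
 * whenever it posts new work; the ISR treats the interrupt as ours only when
 * the tag differs from the last one it saw, and remembers the tag so a
 * screaming shared line is eventually reported as unhandled.  Hypothetical
 * standalone model.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

struct example_status_block {
	uint32_t status_tag;	/* advanced by the device on new work */
};

struct example_vector {
	uint32_t last_irq_tag;	/* last tag seen by the interrupt handler */
};

/* Returns true when the interrupt carried new work for this vector. */
static bool example_isr(struct example_vector *vec,
			const struct example_status_block *sblk)
{
	if (sblk->status_tag == vec->last_irq_tag)
		return false;	/* nothing new: likely another device's irq */

	vec->last_irq_tag = sblk->status_tag;
	return true;		/* new work was posted; schedule polling */
}
#endif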
5235 /* ISR for interrupt test */
5236 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5238 struct tg3_napi *tnapi = dev_id;
5239 struct tg3 *tp = tnapi->tp;
5240 struct tg3_hw_status *sblk = tnapi->hw_status;
5242 if ((sblk->status & SD_STATUS_UPDATED) ||
5243 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5244 tg3_disable_ints(tp);
5245 return IRQ_RETVAL(1);
5247 return IRQ_RETVAL(0);
5250 static int tg3_init_hw(struct tg3 *, int);
5251 static int tg3_halt(struct tg3 *, int, int);
5253 /* Restart hardware after configuration changes, self-test, etc.
5254 * Invoked with tp->lock held.
5256 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5257 __releases(tp->lock)
5258 __acquires(tp->lock)
5262 err = tg3_init_hw(tp, reset_phy);
5265 "Failed to re-initialize device, aborting\n");
5266 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5267 tg3_full_unlock(tp);
5268 del_timer_sync(&tp->timer);
5270 tg3_napi_enable(tp);
5272 tg3_full_lock(tp, 0);
5277 #ifdef CONFIG_NET_POLL_CONTROLLER
5278 static void tg3_poll_controller(struct net_device *dev)
5281 struct tg3 *tp = netdev_priv(dev);
5283 for (i = 0; i < tp->irq_cnt; i++)
5284 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5288 static void tg3_reset_task(struct work_struct *work)
5290 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5292 unsigned int restart_timer;
5294 tg3_full_lock(tp, 0);
5296 if (!netif_running(tp->dev)) {
5297 tg3_full_unlock(tp);
5301 tg3_full_unlock(tp);
5307 tg3_full_lock(tp, 1);
5309 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5310 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5312 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5313 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5314 tp->write32_rx_mbox = tg3_write_flush_reg32;
5315 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5316 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5319 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5320 err = tg3_init_hw(tp, 1);
5324 tg3_netif_start(tp);
5327 mod_timer(&tp->timer, jiffies + 1);
5330 tg3_full_unlock(tp);
5336 static void tg3_dump_short_state(struct tg3 *tp)
5338 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5339 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5340 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5341 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5344 static void tg3_tx_timeout(struct net_device *dev)
5346 struct tg3 *tp = netdev_priv(dev);
5348 if (netif_msg_tx_err(tp)) {
5349 netdev_err(dev, "transmit timed out, resetting\n");
5350 tg3_dump_short_state(tp);
5353 schedule_work(&tp->reset_task);
5356 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5357 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5359 u32 base = (u32) mapping & 0xffffffff;
5361 return ((base > 0xffffdcc0) &&
5362 (base + len + 8 < base));
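/* Illustrative sketch, not part of the driver: why the test above works.  A
 * buffer crosses a 4G boundary exactly when adding its length to the low 32
 * bits of the address wraps, i.e. the unsigned 32-bit sum ends up smaller
 * than the base.  The "base > 0xffffdcc0" comparison serves as a cheap
 * pre-filter that rules out addresses too far below a boundary to wrap for
 * the buffer sizes the driver uses.  Standalone example with hypothetical
 * values; the +8 matches the extra slack the driver's own test adds.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static int example_crosses_4g(uint64_t mapping, uint32_t len)
{
	uint32_t base = (uint32_t)(mapping & 0xffffffff);

	return (uint32_t)(base + len + 8) < base;
}

int main(void)
{
	/* 0xfffff000 + 0x2000 walks past the 4G mark: detected as a cross. */
	assert(example_crosses_4g(0x1fffff000ull, 0x2000));
	/* A buffer ending well below the boundary does not wrap. */
	assert(!example_crosses_4g(0x100001000ull, 0x2000));
	return 0;
}
#endif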
5365 /* Test for DMA addresses > 40-bit */
5366 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5369 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5370 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5371 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5378 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5380 /* Work around 4GB and 40-bit hardware DMA bugs. */
5381 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5382 struct sk_buff *skb, u32 last_plus_one,
5383 u32 *start, u32 base_flags, u32 mss)
5385 struct tg3 *tp = tnapi->tp;
5386 struct sk_buff *new_skb;
5387 dma_addr_t new_addr = 0;
5391 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5392 new_skb = skb_copy(skb, GFP_ATOMIC);
5394 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5396 new_skb = skb_copy_expand(skb,
5397 skb_headroom(skb) + more_headroom,
5398 skb_tailroom(skb), GFP_ATOMIC);
5404 /* New SKB is guaranteed to be linear. */
5406 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5408 /* Make sure the mapping succeeded */
5409 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5411 dev_kfree_skb(new_skb);
5414 /* Make sure new skb does not cross any 4G boundaries.
5415 * Drop the packet if it does.
5417 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5418 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5419 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5422 dev_kfree_skb(new_skb);
5425 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5426 base_flags, 1 | (mss << 1));
5427 *start = NEXT_TX(entry);
5431 /* Now clean up the sw ring entries. */
5433 while (entry != last_plus_one) {
5437 len = skb_headlen(skb);
5439 len = skb_shinfo(skb)->frags[i-1].size;
5441 pci_unmap_single(tp->pdev,
5442 pci_unmap_addr(&tnapi->tx_buffers[entry],
5444 len, PCI_DMA_TODEVICE);
5446 tnapi->tx_buffers[entry].skb = new_skb;
5447 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5450 tnapi->tx_buffers[entry].skb = NULL;
5452 entry = NEXT_TX(entry);
5461 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5462 dma_addr_t mapping, int len, u32 flags,
5465 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5466 int is_end = (mss_and_is_end & 0x1);
5467 u32 mss = (mss_and_is_end >> 1);
5471 flags |= TXD_FLAG_END;
5472 if (flags & TXD_FLAG_VLAN) {
5473 vlan_tag = flags >> 16;
5476 vlan_tag |= (mss << TXD_MSS_SHIFT);
5478 txd->addr_hi = ((u64) mapping >> 32);
5479 txd->addr_lo = ((u64) mapping & 0xffffffff);
5480 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5481 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
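/* Illustrative sketch, not part of the driver: the mss_and_is_end encoding
 * used by the callers of tg3_set_txd().  Bit 0 carries "this is the last
 * descriptor of the packet" and the MSS rides in the bits above it, so a
 * single u32 argument covers both.  Standalone round-trip with hypothetical
 * helper names.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t example_pack(uint32_t mss, int is_end)
{
	return (is_end ? 1u : 0u) | (mss << 1);
}

static void example_unpack(uint32_t packed, uint32_t *mss, int *is_end)
{
	*is_end = packed & 0x1;
	*mss = packed >> 1;
}

int main(void)
{
	uint32_t mss;
	int is_end;

	example_unpack(example_pack(1460, 1), &mss, &is_end);
	assert(mss == 1460 && is_end == 1);
	return 0;
}
#endif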
5484 /* hard_start_xmit for devices that don't have any bugs and
5485 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5487 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5488 struct net_device *dev)
5490 struct tg3 *tp = netdev_priv(dev);
5491 u32 len, entry, base_flags, mss;
5493 struct tg3_napi *tnapi;
5494 struct netdev_queue *txq;
5495 unsigned int i, last;
5497 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5498 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5499 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5502 /* We are running in BH-disabled context with netif_tx_lock held,
5503 * and TX reclaim runs via tp->napi.poll inside of a software
5504 * interrupt. Furthermore, IRQ processing runs lockless so we have
5505 * no IRQ context deadlocks to worry about either. Rejoice!
5507 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5508 if (!netif_tx_queue_stopped(txq)) {
5509 netif_tx_stop_queue(txq);
5511 /* This is a hard error, log it. */
5513 "BUG! Tx Ring full when queue awake!\n");
5515 return NETDEV_TX_BUSY;
5518 entry = tnapi->tx_prod;
5521 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5522 int tcp_opt_len, ip_tcp_len;
5525 if (skb_header_cloned(skb) &&
5526 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5531 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5532 hdrlen = skb_headlen(skb) - ETH_HLEN;
5534 struct iphdr *iph = ip_hdr(skb);
5536 tcp_opt_len = tcp_optlen(skb);
5537 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5540 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5541 hdrlen = ip_tcp_len + tcp_opt_len;
5544 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5545 mss |= (hdrlen & 0xc) << 12;
5547 base_flags |= 0x00000010;
5548 base_flags |= (hdrlen & 0x3e0) << 5;
5552 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5553 TXD_FLAG_CPU_POST_DMA);
5555 tcp_hdr(skb)->check = 0;
5557 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5558 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5561 #if TG3_VLAN_TAG_USED
5562 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5563 base_flags |= (TXD_FLAG_VLAN |
5564 (vlan_tx_tag_get(skb) << 16));
5567 len = skb_headlen(skb);
5569 /* Queue skb data, a.k.a. the main skb fragment. */
5570 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5571 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5576 tnapi->tx_buffers[entry].skb = skb;
5577 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5579 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5580 !mss && skb->len > ETH_DATA_LEN)
5581 base_flags |= TXD_FLAG_JMB_PKT;
5583 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5584 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5586 entry = NEXT_TX(entry);
5588 /* Now loop through additional data fragments, and queue them. */
5589 if (skb_shinfo(skb)->nr_frags > 0) {
5590 last = skb_shinfo(skb)->nr_frags - 1;
5591 for (i = 0; i <= last; i++) {
5592 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5595 mapping = pci_map_page(tp->pdev,
5598 len, PCI_DMA_TODEVICE);
5599 if (pci_dma_mapping_error(tp->pdev, mapping))
5602 tnapi->tx_buffers[entry].skb = NULL;
5603 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5606 tg3_set_txd(tnapi, entry, mapping, len,
5607 base_flags, (i == last) | (mss << 1));
5609 entry = NEXT_TX(entry);
5613 /* Packets are ready, update Tx producer idx, both locally and on the card. */
5614 tw32_tx_mbox(tnapi->prodmbox, entry);
5616 tnapi->tx_prod = entry;
5617 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5618 netif_tx_stop_queue(txq);
5619 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5620 netif_tx_wake_queue(txq);
5626 return NETDEV_TX_OK;
5630 entry = tnapi->tx_prod;
5631 tnapi->tx_buffers[entry].skb = NULL;
5632 pci_unmap_single(tp->pdev,
5633 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5636 for (i = 0; i <= last; i++) {
5637 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5638 entry = NEXT_TX(entry);
5640 pci_unmap_page(tp->pdev,
5641 pci_unmap_addr(&tnapi->tx_buffers[entry],
5643 frag->size, PCI_DMA_TODEVICE);
5647 return NETDEV_TX_OK;
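/* Illustrative sketch, not part of the driver: the stop-then-recheck pattern
 * used above when TX space runs low.  The queue is stopped first; only then
 * is the free space re-read, so a completion that races in between cannot
 * leave the queue stopped forever.  Hypothetical standalone model with a
 * fake ring and queue.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_RING_SIZE	512
#define EXAMPLE_MAX_FRAGS	18	/* worst-case descriptors per packet */
#define EXAMPLE_WAKE_THRESH	(EXAMPLE_RING_SIZE / 4)

struct example_txq {
	unsigned int avail;	/* free descriptors, updated by completions */
	bool stopped;
};

static void example_after_enqueue(struct example_txq *q)
{
	if (q->avail <= EXAMPLE_MAX_FRAGS + 1) {
		q->stopped = true;
		/* Re-check: a completion may have freed space after the
		 * low-water test but before the stop took effect.
		 */
		if (q->avail > EXAMPLE_WAKE_THRESH)
			q->stopped = false;
	}
}

int main(void)
{
	struct example_txq q = { .avail = 10, .stopped = false };

	example_after_enqueue(&q);
	printf("stopped=%d\n", q.stopped);
	return 0;
}
#endif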
5650 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5651 struct net_device *);
5653 /* Use GSO to work around a rare TSO bug that may be triggered when the
5654 * TSO header is greater than 80 bytes.
5656 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5658 struct sk_buff *segs, *nskb;
5659 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5661 /* Estimate the number of fragments in the worst case */
5662 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5663 netif_stop_queue(tp->dev);
5664 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5665 return NETDEV_TX_BUSY;
5667 netif_wake_queue(tp->dev);
5670 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5672 goto tg3_tso_bug_end;
5678 tg3_start_xmit_dma_bug(nskb, tp->dev);
5684 return NETDEV_TX_OK;
5687 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5688 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5690 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5691 struct net_device *dev)
5693 struct tg3 *tp = netdev_priv(dev);
5694 u32 len, entry, base_flags, mss;
5695 int would_hit_hwbug;
5697 struct tg3_napi *tnapi;
5698 struct netdev_queue *txq;
5699 unsigned int i, last;
5701 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5702 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5703 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5706 /* We are running in BH-disabled context with netif_tx_lock held,
5707 * and TX reclaim runs via tp->napi.poll inside of a software
5708 * interrupt. Furthermore, IRQ processing runs lockless so we have
5709 * no IRQ context deadlocks to worry about either. Rejoice!
5711 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5712 if (!netif_tx_queue_stopped(txq)) {
5713 netif_tx_stop_queue(txq);
5715 /* This is a hard error, log it. */
5717 "BUG! Tx Ring full when queue awake!\n");
5719 return NETDEV_TX_BUSY;
5722 entry = tnapi->tx_prod;
5724 if (skb->ip_summed == CHECKSUM_PARTIAL)
5725 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5727 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5729 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5731 if (skb_header_cloned(skb) &&
5732 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5737 tcp_opt_len = tcp_optlen(skb);
5738 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5740 hdr_len = ip_tcp_len + tcp_opt_len;
5741 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5742 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5743 return (tg3_tso_bug(tp, skb));
5745 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5746 TXD_FLAG_CPU_POST_DMA);
5750 iph->tot_len = htons(mss + hdr_len);
5751 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5752 tcp_hdr(skb)->check = 0;
5753 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5755 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5760 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5761 mss |= (hdr_len & 0xc) << 12;
5763 base_flags |= 0x00000010;
5764 base_flags |= (hdr_len & 0x3e0) << 5;
5765 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5766 mss |= hdr_len << 9;
5767 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5768 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5769 if (tcp_opt_len || iph->ihl > 5) {
5772 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5773 mss |= (tsflags << 11);
5776 if (tcp_opt_len || iph->ihl > 5) {
5779 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5780 base_flags |= tsflags << 12;
5784 #if TG3_VLAN_TAG_USED
5785 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5786 base_flags |= (TXD_FLAG_VLAN |
5787 (vlan_tx_tag_get(skb) << 16));
5790 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5791 !mss && skb->len > ETH_DATA_LEN)
5792 base_flags |= TXD_FLAG_JMB_PKT;
5794 len = skb_headlen(skb);
5796 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5797 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5802 tnapi->tx_buffers[entry].skb = skb;
5803 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5805 would_hit_hwbug = 0;
5807 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5808 would_hit_hwbug = 1;
5810 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5811 tg3_4g_overflow_test(mapping, len))
5812 would_hit_hwbug = 1;
5814 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5815 tg3_40bit_overflow_test(tp, mapping, len))
5816 would_hit_hwbug = 1;
5818 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5819 would_hit_hwbug = 1;
5821 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5822 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5824 entry = NEXT_TX(entry);
5826 /* Now loop through additional data fragments, and queue them. */
5827 if (skb_shinfo(skb)->nr_frags > 0) {
5828 last = skb_shinfo(skb)->nr_frags - 1;
5829 for (i = 0; i <= last; i++) {
5830 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5833 mapping = pci_map_page(tp->pdev,
5836 len, PCI_DMA_TODEVICE);
5838 tnapi->tx_buffers[entry].skb = NULL;
5839 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5841 if (pci_dma_mapping_error(tp->pdev, mapping))
5844 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5846 would_hit_hwbug = 1;
5848 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5849 tg3_4g_overflow_test(mapping, len))
5850 would_hit_hwbug = 1;
5852 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5853 tg3_40bit_overflow_test(tp, mapping, len))
5854 would_hit_hwbug = 1;
5856 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5857 tg3_set_txd(tnapi, entry, mapping, len,
5858 base_flags, (i == last)|(mss << 1));
5860 tg3_set_txd(tnapi, entry, mapping, len,
5861 base_flags, (i == last));
5863 entry = NEXT_TX(entry);
5867 if (would_hit_hwbug) {
5868 u32 last_plus_one = entry;
5871 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5872 start &= (TG3_TX_RING_SIZE - 1);
5874 /* If the workaround fails due to memory/mapping
5875 * failure, silently drop this packet.
5877 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5878 &start, base_flags, mss))
5884 /* Packets are ready, update Tx producer idx, both locally and on the card. */
5885 tw32_tx_mbox(tnapi->prodmbox, entry);
5887 tnapi->tx_prod = entry;
5888 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5889 netif_tx_stop_queue(txq);
5890 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5891 netif_tx_wake_queue(txq);
5897 return NETDEV_TX_OK;
5901 entry = tnapi->tx_prod;
5902 tnapi->tx_buffers[entry].skb = NULL;
5903 pci_unmap_single(tp->pdev,
5904 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5907 for (i = 0; i <= last; i++) {
5908 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5909 entry = NEXT_TX(entry);
5911 pci_unmap_page(tp->pdev,
5912 pci_unmap_addr(&tnapi->tx_buffers[entry],
5914 frag->size, PCI_DMA_TODEVICE);
5918 return NETDEV_TX_OK;
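/* Illustrative sketch, not part of the driver: the bounce-copy idea behind
 * tigon3_dma_hwbug_workaround().  When a buffer's DMA address trips a
 * hardware bug, the data is copied into a freshly allocated linear buffer
 * and re-checked; if even the copy is unusable the packet is dropped rather
 * than handed to broken hardware.  Standalone model with hypothetical names;
 * example_addr_is_bad() stands in for the 4G/40-bit tests.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int example_addr_is_bad(const void *buf, size_t len)
{
	uint32_t base = (uint32_t)(uintptr_t)buf;

	return (uint32_t)(base + len + 8) < base;	/* 4G-style wrap test */
}

/* Returns a usable buffer (possibly a copy) or NULL if the packet must be
 * dropped.  The caller owns and frees the returned copy.
 */
static void *example_bounce(const void *data, size_t len)
{
	void *copy;

	if (!example_addr_is_bad(data, len))
		return (void *)data;		/* original buffer is fine */

	copy = malloc(len);
	if (!copy)
		return NULL;			/* allocation failed: drop */

	memcpy(copy, data, len);
	if (example_addr_is_bad(copy, len)) {
		free(copy);			/* still unusable: drop */
		return NULL;
	}
	return copy;
}
#endif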
5921 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5926 if (new_mtu > ETH_DATA_LEN) {
5927 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5928 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5929 ethtool_op_set_tso(dev, 0);
5931 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5934 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5935 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5936 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5940 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5942 struct tg3 *tp = netdev_priv(dev);
5945 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5948 if (!netif_running(dev)) {
5949 /* We'll just catch it later when the device is brought up. */
5952 tg3_set_mtu(dev, tp, new_mtu);
5960 tg3_full_lock(tp, 1);
5962 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5964 tg3_set_mtu(dev, tp, new_mtu);
5966 err = tg3_restart_hw(tp, 0);
5969 tg3_netif_start(tp);
5971 tg3_full_unlock(tp);
5979 static void tg3_rx_prodring_free(struct tg3 *tp,
5980 struct tg3_rx_prodring_set *tpr)
5984 if (tpr != &tp->prodring[0]) {
5985 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
5986 i = (i + 1) % TG3_RX_RING_SIZE)
5987 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5990 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5991 for (i = tpr->rx_jmb_cons_idx;
5992 i != tpr->rx_jmb_prod_idx;
5993 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
5994 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6002 for (i = 0; i < TG3_RX_RING_SIZE; i++)
6003 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6006 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6007 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
6008 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6013 /* Initialize rx rings for packet processing.
6015 * The chip has been shut down and the driver detached from
6016 * the networking, so no interrupts or new tx packets will
6017 * end up in the driver. tp->{tx,}lock are held and thus we may sleep.
6020 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6021 struct tg3_rx_prodring_set *tpr)
6023 u32 i, rx_pkt_dma_sz;
6025 tpr->rx_std_cons_idx = 0;
6026 tpr->rx_std_prod_idx = 0;
6027 tpr->rx_jmb_cons_idx = 0;
6028 tpr->rx_jmb_prod_idx = 0;
6030 if (tpr != &tp->prodring[0]) {
6031 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
6032 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6033 memset(&tpr->rx_jmb_buffers[0], 0,
6034 TG3_RX_JMB_BUFF_RING_SIZE);
6038 /* Zero out all descriptors. */
6039 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
6041 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6042 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6043 tp->dev->mtu > ETH_DATA_LEN)
6044 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6045 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6047 /* Initialize invariants of the rings; we only set this
6048 * stuff once. This works because the card does not
6049 * write into the rx buffer posting rings.
6051 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
6052 struct tg3_rx_buffer_desc *rxd;
6054 rxd = &tpr->rx_std[i];
6055 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6056 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6057 rxd->opaque = (RXD_OPAQUE_RING_STD |
6058 (i << RXD_OPAQUE_INDEX_SHIFT));
6061 /* Now allocate fresh SKBs for each rx ring. */
6062 for (i = 0; i < tp->rx_pending; i++) {
6063 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6064 netdev_warn(tp->dev,
6065 "Using a smaller RX standard ring. Only "
6066 "%d out of %d buffers were allocated "
6067 "successfully\n", i, tp->rx_pending);
6075 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6078 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6080 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6083 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6084 struct tg3_rx_buffer_desc *rxd;
6086 rxd = &tpr->rx_jmb[i].std;
6087 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6088 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6090 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6091 (i << RXD_OPAQUE_INDEX_SHIFT));
6094 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6095 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6096 netdev_warn(tp->dev,
6097 "Using a smaller RX jumbo ring. Only %d "
6098 "out of %d buffers were allocated "
6099 "successfully\n", i, tp->rx_jumbo_pending);
6102 tp->rx_jumbo_pending = i;
6111 tg3_rx_prodring_free(tp, tpr);
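/* Illustrative sketch, not part of the driver: the "opaque" cookie set up in
 * the loops above.  Each posted descriptor remembers which producer ring it
 * came from and at what index, so when the chip echoes the descriptor back
 * on the status ring, tg3_rx() can find the matching ring_info with two mask
 * operations.  The constants below are hypothetical, not the hardware ones.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define EX_OPAQUE_INDEX_MASK	0x0000ffff
#define EX_OPAQUE_INDEX_SHIFT	0
#define EX_OPAQUE_RING_MASK	0x00030000
#define EX_OPAQUE_RING_STD	0x00010000
#define EX_OPAQUE_RING_JUMBO	0x00020000

static uint32_t example_encode(uint32_t ring, uint32_t index)
{
	return ring | (index << EX_OPAQUE_INDEX_SHIFT);
}

int main(void)
{
	uint32_t opaque = example_encode(EX_OPAQUE_RING_JUMBO, 137);

	assert((opaque & EX_OPAQUE_RING_MASK) == EX_OPAQUE_RING_JUMBO);
	assert(((opaque & EX_OPAQUE_INDEX_MASK) >> EX_OPAQUE_INDEX_SHIFT) == 137);
	return 0;
}
#endif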
6115 static void tg3_rx_prodring_fini(struct tg3 *tp,
6116 struct tg3_rx_prodring_set *tpr)
6118 kfree(tpr->rx_std_buffers);
6119 tpr->rx_std_buffers = NULL;
6120 kfree(tpr->rx_jmb_buffers);
6121 tpr->rx_jmb_buffers = NULL;
6123 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6124 tpr->rx_std, tpr->rx_std_mapping);
6128 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6129 tpr->rx_jmb, tpr->rx_jmb_mapping);
6134 static int tg3_rx_prodring_init(struct tg3 *tp,
6135 struct tg3_rx_prodring_set *tpr)
6137 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6138 if (!tpr->rx_std_buffers)
6141 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6142 &tpr->rx_std_mapping);
6146 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6147 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6149 if (!tpr->rx_jmb_buffers)
6152 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6153 TG3_RX_JUMBO_RING_BYTES,
6154 &tpr->rx_jmb_mapping);
6162 tg3_rx_prodring_fini(tp, tpr);
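/* Illustrative sketch, not part of the driver: the init/fini pairing used by
 * tg3_rx_prodring_init()/_fini().  On any allocation failure, init calls the
 * common fini path, which frees only what was actually allocated and NULLs
 * the pointers, so the teardown is safe to run on a partially built object.
 * Standalone model with hypothetical names and plain calloc()/free().
 */
#if 0
#include <stdlib.h>

struct example_ring_set {
	void *buffers;		/* software shadow array */
	void *descriptors;	/* would be DMA-coherent memory in the driver */
};

static void example_fini(struct example_ring_set *rs)
{
	free(rs->buffers);
	rs->buffers = NULL;
	free(rs->descriptors);
	rs->descriptors = NULL;
}

static int example_init(struct example_ring_set *rs, size_t nbuf, size_t ndesc)
{
	rs->buffers = NULL;
	rs->descriptors = NULL;

	rs->buffers = calloc(nbuf, 1);
	if (!rs->buffers)
		goto err;

	rs->descriptors = calloc(ndesc, 1);
	if (!rs->descriptors)
		goto err;

	return 0;

err:
	example_fini(rs);	/* frees whatever part was set up */
	return -1;
}
#endif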
6166 /* Free up pending packets in all rx/tx rings.
6168 * The chip has been shut down and the driver detached from
6169 * the networking, so no interrupts or new tx packets will
6170 * end up in the driver. tp->{tx,}lock is not held and we are not
6171 * in an interrupt context and thus may sleep.
6173 static void tg3_free_rings(struct tg3 *tp)
6177 for (j = 0; j < tp->irq_cnt; j++) {
6178 struct tg3_napi *tnapi = &tp->napi[j];
6180 if (!tnapi->tx_buffers)
6183 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6184 struct ring_info *txp;
6185 struct sk_buff *skb;
6188 txp = &tnapi->tx_buffers[i];
6196 pci_unmap_single(tp->pdev,
6197 pci_unmap_addr(txp, mapping),
6204 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6205 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6206 pci_unmap_page(tp->pdev,
6207 pci_unmap_addr(txp, mapping),
6208 skb_shinfo(skb)->frags[k].size,
6213 dev_kfree_skb_any(skb);
6216 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6220 /* Initialize tx/rx rings for packet processing.
6222 * The chip has been shut down and the driver detached from
6223 * the networking, so no interrupts or new tx packets will
6224 * end up in the driver. tp->{tx,}lock are held and thus we may sleep.
6227 static int tg3_init_rings(struct tg3 *tp)
6231 /* Free up all the SKBs. */
6234 for (i = 0; i < tp->irq_cnt; i++) {
6235 struct tg3_napi *tnapi = &tp->napi[i];
6237 tnapi->last_tag = 0;
6238 tnapi->last_irq_tag = 0;
6239 tnapi->hw_status->status = 0;
6240 tnapi->hw_status->status_tag = 0;
6241 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6246 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6248 tnapi->rx_rcb_ptr = 0;
6250 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6252 if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
6262 * Must not be invoked with interrupt sources disabled and
6263 * the hardware shut down.
6265 static void tg3_free_consistent(struct tg3 *tp)
6269 for (i = 0; i < tp->irq_cnt; i++) {
6270 struct tg3_napi *tnapi = &tp->napi[i];
6272 if (tnapi->tx_ring) {
6273 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6274 tnapi->tx_ring, tnapi->tx_desc_mapping);
6275 tnapi->tx_ring = NULL;
6278 kfree(tnapi->tx_buffers);
6279 tnapi->tx_buffers = NULL;
6281 if (tnapi->rx_rcb) {
6282 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6284 tnapi->rx_rcb_mapping);
6285 tnapi->rx_rcb = NULL;
6288 if (tnapi->hw_status) {
6289 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6291 tnapi->status_mapping);
6292 tnapi->hw_status = NULL;
6297 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6298 tp->hw_stats, tp->stats_mapping);
6299 tp->hw_stats = NULL;
6302 for (i = 0; i < tp->irq_cnt; i++)
6303 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6307 * Must not be invoked with interrupt sources disabled and
6308 * the hardware shut down. Can sleep.
6310 static int tg3_alloc_consistent(struct tg3 *tp)
6314 for (i = 0; i < tp->irq_cnt; i++) {
6315 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6319 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6320 sizeof(struct tg3_hw_stats),
6321 &tp->stats_mapping);
6325 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6327 for (i = 0; i < tp->irq_cnt; i++) {
6328 struct tg3_napi *tnapi = &tp->napi[i];
6329 struct tg3_hw_status *sblk;
6331 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6333 &tnapi->status_mapping);
6334 if (!tnapi->hw_status)
6337 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6338 sblk = tnapi->hw_status;
6340 /* If multivector TSS is enabled, vector 0 does not handle
6341 * tx interrupts. Don't allocate any resources for it.
6343 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6344 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6345 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6348 if (!tnapi->tx_buffers)
6351 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6353 &tnapi->tx_desc_mapping);
6354 if (!tnapi->tx_ring)
6359 * When RSS is enabled, the status block format changes
6360 * slightly. The "rx_jumbo_consumer", "reserved",
6361 * and "rx_mini_consumer" members get mapped to the
6362 * other three rx return ring producer indexes.
6366 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6369 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6372 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6375 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6379 tnapi->prodring = &tp->prodring[i];
6382 * If multivector RSS is enabled, vector 0 does not handle
6383 * rx or tx interrupts. Don't allocate any resources for it.
6385 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6388 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6389 TG3_RX_RCB_RING_BYTES(tp),
6390 &tnapi->rx_rcb_mapping);
6394 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6400 tg3_free_consistent(tp);
6404 #define MAX_WAIT_CNT 1000
6406 /* To stop a block, clear the enable bit and poll till it
6407 * clears. tp->lock is held.
6409 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6414 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6421 /* We can't enable/disable these bits of the
6422 * 5705/5750, just say success.
6435 for (i = 0; i < MAX_WAIT_CNT; i++) {
6438 if ((val & enable_bit) == 0)
6442 if (i == MAX_WAIT_CNT && !silent) {
6443 dev_err(&tp->pdev->dev,
6444 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6452 /* tp->lock is held. */
6453 static int tg3_abort_hw(struct tg3 *tp, int silent)
6457 tg3_disable_ints(tp);
6459 tp->rx_mode &= ~RX_MODE_ENABLE;
6460 tw32_f(MAC_RX_MODE, tp->rx_mode);
6463 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6464 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6465 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6466 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6467 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6468 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6470 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6471 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6472 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6473 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6474 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6475 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6476 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6478 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6479 tw32_f(MAC_MODE, tp->mac_mode);
6482 tp->tx_mode &= ~TX_MODE_ENABLE;
6483 tw32_f(MAC_TX_MODE, tp->tx_mode);
6485 for (i = 0; i < MAX_WAIT_CNT; i++) {
6487 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6490 if (i >= MAX_WAIT_CNT) {
6491 dev_err(&tp->pdev->dev,
6492 "%s timed out, TX_MODE_ENABLE will not clear "
6493 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6497 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6498 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6499 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6501 tw32(FTQ_RESET, 0xffffffff);
6502 tw32(FTQ_RESET, 0x00000000);
6504 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6505 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6507 for (i = 0; i < tp->irq_cnt; i++) {
6508 struct tg3_napi *tnapi = &tp->napi[i];
6509 if (tnapi->hw_status)
6510 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6513 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6518 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6523 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6524 if (apedata != APE_SEG_SIG_MAGIC)
6527 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6528 if (!(apedata & APE_FW_STATUS_READY))
6531 /* Wait for up to 1 millisecond for APE to service previous event. */
6532 for (i = 0; i < 10; i++) {
6533 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6536 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6538 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6539 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6540 event | APE_EVENT_STATUS_EVENT_PENDING);
6542 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6544 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6550 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6551 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6554 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6559 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6563 case RESET_KIND_INIT:
6564 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6565 APE_HOST_SEG_SIG_MAGIC);
6566 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6567 APE_HOST_SEG_LEN_MAGIC);
6568 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6569 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6570 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6571 APE_HOST_DRIVER_ID_MAGIC);
6572 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6573 APE_HOST_BEHAV_NO_PHYLOCK);
6575 event = APE_EVENT_STATUS_STATE_START;
6577 case RESET_KIND_SHUTDOWN:
6578 /* With the interface we are currently using,
6579 * APE does not track driver state. Wiping
6580 * out the HOST SEGMENT SIGNATURE forces
6581 * the APE to assume OS absent status.
6583 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6585 event = APE_EVENT_STATUS_STATE_UNLOAD;
6587 case RESET_KIND_SUSPEND:
6588 event = APE_EVENT_STATUS_STATE_SUSPEND;
6594 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6596 tg3_ape_send_event(tp, event);
6599 /* tp->lock is held. */
6600 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6602 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6603 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6605 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6607 case RESET_KIND_INIT:
6608 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6612 case RESET_KIND_SHUTDOWN:
6613 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6617 case RESET_KIND_SUSPEND:
6618 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6627 if (kind == RESET_KIND_INIT ||
6628 kind == RESET_KIND_SUSPEND)
6629 tg3_ape_driver_state_change(tp, kind);
6632 /* tp->lock is held. */
6633 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6635 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6637 case RESET_KIND_INIT:
6638 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6639 DRV_STATE_START_DONE);
6642 case RESET_KIND_SHUTDOWN:
6643 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6644 DRV_STATE_UNLOAD_DONE);
6652 if (kind == RESET_KIND_SHUTDOWN)
6653 tg3_ape_driver_state_change(tp, kind);
6656 /* tp->lock is held. */
6657 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6659 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6661 case RESET_KIND_INIT:
6662 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6666 case RESET_KIND_SHUTDOWN:
6667 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6671 case RESET_KIND_SUSPEND:
6672 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6682 static int tg3_poll_fw(struct tg3 *tp)
6687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6688 /* Wait up to 20ms for init done. */
6689 for (i = 0; i < 200; i++) {
6690 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6697 /* Wait for firmware initialization to complete. */
6698 for (i = 0; i < 100000; i++) {
6699 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6700 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6705 /* Chip might not be fitted with firmware. Some Sun onboard
6706 * parts are configured like that. So don't signal the timeout
6707 * of the above loop as an error, but do report the lack of
6708 * running firmware once.
6711 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6712 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6714 netdev_info(tp->dev, "No firmware running\n");
6717 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6718 /* The 57765 A0 needs a little more
6719 * time to do some important work.
6727 /* Save PCI command register before chip reset */
6728 static void tg3_save_pci_state(struct tg3 *tp)
6730 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6733 /* Restore PCI state after chip reset */
6734 static void tg3_restore_pci_state(struct tg3 *tp)
6738 /* Re-enable indirect register accesses. */
6739 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6740 tp->misc_host_ctrl);
6742 /* Set MAX PCI retry to zero. */
6743 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6744 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6745 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6746 val |= PCISTATE_RETRY_SAME_DMA;
6747 /* Allow reads and writes to the APE register and memory space. */
6748 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6749 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6750 PCISTATE_ALLOW_APE_SHMEM_WR;
6751 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6753 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6755 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6756 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6757 pcie_set_readrq(tp->pdev, 4096);
6759 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6760 tp->pci_cacheline_sz);
6761 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6766 /* Make sure PCI-X relaxed ordering bit is clear. */
6767 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6770 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6772 pcix_cmd &= ~PCI_X_CMD_ERO;
6773 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6777 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6779 /* Chip reset on 5780 will reset MSI enable bit,
6780 * so need to restore it.
6782 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6785 pci_read_config_word(tp->pdev,
6786 tp->msi_cap + PCI_MSI_FLAGS,
6788 pci_write_config_word(tp->pdev,
6789 tp->msi_cap + PCI_MSI_FLAGS,
6790 ctrl | PCI_MSI_FLAGS_ENABLE);
6791 val = tr32(MSGINT_MODE);
6792 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6797 static void tg3_stop_fw(struct tg3 *);
6799 /* tp->lock is held. */
6800 static int tg3_chip_reset(struct tg3 *tp)
6803 void (*write_op)(struct tg3 *, u32, u32);
6808 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6810 /* No matching tg3_nvram_unlock() after this because
6811 * chip reset below will undo the nvram lock.
6813 tp->nvram_lock_cnt = 0;
6815 /* GRC_MISC_CFG core clock reset will clear the memory
6816 * enable bit in PCI register 4 and the MSI enable bit
6817 * on some chips, so we save relevant registers here.
6819 tg3_save_pci_state(tp);
6821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6822 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6823 tw32(GRC_FASTBOOT_PC, 0);
6826 * We must avoid the readl() that normally takes place.
6827 * It locks machines, causes machine checks, and other
6828 * fun things. So, temporarily disable the 5701
6829 * hardware workaround, while we do the reset.
6831 write_op = tp->write32;
6832 if (write_op == tg3_write_flush_reg32)
6833 tp->write32 = tg3_write32;
6835 /* Prevent the irq handler from reading or writing PCI registers
6836 * during chip reset when the memory enable bit in the PCI command
6837 * register may be cleared. The chip does not generate interrupts
6838 * at this time, but the irq handler may still be called due to irq
6839 * sharing or irqpoll.
6841 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6842 for (i = 0; i < tp->irq_cnt; i++) {
6843 struct tg3_napi *tnapi = &tp->napi[i];
6844 if (tnapi->hw_status) {
6845 tnapi->hw_status->status = 0;
6846 tnapi->hw_status->status_tag = 0;
6848 tnapi->last_tag = 0;
6849 tnapi->last_irq_tag = 0;
6853 for (i = 0; i < tp->irq_cnt; i++)
6854 synchronize_irq(tp->napi[i].irq_vec);
6856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6857 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6858 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6862 val = GRC_MISC_CFG_CORECLK_RESET;
6864 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6865 if (tr32(0x7e2c) == 0x60) {
6868 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6869 tw32(GRC_MISC_CFG, (1 << 29));
6874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6875 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6876 tw32(GRC_VCPU_EXT_CTRL,
6877 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6880 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6881 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6882 tw32(GRC_MISC_CFG, val);
6884 /* restore 5701 hardware bug workaround write method */
6885 tp->write32 = write_op;
6887 /* Unfortunately, we have to delay before the PCI read back.
6888 * Some 575X chips will not even respond to a PCI cfg access
6889 * when the reset command is given to the chip.
6891 * How do these hardware designers expect things to work
6892 * properly if the PCI write is posted for a long period
6893 * of time? It is always necessary to have some method by
6894 * which a register read back can occur to push out the write
6895 * that performs the reset.
6897 * For most tg3 variants the trick below was working.
6902 /* Flush PCI posted writes. The normal MMIO registers
6903 * are inaccessible at this time so this is the only
6904 * way to do this reliably (actually, this is no longer
6905 * the case, see above). I tried to use indirect
6906 * register read/write but this upset some 5701 variants.
6908 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6912 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6915 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6919 /* Wait for link training to complete. */
6920 for (i = 0; i < 5000; i++)
6923 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6924 pci_write_config_dword(tp->pdev, 0xc4,
6925 cfg_val | (1 << 15));
6928 /* Clear the "no snoop" and "relaxed ordering" bits. */
6929 pci_read_config_word(tp->pdev,
6930 tp->pcie_cap + PCI_EXP_DEVCTL,
6932 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6933 PCI_EXP_DEVCTL_NOSNOOP_EN);
6935 * Older PCIe devices only support the 128 byte
6936 * MPS setting. Enforce the restriction.
6938 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6939 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6940 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6941 pci_write_config_word(tp->pdev,
6942 tp->pcie_cap + PCI_EXP_DEVCTL,
6945 pcie_set_readrq(tp->pdev, 4096);
6947 /* Clear error status */
6948 pci_write_config_word(tp->pdev,
6949 tp->pcie_cap + PCI_EXP_DEVSTA,
6950 PCI_EXP_DEVSTA_CED |
6951 PCI_EXP_DEVSTA_NFED |
6952 PCI_EXP_DEVSTA_FED |
6953 PCI_EXP_DEVSTA_URD);
6956 tg3_restore_pci_state(tp);
6958 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6961 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6962 val = tr32(MEMARB_MODE);
6963 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6965 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6967 tw32(0x5000, 0x400);
6970 tw32(GRC_MODE, tp->grc_mode);
6972 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6975 tw32(0xc4, val | (1 << 15));
6978 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6980 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6981 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6982 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6983 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6986 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6987 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6988 tw32_f(MAC_MODE, tp->mac_mode);
6989 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6990 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6991 tw32_f(MAC_MODE, tp->mac_mode);
6992 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6993 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6994 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6995 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6996 tw32_f(MAC_MODE, tp->mac_mode);
6998 tw32_f(MAC_MODE, 0);
7001 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7003 err = tg3_poll_fw(tp);
7009 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7012 phy_addr = tp->phy_addr;
7013 tp->phy_addr = TG3_PHY_PCIE_ADDR;
7015 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7016 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
7017 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
7018 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
7019 TG3_PCIEPHY_TX0CTRL1_NB_EN;
7020 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
7023 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7024 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
7025 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
7026 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
7027 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
7030 tp->phy_addr = phy_addr;
7033 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7034 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7035 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7036 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7037 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
7040 tw32(0x7c00, val | (1 << 25));
7043 /* Reprobe ASF enable state. */
7044 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7045 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7046 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7047 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7050 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7051 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7052 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7053 tp->last_event_jiffies = jiffies;
7054 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7055 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7062 /* tp->lock is held. */
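/* Ask the ASF/IPMI firmware to pause: wait for the RX CPU to ACK any
 * previous event, post FWCMD_NICDRV_PAUSE_FW in the firmware command
 * mailbox, then raise a firmware event and wait for that to be ACKed
 * as well.  Skipped when the APE is managing the device.
 */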
7063 static void tg3_stop_fw(struct tg3 *tp)
7065 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7066 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7067 /* Wait for RX cpu to ACK the previous event. */
7068 tg3_wait_for_event_ack(tp);
7070 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7072 tg3_generate_fw_event(tp);
7074 /* Wait for RX cpu to ACK this event. */
7075 tg3_wait_for_event_ack(tp);
7079 /* tp->lock is held. */
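/* Shut the device down: write the pre-reset signature, abort the
 * hardware, reset the chip, restore the MAC address, then write the
 * legacy and post-reset signatures so the boot/ASF firmware knows
 * what state the driver left the chip in.
 */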
7080 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7086 tg3_write_sig_pre_reset(tp, kind);
7088 tg3_abort_hw(tp, silent);
7089 err = tg3_chip_reset(tp);
7091 __tg3_set_mac_addr(tp, 0);
7093 tg3_write_sig_legacy(tp, kind);
7094 tg3_write_sig_post_reset(tp, kind);
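/* On-chip SRAM windows used as scratch space when copying firmware
 * images into the RX and TX RISC processors; see
 * tg3_load_firmware_cpu() below.
 */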
7102 #define RX_CPU_SCRATCH_BASE 0x30000
7103 #define RX_CPU_SCRATCH_SIZE 0x04000
7104 #define TX_CPU_SCRATCH_BASE 0x34000
7105 #define TX_CPU_SCRATCH_SIZE 0x04000
7107 /* tp->lock is held. */
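/* Halt the RX or TX RISC processor at @offset.  The 5906 has no
 * conventional CPUs and is halted through GRC_VCPU_EXT_CTRL instead;
 * other chips poke CPU_MODE_HALT repeatedly until the CPU reports the
 * halted state, then the firmware's NVRAM arbitration request is
 * cleared.
 */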
7108 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7112 BUG_ON(offset == TX_CPU_BASE &&
7113 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7116 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7118 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7121 if (offset == RX_CPU_BASE) {
7122 for (i = 0; i < 10000; i++) {
7123 tw32(offset + CPU_STATE, 0xffffffff);
7124 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7125 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7129 tw32(offset + CPU_STATE, 0xffffffff);
7130 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7133 for (i = 0; i < 10000; i++) {
7134 tw32(offset + CPU_STATE, 0xffffffff);
7135 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7136 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7142 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7143 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7147 /* Clear firmware's nvram arbitration. */
7148 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7149 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
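/* Describes one firmware image parsed out of the blob obtained via
 * request_firmware(): the target load address, the payload length in
 * bytes, and a pointer to the big-endian payload words.
 */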
struct fw_info {
7154 unsigned int fw_base;
7155 unsigned int fw_len;
7156 const __be32 *fw_data;
};
7159 /* tp->lock is held. */
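/* Copy a firmware image into a CPU's scratch memory.  The NVRAM lock
 * is taken first because bootcode may still be running, the target CPU
 * is halted, its scratch area is zeroed, and the image words are then
 * written at the offset implied by fw_base.  The caller starts the CPU
 * afterwards.
 */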
7160 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7161 int cpu_scratch_size, struct fw_info *info)
7163 int err, lock_err, i;
7164 void (*write_op)(struct tg3 *, u32, u32);
7166 if (cpu_base == TX_CPU_BASE &&
7167 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7169 "%s: Trying to load TX cpu firmware on a 5705-class device\n",
7174 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7175 write_op = tg3_write_mem;
7177 write_op = tg3_write_indirect_reg32;
7179 /* It is possible that bootcode is still loading at this point.
7180 * Get the nvram lock first before halting the cpu.
7182 lock_err = tg3_nvram_lock(tp);
7183 err = tg3_halt_cpu(tp, cpu_base);
7185 tg3_nvram_unlock(tp);
7189 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7190 write_op(tp, cpu_scratch_base + i, 0);
7191 tw32(cpu_base + CPU_STATE, 0xffffffff);
7192 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7193 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7194 write_op(tp, (cpu_scratch_base +
7195 (info->fw_base & 0xffff) +
7197 be32_to_cpu(info->fw_data[i]));
7205 /* tp->lock is held. */
7206 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7208 struct fw_info info;
7209 const __be32 *fw_data;
7212 fw_data = (void *)tp->fw->data;
7214 /* Firmware blob starts with version numbers, followed by
7215 start address and length. We are setting complete length.
7216 length = end_address_of_bss - start_address_of_text.
7217 Remainder is the blob to be loaded contiguously
7218 from start address. */
7220 info.fw_base = be32_to_cpu(fw_data[1]);
7221 info.fw_len = tp->fw->size - 12;
7222 info.fw_data = &fw_data[3];
7224 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7225 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7230 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7231 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7236 /* Now startup only the RX cpu. */
7237 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7238 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7240 for (i = 0; i < 5; i++) {
7241 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7243 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7244 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7245 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7249 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x, "
7250 "should be %08x\n", __func__,
7251 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7254 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7255 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7260 /* 5705 needs a special version of the TSO firmware. */
7262 /* tp->lock is held. */
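/* Load the TSO firmware into the RX CPU's mbuf pool area (5705) or the
 * TX CPU scratch space (other chips) and start that CPU at the
 * firmware entry point, retrying the PC write a few times before
 * giving up.  Chips with hardware TSO skip this entirely.
 */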
7263 static int tg3_load_tso_firmware(struct tg3 *tp)
7265 struct fw_info info;
7266 const __be32 *fw_data;
7267 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7270 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7273 fw_data = (void *)tp->fw->data;
7275 /* Firmware blob starts with version numbers, followed by
7276 start address and length. We are setting complete length.
7277 length = end_address_of_bss - start_address_of_text.
7278 Remainder is the blob to be loaded contiguously
7279 from start address. */
7281 info.fw_base = be32_to_cpu(fw_data[1]);
7282 cpu_scratch_size = tp->fw_len;
7283 info.fw_len = tp->fw->size - 12;
7284 info.fw_data = &fw_data[3];
7286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7287 cpu_base = RX_CPU_BASE;
7288 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7290 cpu_base = TX_CPU_BASE;
7291 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7292 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7295 err = tg3_load_firmware_cpu(tp, cpu_base,
7296 cpu_scratch_base, cpu_scratch_size,
7301 /* Now startup the cpu. */
7302 tw32(cpu_base + CPU_STATE, 0xffffffff);
7303 tw32_f(cpu_base + CPU_PC, info.fw_base);
7305 for (i = 0; i < 5; i++) {
7306 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7308 tw32(cpu_base + CPU_STATE, 0xffffffff);
7309 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7310 tw32_f(cpu_base + CPU_PC, info.fw_base);
7315 "%s fails to set CPU PC, is %08x should be %08x\n",
7316 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7319 tw32(cpu_base + CPU_STATE, 0xffffffff);
7320 tw32_f(cpu_base + CPU_MODE, 0x00000000);
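/* .ndo_set_mac_address handler: validate the new address, copy it into
 * dev->dev_addr and, if the interface is running, program the MAC
 * address registers.  MAC address 1 is left alone when the ASF
 * firmware has claimed it for its own use.
 */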
7325 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7327 struct tg3 *tp = netdev_priv(dev);
7328 struct sockaddr *addr = p;
7329 int err = 0, skip_mac_1 = 0;
7331 if (!is_valid_ether_addr(addr->sa_data))
7334 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7336 if (!netif_running(dev))
7339 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7340 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7342 addr0_high = tr32(MAC_ADDR_0_HIGH);
7343 addr0_low = tr32(MAC_ADDR_0_LOW);
7344 addr1_high = tr32(MAC_ADDR_1_HIGH);
7345 addr1_low = tr32(MAC_ADDR_1_LOW);
7347 /* Skip MAC addr 1 if ASF is using it. */
7348 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7349 !(addr1_high == 0 && addr1_low == 0))
7352 spin_lock_bh(&tp->lock);
7353 __tg3_set_mac_addr(tp, skip_mac_1);
7354 spin_unlock_bh(&tp->lock);
7359 /* tp->lock is held. */
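/* Write one TG3_BDINFO ring control block into NIC memory: the 64-bit
 * host DMA address of the ring, the maxlen/flags word and, on pre-5705
 * chips, the NIC SRAM address of the descriptors.
 */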
7360 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7361 dma_addr_t mapping, u32 maxlen_flags,
7365 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7366 ((u64) mapping >> 32));
7368 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7369 ((u64) mapping & 0xffffffff));
7371 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7374 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7376 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7380 static void __tg3_set_rx_mode(struct net_device *);
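/* Program the host coalescing engine from an ethtool_coalesce request.
 * The default vector uses the base HOSTCC registers; each extra MSI-X
 * vector has its own register block spaced 0x18 bytes apart, and the
 * blocks of unused vectors are explicitly zeroed.
 */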
7381 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7385 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7386 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7387 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7388 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7390 tw32(HOSTCC_TXCOL_TICKS, 0);
7391 tw32(HOSTCC_TXMAX_FRAMES, 0);
7392 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7395 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
7396 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7397 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7398 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7400 tw32(HOSTCC_RXCOL_TICKS, 0);
7401 tw32(HOSTCC_RXMAX_FRAMES, 0);
7402 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7405 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7406 u32 val = ec->stats_block_coalesce_usecs;
7408 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7409 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7411 if (!netif_carrier_ok(tp->dev))
7414 tw32(HOSTCC_STAT_COAL_TICKS, val);
7417 for (i = 0; i < tp->irq_cnt - 1; i++) {
7420 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7421 tw32(reg, ec->rx_coalesce_usecs);
7422 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7423 tw32(reg, ec->rx_max_coalesced_frames);
7424 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7425 tw32(reg, ec->rx_max_coalesced_frames_irq);
7427 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7428 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7429 tw32(reg, ec->tx_coalesce_usecs);
7430 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7431 tw32(reg, ec->tx_max_coalesced_frames);
7432 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7433 tw32(reg, ec->tx_max_coalesced_frames_irq);
7437 for (; i < tp->irq_max - 1; i++) {
7438 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7439 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7440 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7442 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7443 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7444 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7445 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7450 /* tp->lock is held. */
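/* Return the rings to their power-on state: disable every send and
 * receive return ring except the first, zero the per-vector mailboxes
 * and status blocks, then rewrite the send/receive BDINFO control
 * blocks for each active NAPI vector.
 */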
7451 static void tg3_rings_reset(struct tg3 *tp)
7454 u32 stblk, txrcb, rxrcb, limit;
7455 struct tg3_napi *tnapi = &tp->napi[0];
7457 /* Disable all transmit rings but the first. */
7458 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7459 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7460 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7461 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7463 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7465 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7466 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7467 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7468 BDINFO_FLAGS_DISABLED);
7471 /* Disable all receive return rings but the first. */
7472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7473 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7474 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7475 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7476 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7478 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7480 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7482 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7483 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7484 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7485 BDINFO_FLAGS_DISABLED);
7487 /* Disable interrupts */
7488 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7490 /* Zero mailbox registers. */
7491 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7492 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7493 tp->napi[i].tx_prod = 0;
7494 tp->napi[i].tx_cons = 0;
7495 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7496 tw32_mailbox(tp->napi[i].prodmbox, 0);
7497 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7498 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7500 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7501 tw32_mailbox(tp->napi[0].prodmbox, 0);
7503 tp->napi[0].tx_prod = 0;
7504 tp->napi[0].tx_cons = 0;
7505 tw32_mailbox(tp->napi[0].prodmbox, 0);
7506 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7509 /* Make sure the NIC-based send BD rings are disabled. */
7510 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7511 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7512 for (i = 0; i < 16; i++)
7513 tw32_tx_mbox(mbox + i * 8, 0);
7516 txrcb = NIC_SRAM_SEND_RCB;
7517 rxrcb = NIC_SRAM_RCV_RET_RCB;
7519 /* Clear status block in ram. */
7520 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7522 /* Set status block DMA address */
7523 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7524 ((u64) tnapi->status_mapping >> 32));
7525 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7526 ((u64) tnapi->status_mapping & 0xffffffff));
7528 if (tnapi->tx_ring) {
7529 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7530 (TG3_TX_RING_SIZE <<
7531 BDINFO_FLAGS_MAXLEN_SHIFT),
7532 NIC_SRAM_TX_BUFFER_DESC);
7533 txrcb += TG3_BDINFO_SIZE;
7536 if (tnapi->rx_rcb) {
7537 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7538 (TG3_RX_RCB_RING_SIZE(tp) <<
7539 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7540 rxrcb += TG3_BDINFO_SIZE;
7543 stblk = HOSTCC_STATBLCK_RING1;
7545 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7546 u64 mapping = (u64)tnapi->status_mapping;
7547 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7548 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7550 /* Clear status block in ram. */
7551 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7553 if (tnapi->tx_ring) {
7554 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7555 (TG3_TX_RING_SIZE <<
7556 BDINFO_FLAGS_MAXLEN_SHIFT),
7557 NIC_SRAM_TX_BUFFER_DESC);
7558 txrcb += TG3_BDINFO_SIZE;
7561 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7562 (TG3_RX_RCB_RING_SIZE(tp) <<
7563 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7566 rxrcb += TG3_BDINFO_SIZE;
7570 /* tp->lock is held. */
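/* Main hardware bring-up path.  Resets the chip and then reprograms,
 * roughly in order: clocks and PCI/PCIe workarounds, the rings, DMA
 * and buffer manager watermarks, host coalescing, the MAC and RX/TX
 * modes, any CPU firmware that is required, RSS, and finally the PHY
 * via tg3_setup_phy().
 */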
7571 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7573 u32 val, rdmac_mode;
7575 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7577 tg3_disable_ints(tp);
7581 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7583 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7584 tg3_abort_hw(tp, 1);
7589 err = tg3_chip_reset(tp);
7593 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7595 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7596 val = tr32(TG3_CPMU_CTRL);
7597 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7598 tw32(TG3_CPMU_CTRL, val);
7600 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7601 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7602 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7603 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7605 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7606 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7607 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7608 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7610 val = tr32(TG3_CPMU_HST_ACC);
7611 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7612 val |= CPMU_HST_ACC_MACCLK_6_25;
7613 tw32(TG3_CPMU_HST_ACC, val);
7616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7617 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7618 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7619 PCIE_PWR_MGMT_L1_THRESH_4MS;
7620 tw32(PCIE_PWR_MGMT_THRESH, val);
7622 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7623 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7625 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7627 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7628 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7631 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7632 u32 grc_mode = tr32(GRC_MODE);
7634 /* Access the lower 1K of PL PCIE block registers. */
7635 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7636 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7638 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7639 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7640 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7642 tw32(GRC_MODE, grc_mode);
7645 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7646 u32 grc_mode = tr32(GRC_MODE);
7648 /* Access the lower 1K of PL PCIE block registers. */
7649 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7650 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7652 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
7653 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7654 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7656 tw32(GRC_MODE, grc_mode);
7659 /* This works around an issue with Athlon chipsets on
7660 * B3 tigon3 silicon. This bit has no effect on any
7661 * other revision. But do not set this on PCI Express
7662 * chips and don't even touch the clocks if the CPMU is present.
7664 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7665 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7666 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7667 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7670 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7671 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7672 val = tr32(TG3PCI_PCISTATE);
7673 val |= PCISTATE_RETRY_SAME_DMA;
7674 tw32(TG3PCI_PCISTATE, val);
7677 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7678 /* Allow reads and writes to the
7679 * APE register and memory space.
7681 val = tr32(TG3PCI_PCISTATE);
7682 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7683 PCISTATE_ALLOW_APE_SHMEM_WR;
7684 tw32(TG3PCI_PCISTATE, val);
7687 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7688 /* Enable some hw fixes. */
7689 val = tr32(TG3PCI_MSI_DATA);
7690 val |= (1 << 26) | (1 << 28) | (1 << 29);
7691 tw32(TG3PCI_MSI_DATA, val);
7694 /* Descriptor ring init may make accesses to the
7695 * NIC SRAM area to setup the TX descriptors, so we
7696 * can only do this after the hardware has been
7697 * successfully reset.
7699 err = tg3_init_rings(tp);
7703 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7705 val = tr32(TG3PCI_DMA_RW_CTRL) &
7706 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7707 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7708 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7709 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7710 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7711 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7712 /* This value is determined during the probe time DMA
7713 * engine test, tg3_test_dma.
7715 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7718 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7719 GRC_MODE_4X_NIC_SEND_RINGS |
7720 GRC_MODE_NO_TX_PHDR_CSUM |
7721 GRC_MODE_NO_RX_PHDR_CSUM);
7722 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7724 /* Pseudo-header checksum is done by hardware logic and not
7725 * the offload processors, so make the chip do the pseudo-
7726 * header checksums on receive. For transmit it is more
7727 * convenient to do the pseudo-header checksum in software
7728 * as Linux does that on transmit for us in all cases.
7730 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7734 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7736 /* Set up the timer prescaler register. The clock is always 66 MHz. */
7737 val = tr32(GRC_MISC_CFG);
7739 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7740 tw32(GRC_MISC_CFG, val);
7742 /* Initialize MBUF/DESC pool. */
7743 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7745 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7746 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7748 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7750 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7751 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7752 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7753 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7756 fw_len = tp->fw_len;
7757 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7758 tw32(BUFMGR_MB_POOL_ADDR,
7759 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7760 tw32(BUFMGR_MB_POOL_SIZE,
7761 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7764 if (tp->dev->mtu <= ETH_DATA_LEN) {
7765 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7766 tp->bufmgr_config.mbuf_read_dma_low_water);
7767 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7768 tp->bufmgr_config.mbuf_mac_rx_low_water);
7769 tw32(BUFMGR_MB_HIGH_WATER,
7770 tp->bufmgr_config.mbuf_high_water);
7772 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7773 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7774 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7775 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7776 tw32(BUFMGR_MB_HIGH_WATER,
7777 tp->bufmgr_config.mbuf_high_water_jumbo);
7779 tw32(BUFMGR_DMA_LOW_WATER,
7780 tp->bufmgr_config.dma_low_water);
7781 tw32(BUFMGR_DMA_HIGH_WATER,
7782 tp->bufmgr_config.dma_high_water);
7784 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7785 for (i = 0; i < 2000; i++) {
7786 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7791 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
7795 /* Setup replenish threshold. */
7796 val = tp->rx_pending / 8;
7799 else if (val > tp->rx_std_max_post)
7800 val = tp->rx_std_max_post;
7801 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7802 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7803 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7805 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7806 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7809 tw32(RCVBDI_STD_THRESH, val);
7811 /* Initialize TG3_BDINFO's at:
7812 * RCVDBDI_STD_BD: standard eth size rx ring
7813 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7814 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7817 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7818 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7819 * ring attribute flags
7820 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7822 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7823 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7825 * The size of each ring is fixed in the firmware, but the location is configurable. */
7828 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7829 ((u64) tpr->rx_std_mapping >> 32));
7830 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7831 ((u64) tpr->rx_std_mapping & 0xffffffff));
7832 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7833 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7834 NIC_SRAM_RX_BUFFER_DESC);
7836 /* Disable the mini ring */
7837 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7838 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7839 BDINFO_FLAGS_DISABLED);
7841 /* Program the jumbo buffer descriptor ring control
7842 * blocks on those devices that have them.
7844 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7845 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7846 /* Setup replenish threshold. */
7847 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7849 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7850 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7851 ((u64) tpr->rx_jmb_mapping >> 32));
7852 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7853 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7854 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7855 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7856 BDINFO_FLAGS_USE_EXT_RECV);
7857 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7858 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7859 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7861 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7862 BDINFO_FLAGS_DISABLED);
7865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7866 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7867 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7868 (RX_STD_MAX_SIZE << 2);
7870 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7872 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7874 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7876 tpr->rx_std_prod_idx = tp->rx_pending;
7877 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7879 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7880 tp->rx_jumbo_pending : 0;
7881 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7885 tw32(STD_REPLENISH_LWM, 32);
7886 tw32(JMB_REPLENISH_LWM, 16);
7889 tg3_rings_reset(tp);
7891 /* Initialize MAC address and backoff seed. */
7892 __tg3_set_mac_addr(tp, 0);
7894 /* MTU + ethernet header + FCS + optional VLAN tag */
7895 tw32(MAC_RX_MTU_SIZE,
7896 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7898 /* The slot time is changed by tg3_setup_phy if we
7899 * run at gigabit with half duplex.
7901 tw32(MAC_TX_LENGTHS,
7902 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7903 (6 << TX_LENGTHS_IPG_SHIFT) |
7904 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7906 /* Receive rules. */
7907 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7908 tw32(RCVLPC_CONFIG, 0x0181);
7910 /* Calculate RDMAC_MODE setting early, we need it to determine
7911 * the RCVLPC_STATE_ENABLE mask.
7913 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7914 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7915 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7916 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7917 RDMAC_MODE_LNGREAD_ENAB);
7919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7920 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
7922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7923 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7925 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7926 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7927 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7929 /* If statement applies to 5705 and 5750 PCI devices only */
7930 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7931 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7932 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7933 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7935 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7936 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7937 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7938 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7942 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7943 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7945 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7946 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7948 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7951 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7953 /* Receive/send statistics. */
7954 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7955 val = tr32(RCVLPC_STATS_ENABLE);
7956 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7957 tw32(RCVLPC_STATS_ENABLE, val);
7958 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7959 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7960 val = tr32(RCVLPC_STATS_ENABLE);
7961 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7962 tw32(RCVLPC_STATS_ENABLE, val);
7964 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7966 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7967 tw32(SNDDATAI_STATSENAB, 0xffffff);
7968 tw32(SNDDATAI_STATSCTRL,
7969 (SNDDATAI_SCTRL_ENABLE |
7970 SNDDATAI_SCTRL_FASTUPD));
7972 /* Setup host coalescing engine. */
7973 tw32(HOSTCC_MODE, 0);
7974 for (i = 0; i < 2000; i++) {
7975 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7980 __tg3_set_coalesce(tp, &tp->coal);
7982 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7983 /* Status/statistics block address. See tg3_timer,
7984 * the tg3_periodic_fetch_stats call there, and
7985 * tg3_get_stats to see how this works for 5705/5750 chips.
7987 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7988 ((u64) tp->stats_mapping >> 32));
7989 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7990 ((u64) tp->stats_mapping & 0xffffffff));
7991 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7993 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7995 /* Clear statistics and status block memory areas */
7996 for (i = NIC_SRAM_STATS_BLK;
7997 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7999 tg3_write_mem(tp, i, 0);
8004 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8006 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8007 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8008 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8009 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8011 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8012 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
8013 /* reset to prevent losing 1st rx packet intermittently */
8014 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8018 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8019 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8022 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8023 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8024 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8025 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8026 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8027 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8028 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8031 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8032 * If TG3_FLG2_IS_NIC is zero, we should read the
8033 * register to preserve the GPIO settings for LOMs. The GPIOs,
8034 * whether used as inputs or outputs, are set by boot code after reset. */
8037 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8040 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8041 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8042 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8045 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8046 GRC_LCLCTRL_GPIO_OUTPUT3;
8048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8049 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8051 tp->grc_local_ctrl &= ~gpio_mask;
8052 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8054 /* GPIO1 must be driven high for eeprom write protect */
8055 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8056 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8057 GRC_LCLCTRL_GPIO_OUTPUT1);
8059 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8062 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8063 val = tr32(MSGINT_MODE);
8064 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8065 tw32(MSGINT_MODE, val);
8068 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8069 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8073 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8074 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8075 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8076 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8077 WDMAC_MODE_LNGREAD_ENAB);
8079 /* If statement applies to 5705 and 5750 PCI devices only */
8080 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8081 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8083 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8084 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8085 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8087 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8088 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8089 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8090 val |= WDMAC_MODE_RX_ACCEL;
8094 /* Enable host coalescing bug fix */
8095 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8096 val |= WDMAC_MODE_STATUS_TAG_FIX;
8098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8099 val |= WDMAC_MODE_BURST_ALL_DATA;
8101 tw32_f(WDMAC_MODE, val);
8104 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8107 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8110 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8111 pcix_cmd |= PCI_X_CMD_READ_2K;
8112 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8113 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8114 pcix_cmd |= PCI_X_CMD_READ_2K;
8116 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8120 tw32_f(RDMAC_MODE, rdmac_mode);
8123 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8124 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8125 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8127 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8129 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8131 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8133 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8134 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8135 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
8136 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8137 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8138 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8139 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8140 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8141 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8142 tw32(SNDBDI_MODE, val);
8143 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8145 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8146 err = tg3_load_5701_a0_firmware_fix(tp);
8151 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8152 err = tg3_load_tso_firmware(tp);
8157 tp->tx_mode = TX_MODE_ENABLE;
8158 tw32_f(MAC_TX_MODE, tp->tx_mode);
8161 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8162 u32 reg = MAC_RSS_INDIR_TBL_0;
8163 u8 *ent = (u8 *)&val;
8165 /* Setup the indirection table */
8166 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8167 int idx = i % sizeof(val);
8169 ent[idx] = i % (tp->irq_cnt - 1);
8170 if (idx == sizeof(val) - 1) {
8176 /* Setup the "secret" hash key. */
8177 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8178 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8179 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8180 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8181 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8182 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8183 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8184 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8185 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8186 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8189 tp->rx_mode = RX_MODE_ENABLE;
8190 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8191 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8193 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8194 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8195 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8196 RX_MODE_RSS_IPV6_HASH_EN |
8197 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8198 RX_MODE_RSS_IPV4_HASH_EN |
8199 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8201 tw32_f(MAC_RX_MODE, tp->rx_mode);
8204 tw32(MAC_LED_CTRL, tp->led_ctrl);
8206 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8207 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8208 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8211 tw32_f(MAC_RX_MODE, tp->rx_mode);
8214 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8215 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8216 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
8217 /* Set drive transmission level to 1.2V, but only if the
8218 * signal pre-emphasis bit is not set. */
8219 val = tr32(MAC_SERDES_CFG);
8222 tw32(MAC_SERDES_CFG, val);
8224 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8225 tw32(MAC_SERDES_CFG, 0x616000);
8228 /* Prevent the chip from dropping frames when flow control is enabled. */
8231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8235 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8237 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8238 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8239 /* Use hardware link auto-negotiation */
8240 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8243 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
8244 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8247 tmp = tr32(SERDES_RX_CTRL);
8248 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8249 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8250 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8251 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8254 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8255 if (tp->link_config.phy_is_low_power) {
8256 tp->link_config.phy_is_low_power = 0;
8257 tp->link_config.speed = tp->link_config.orig_speed;
8258 tp->link_config.duplex = tp->link_config.orig_duplex;
8259 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8262 err = tg3_setup_phy(tp, 0);
8266 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8267 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
8270 /* Clear CRC stats. */
8271 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8272 tg3_writephy(tp, MII_TG3_TEST1,
8273 tmp | MII_TG3_TEST1_CRC_EN);
8274 tg3_readphy(tp, 0x14, &tmp);
8279 __tg3_set_rx_mode(tp->dev);
8281 /* Initialize receive rules. */
8282 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8283 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8284 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8285 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8287 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8288 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8292 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8296 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8298 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8300 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8302 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8304 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8306 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8308 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8310 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8312 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8314 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8316 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8318 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8320 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8322 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8330 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8331 /* Write our heartbeat update interval to APE. */
8332 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8333 APE_HOST_HEARTBEAT_INT_DISABLE);
8335 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8340 /* Called at device open time to get the chip ready for
8341 * packet processing. Invoked with tp->lock held.
8343 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8345 tg3_switch_clocks(tp);
8347 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8349 return tg3_reset_hw(tp, reset_phy);
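/* TG3_STAT_ADD32() folds a 32-bit hardware statistics reading into a
 * 64-bit (high/low) software counter.  The low word is summed first;
 * if the result is smaller than the value just added, the addition
 * wrapped and a carry is propagated into the high word.  For example,
 * low = 0xfffffff0 plus a reading of 0x20 leaves low = 0x10, which is
 * less than 0x20, so high is incremented.
 */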
8352 #define TG3_STAT_ADD32(PSTAT, REG) \
8353 do { u32 __val = tr32(REG); \
8354 (PSTAT)->low += __val; \
8355 if ((PSTAT)->low < __val) \
8356 (PSTAT)->high += 1; \
8359 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8361 struct tg3_hw_stats *sp = tp->hw_stats;
8363 if (!netif_carrier_ok(tp->dev))
8366 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8367 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8368 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8369 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8370 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8371 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8372 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8373 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8374 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8375 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8376 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8377 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8378 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8380 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8381 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8382 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8383 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8384 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8385 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8386 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8387 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8388 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8389 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8390 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8391 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8392 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8393 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8395 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8396 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8397 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8400 static void tg3_timer(unsigned long __opaque)
8402 struct tg3 *tp = (struct tg3 *) __opaque;
8407 spin_lock(&tp->lock);
8409 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8410 /* All of this is needed because, when using non-tagged
8411 * IRQ status, the mailbox/status_block protocol the chip
8412 * uses with the cpu is race prone. */
8414 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8415 tw32(GRC_LOCAL_CTRL,
8416 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8418 tw32(HOSTCC_MODE, tp->coalesce_mode |
8419 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8422 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8423 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8424 spin_unlock(&tp->lock);
8425 schedule_work(&tp->reset_task);
8430 /* This part only runs once per second. */
8431 if (!--tp->timer_counter) {
8432 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8433 tg3_periodic_fetch_stats(tp);
8435 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8439 mac_stat = tr32(MAC_STATUS);
8442 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
8443 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8445 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8449 tg3_setup_phy(tp, 0);
8450 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8451 u32 mac_stat = tr32(MAC_STATUS);
8454 if (netif_carrier_ok(tp->dev) &&
8455 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8458 if (!netif_carrier_ok(tp->dev) &&
8459 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8460 MAC_STATUS_SIGNAL_DET))) {
8464 if (!tp->serdes_counter) {
8467 ~MAC_MODE_PORT_MODE_MASK));
8469 tw32_f(MAC_MODE, tp->mac_mode);
8472 tg3_setup_phy(tp, 0);
8474 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8475 tg3_serdes_parallel_detect(tp);
8477 tp->timer_counter = tp->timer_multiplier;
8480 /* Heartbeat is only sent once every 2 seconds.
8482 * The heartbeat is to tell the ASF firmware that the host
8483 * driver is still alive. In the event that the OS crashes,
8484 * ASF needs to reset the hardware to free up the FIFO space
8485 * that may be filled with rx packets destined for the host.
8486 * If the FIFO is full, ASF will no longer function properly.
8488 * Unintended resets have been reported on real time kernels
8489 * where the timer doesn't run on time. Netpoll will also have the same problem.
8492 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8493 * to check the ring condition when the heartbeat is expiring
8494 * before doing the reset. This will prevent most unintended resets. */
8497 if (!--tp->asf_counter) {
8498 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8499 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8500 tg3_wait_for_event_ack(tp);
8502 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8503 FWCMD_NICDRV_ALIVE3);
8504 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8505 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8506 TG3_FW_UPDATE_TIMEOUT_SEC);
8508 tg3_generate_fw_event(tp);
8510 tp->asf_counter = tp->asf_multiplier;
8513 spin_unlock(&tp->lock);
8516 tp->timer.expires = jiffies + tp->timer_offset;
8517 add_timer(&tp->timer);
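/* Register the interrupt handler for one vector.  MSI/MSI-X vectors
 * get the MSI handlers (the one-shot variant when supported) and are
 * not shared; legacy INTx uses the tagged-status handler when the chip
 * supports it and is registered as a shared interrupt.
 */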
8520 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8523 unsigned long flags;
8525 struct tg3_napi *tnapi = &tp->napi[irq_num];
8527 if (tp->irq_cnt == 1)
8528 name = tp->dev->name;
8530 name = &tnapi->irq_lbl[0];
8531 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8532 name[IFNAMSIZ-1] = 0;
8535 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8537 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8539 flags = IRQF_SAMPLE_RANDOM;
8542 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8543 fn = tg3_interrupt_tagged;
8544 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8547 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
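/* Verify that interrupt delivery actually works: install a test ISR,
 * enable interrupts, force a "coalesce now" event, and poll the
 * interrupt mailbox (and the MISC_HOST_CTRL mask bit) a few times to
 * see whether the interrupt arrived.  Used by the MSI sanity check
 * below.
 */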
8550 static int tg3_test_interrupt(struct tg3 *tp)
8552 struct tg3_napi *tnapi = &tp->napi[0];
8553 struct net_device *dev = tp->dev;
8554 int err, i, intr_ok = 0;
8557 if (!netif_running(dev))
8560 tg3_disable_ints(tp);
8562 free_irq(tnapi->irq_vec, tnapi);
8565 * Turn off MSI one shot mode. Otherwise this test has no
8566 * observable way to know whether the interrupt was delivered.
8568 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8570 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8571 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8572 tw32(MSGINT_MODE, val);
8575 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8576 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8580 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8581 tg3_enable_ints(tp);
8583 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8586 for (i = 0; i < 5; i++) {
8587 u32 int_mbox, misc_host_ctrl;
8589 int_mbox = tr32_mailbox(tnapi->int_mbox);
8590 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8592 if ((int_mbox != 0) ||
8593 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8601 tg3_disable_ints(tp);
8603 free_irq(tnapi->irq_vec, tnapi);
8605 err = tg3_request_irq(tp, 0);
8611 /* Reenable MSI one shot mode. */
8612 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8614 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8615 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8616 tw32(MSGINT_MODE, val);
8624 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
8625 * successfully restored. */
8627 static int tg3_test_msi(struct tg3 *tp)
8632 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8635 /* Turn off SERR reporting in case MSI terminates with Master
8638 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8639 pci_write_config_word(tp->pdev, PCI_COMMAND,
8640 pci_cmd & ~PCI_COMMAND_SERR);
8642 err = tg3_test_interrupt(tp);
8644 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8649 /* other failures */
8653 /* MSI test failed, go back to INTx mode */
8654 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8655 "to INTx mode. Please report this failure to the PCI "
8656 "maintainer and include system chipset information\n");
8658 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8660 pci_disable_msi(tp->pdev);
8662 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8664 err = tg3_request_irq(tp, 0);
8668 /* Need to reset the chip because the MSI cycle may have terminated
8669 * with Master Abort.
8671 tg3_full_lock(tp, 1);
8673 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8674 err = tg3_init_hw(tp, 1);
8676 tg3_full_unlock(tp);
8679 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
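/* Fetch the firmware blob named by tp->fw_needed via request_firmware()
 * and sanity-check it: word 2 of the header holds the full image length
 * (including BSS), which must not be smaller than the remaining payload.
 */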
8684 static int tg3_request_firmware(struct tg3 *tp)
8686 const __be32 *fw_data;
8688 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8689 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8694 fw_data = (void *)tp->fw->data;
8696 /* Firmware blob starts with version numbers, followed by
8697 * start address and _full_ length including BSS sections
8698 * (which must be longer than the actual data, of course). */
8701 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8702 if (tp->fw_len < (tp->fw->size - 12)) {
8703 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8704 tp->fw_len, tp->fw_needed);
8705 release_firmware(tp->fw);
8710 /* We no longer need firmware; we have it. */
8711 tp->fw_needed = NULL;
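/* Try to switch to MSI-X.  One vector is requested per online CPU plus
 * one, since vector 0 only handles link and other non-ring events.  If
 * fewer vectors are granted, the request is retried with that count,
 * and RSS (plus TSS on 5717) is enabled to spread the rings across the
 * vectors that were obtained.
 */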
8715 static bool tg3_enable_msix(struct tg3 *tp)
8717 int i, rc, cpus = num_online_cpus();
8718 struct msix_entry msix_ent[tp->irq_max];
8721 /* Just fall back to the simpler MSI mode. */
8725 * We want as many rx rings enabled as there are cpus.
8726 * The first MSIX vector only deals with link interrupts, etc,
8727 * so we add one to the number of vectors we are requesting.
8729 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8731 for (i = 0; i < tp->irq_max; i++) {
8732 msix_ent[i].entry = i;
8733 msix_ent[i].vector = 0;
8736 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8738 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
8740 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8742 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
8747 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8749 for (i = 0; i < tp->irq_max; i++)
8750 tp->napi[i].irq_vec = msix_ent[i].vector;
8752 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
8753 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8754 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8756 tp->dev->real_num_tx_queues = 1;
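/* Pick the interrupt mode for the device: MSI-X where supported (and
 * only when tagged status is available), otherwise MSI, otherwise
 * legacy INTx, and program MSGINT_MODE to match.
 */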
8761 static void tg3_ints_init(struct tg3 *tp)
8763 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8764 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8765 /* All MSI supporting chips should support tagged
8766 * status. Assert that this is the case.
8768 netdev_warn(tp->dev,
8769 "MSI without TAGGED_STATUS? Not using MSI\n");
8773 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8774 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8775 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8776 pci_enable_msi(tp->pdev) == 0)
8777 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8779 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8780 u32 msi_mode = tr32(MSGINT_MODE);
8781 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8782 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8783 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8786 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8788 tp->napi[0].irq_vec = tp->pdev->irq;
8789 tp->dev->real_num_tx_queues = 1;
8793 static void tg3_ints_fini(struct tg3 *tp)
8795 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8796 pci_disable_msix(tp->pdev);
8797 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8798 pci_disable_msi(tp->pdev);
8799 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8800 tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
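/* .ndo_open handler: load any required firmware, power the chip up,
 * set up the interrupt vectors and per-vector NAPI/IRQ resources,
 * initialize the hardware, verify MSI delivery, then start the timer
 * and the transmit queues.
 */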
8803 static int tg3_open(struct net_device *dev)
8805 struct tg3 *tp = netdev_priv(dev);
8808 if (tp->fw_needed) {
8809 err = tg3_request_firmware(tp);
8810 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8814 netdev_warn(tp->dev, "TSO capability disabled\n");
8815 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8816 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8817 netdev_notice(tp->dev, "TSO capability restored\n");
8818 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8822 netif_carrier_off(tp->dev);
8824 err = tg3_set_power_state(tp, PCI_D0);
8828 tg3_full_lock(tp, 0);
8830 tg3_disable_ints(tp);
8831 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8833 tg3_full_unlock(tp);
8836 * Setup interrupts first so we know how
8837 * many NAPI resources to allocate
8841 /* The placement of this call is tied
8842 * to the setup and use of Host TX descriptors.
8844 err = tg3_alloc_consistent(tp);
8848 tg3_napi_enable(tp);
8850 for (i = 0; i < tp->irq_cnt; i++) {
8851 struct tg3_napi *tnapi = &tp->napi[i];
8852 err = tg3_request_irq(tp, i);
8854 for (i--; i >= 0; i--)
8855 free_irq(tnapi->irq_vec, tnapi);
8863 tg3_full_lock(tp, 0);
8865 err = tg3_init_hw(tp, 1);
8867 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8870 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8871 tp->timer_offset = HZ;
8873 tp->timer_offset = HZ / 10;
8875 BUG_ON(tp->timer_offset > HZ);
8876 tp->timer_counter = tp->timer_multiplier =
8877 (HZ / tp->timer_offset);
8878 tp->asf_counter = tp->asf_multiplier =
8879 ((HZ / tp->timer_offset) * 2);
8881 init_timer(&tp->timer);
8882 tp->timer.expires = jiffies + tp->timer_offset;
8883 tp->timer.data = (unsigned long) tp;
8884 tp->timer.function = tg3_timer;
8887 tg3_full_unlock(tp);
8892 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8893 err = tg3_test_msi(tp);
8896 tg3_full_lock(tp, 0);
8897 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8899 tg3_full_unlock(tp);
8904 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8905 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8906 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8907 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
8908 u32 val = tr32(PCIE_TRANSACTION_CFG);
8910 tw32(PCIE_TRANSACTION_CFG,
8911 val | PCIE_TRANS_CFG_1SHOT_MSI);
8917 tg3_full_lock(tp, 0);
8919 add_timer(&tp->timer);
8920 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8921 tg3_enable_ints(tp);
8923 tg3_full_unlock(tp);
8925 netif_tx_start_all_queues(dev);
8930 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8931 struct tg3_napi *tnapi = &tp->napi[i];
8932 free_irq(tnapi->irq_vec, tnapi);
8936 tg3_napi_disable(tp);
8937 tg3_free_consistent(tp);
8944 static struct net_device_stats *tg3_get_stats(struct net_device *);
8945 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8947 static int tg3_close(struct net_device *dev)
8950 struct tg3 *tp = netdev_priv(dev);
8952 tg3_napi_disable(tp);
8953 cancel_work_sync(&tp->reset_task);
8955 netif_tx_stop_all_queues(dev);
8957 del_timer_sync(&tp->timer);
8961 tg3_full_lock(tp, 1);
8963 tg3_disable_ints(tp);
8965 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8967 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8969 tg3_full_unlock(tp);
8971 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8972 struct tg3_napi *tnapi = &tp->napi[i];
8973 free_irq(tnapi->irq_vec, tnapi);
8978 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8979 sizeof(tp->net_stats_prev));
8980 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8981 sizeof(tp->estats_prev));
8983 tg3_free_consistent(tp);
8985 tg3_set_power_state(tp, PCI_D3hot);
8987 netif_carrier_off(tp->dev);
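/* Helpers that fold the hardware's split high/low 64-bit statistics
 * representation into scalar values for the stats interfaces.
 */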
8992 static inline unsigned long get_stat64(tg3_stat64_t *val)
8996 #if (BITS_PER_LONG == 32)
8999 ret = ((u64)val->high << 32) | ((u64)val->low);
9004 static inline u64 get_estat64(tg3_stat64_t *val)
9006 return ((u64)val->high << 32) | ((u64)val->low);
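/* On 5700/5701 with a copper PHY the FCS error count lives in the PHY:
 * enable the CRC counter via MII_TG3_TEST1, read register 0x14 and
 * accumulate the result in tp->phy_crc_errors.  All other chips report
 * rx_fcs_errors directly in the MAC statistics block.
 */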
9009 static unsigned long calc_crc_errors(struct tg3 *tp)
9011 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9013 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9014 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9018 spin_lock_bh(&tp->lock);
9019 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9020 tg3_writephy(tp, MII_TG3_TEST1,
9021 val | MII_TG3_TEST1_CRC_EN);
9022 tg3_readphy(tp, 0x14, &val);
9025 spin_unlock_bh(&tp->lock);
9027 tp->phy_crc_errors += val;
9029 return tp->phy_crc_errors;
9032 return get_stat64(&hw_stats->rx_fcs_errors);
9035 #define ESTAT_ADD(member) \
9036 estats->member = old_estats->member + \
9037 get_estat64(&hw_stats->member)
9039 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9041 struct tg3_ethtool_stats *estats = &tp->estats;
9042 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9043 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9048 ESTAT_ADD(rx_octets);
9049 ESTAT_ADD(rx_fragments);
9050 ESTAT_ADD(rx_ucast_packets);
9051 ESTAT_ADD(rx_mcast_packets);
9052 ESTAT_ADD(rx_bcast_packets);
9053 ESTAT_ADD(rx_fcs_errors);
9054 ESTAT_ADD(rx_align_errors);
9055 ESTAT_ADD(rx_xon_pause_rcvd);
9056 ESTAT_ADD(rx_xoff_pause_rcvd);
9057 ESTAT_ADD(rx_mac_ctrl_rcvd);
9058 ESTAT_ADD(rx_xoff_entered);
9059 ESTAT_ADD(rx_frame_too_long_errors);
9060 ESTAT_ADD(rx_jabbers);
9061 ESTAT_ADD(rx_undersize_packets);
9062 ESTAT_ADD(rx_in_length_errors);
9063 ESTAT_ADD(rx_out_length_errors);
9064 ESTAT_ADD(rx_64_or_less_octet_packets);
9065 ESTAT_ADD(rx_65_to_127_octet_packets);
9066 ESTAT_ADD(rx_128_to_255_octet_packets);
9067 ESTAT_ADD(rx_256_to_511_octet_packets);
9068 ESTAT_ADD(rx_512_to_1023_octet_packets);
9069 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9070 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9071 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9072 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9073 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9075 ESTAT_ADD(tx_octets);
9076 ESTAT_ADD(tx_collisions);
9077 ESTAT_ADD(tx_xon_sent);
9078 ESTAT_ADD(tx_xoff_sent);
9079 ESTAT_ADD(tx_flow_control);
9080 ESTAT_ADD(tx_mac_errors);
9081 ESTAT_ADD(tx_single_collisions);
9082 ESTAT_ADD(tx_mult_collisions);
9083 ESTAT_ADD(tx_deferred);
9084 ESTAT_ADD(tx_excessive_collisions);
9085 ESTAT_ADD(tx_late_collisions);
9086 ESTAT_ADD(tx_collide_2times);
9087 ESTAT_ADD(tx_collide_3times);
9088 ESTAT_ADD(tx_collide_4times);
9089 ESTAT_ADD(tx_collide_5times);
9090 ESTAT_ADD(tx_collide_6times);
9091 ESTAT_ADD(tx_collide_7times);
9092 ESTAT_ADD(tx_collide_8times);
9093 ESTAT_ADD(tx_collide_9times);
9094 ESTAT_ADD(tx_collide_10times);
9095 ESTAT_ADD(tx_collide_11times);
9096 ESTAT_ADD(tx_collide_12times);
9097 ESTAT_ADD(tx_collide_13times);
9098 ESTAT_ADD(tx_collide_14times);
9099 ESTAT_ADD(tx_collide_15times);
9100 ESTAT_ADD(tx_ucast_packets);
9101 ESTAT_ADD(tx_mcast_packets);
9102 ESTAT_ADD(tx_bcast_packets);
9103 ESTAT_ADD(tx_carrier_sense_errors);
9104 ESTAT_ADD(tx_discards);
9105 ESTAT_ADD(tx_errors);
9107 ESTAT_ADD(dma_writeq_full);
9108 ESTAT_ADD(dma_write_prioq_full);
9109 ESTAT_ADD(rxbds_empty);
9110 ESTAT_ADD(rx_discards);
9111 ESTAT_ADD(rx_errors);
9112 ESTAT_ADD(rx_threshold_hit);
9114 ESTAT_ADD(dma_readq_full);
9115 ESTAT_ADD(dma_read_prioq_full);
9116 ESTAT_ADD(tx_comp_queue_full);
9118 ESTAT_ADD(ring_set_send_prod_index);
9119 ESTAT_ADD(ring_status_update);
9120 ESTAT_ADD(nic_irqs);
9121 ESTAT_ADD(nic_avoided_irqs);
9122 ESTAT_ADD(nic_tx_threshold_hit);
9127 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
9129 struct tg3 *tp = netdev_priv(dev);
9130 struct net_device_stats *stats = &tp->net_stats;
9131 struct net_device_stats *old_stats = &tp->net_stats_prev;
9132 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9137 stats->rx_packets = old_stats->rx_packets +
9138 get_stat64(&hw_stats->rx_ucast_packets) +
9139 get_stat64(&hw_stats->rx_mcast_packets) +
9140 get_stat64(&hw_stats->rx_bcast_packets);
9142 stats->tx_packets = old_stats->tx_packets +
9143 get_stat64(&hw_stats->tx_ucast_packets) +
9144 get_stat64(&hw_stats->tx_mcast_packets) +
9145 get_stat64(&hw_stats->tx_bcast_packets);
9147 stats->rx_bytes = old_stats->rx_bytes +
9148 get_stat64(&hw_stats->rx_octets);
9149 stats->tx_bytes = old_stats->tx_bytes +
9150 get_stat64(&hw_stats->tx_octets);
9152 stats->rx_errors = old_stats->rx_errors +
9153 get_stat64(&hw_stats->rx_errors);
9154 stats->tx_errors = old_stats->tx_errors +
9155 get_stat64(&hw_stats->tx_errors) +
9156 get_stat64(&hw_stats->tx_mac_errors) +
9157 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9158 get_stat64(&hw_stats->tx_discards);
9160 stats->multicast = old_stats->multicast +
9161 get_stat64(&hw_stats->rx_mcast_packets);
9162 stats->collisions = old_stats->collisions +
9163 get_stat64(&hw_stats->tx_collisions);
9165 stats->rx_length_errors = old_stats->rx_length_errors +
9166 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9167 get_stat64(&hw_stats->rx_undersize_packets);
9169 stats->rx_over_errors = old_stats->rx_over_errors +
9170 get_stat64(&hw_stats->rxbds_empty);
9171 stats->rx_frame_errors = old_stats->rx_frame_errors +
9172 get_stat64(&hw_stats->rx_align_errors);
9173 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9174 get_stat64(&hw_stats->tx_discards);
9175 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9176 get_stat64(&hw_stats->tx_carrier_sense_errors);
9178 stats->rx_crc_errors = old_stats->rx_crc_errors +
9179 calc_crc_errors(tp);
9181 stats->rx_missed_errors = old_stats->rx_missed_errors +
9182 get_stat64(&hw_stats->rx_discards);
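/* calc_crc() below is presumably the standard bit-reflected Ethernet CRC-32
 * (polynomial 0xedb88320); the driver uses the result for the multicast
 * hash filter bits and for the NVRAM bootstrap/manufacturing checksums.
 */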
9187 static inline u32 calc_crc(unsigned char *buf, int len)
9195 for (j = 0; j < len; j++) {
9198 for (k = 0; k < 8; k++) {
9211 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9213 /* accept or reject all multicast frames */
9214 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9215 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9216 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9217 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9220 static void __tg3_set_rx_mode(struct net_device *dev)
9222 struct tg3 *tp = netdev_priv(dev);
9225 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9226 RX_MODE_KEEP_VLAN_TAG);
9228 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9231 #if TG3_VLAN_TAG_USED
9233 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9234 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9236 /* By definition, VLAN is always disabled in this
9239 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9240 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9243 if (dev->flags & IFF_PROMISC) {
9244 /* Promiscuous mode. */
9245 rx_mode |= RX_MODE_PROMISC;
9246 } else if (dev->flags & IFF_ALLMULTI) {
9247 /* Accept all multicast. */
9248 tg3_set_multi(tp, 1);
9249 } else if (netdev_mc_empty(dev)) {
9250 /* Reject all multicast. */
9251 tg3_set_multi(tp, 0);
9253 /* Accept one or more multicast(s). */
9254 struct netdev_hw_addr *ha;
9255 u32 mc_filter[4] = { 0, };
9260 netdev_for_each_mc_addr(ha, dev) {
9261 crc = calc_crc(ha->addr, ETH_ALEN);
9263 regidx = (bit & 0x60) >> 5;
9265 mc_filter[regidx] |= (1 << bit);
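/* The hash index appears to be derived from the CRC of the address:
 * bits 6:5 select one of the four 32-bit hash registers and the low bits
 * select the bit within it, giving a 128-bit imperfect multicast filter.
 */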
9268 tw32(MAC_HASH_REG_0, mc_filter[0]);
9269 tw32(MAC_HASH_REG_1, mc_filter[1]);
9270 tw32(MAC_HASH_REG_2, mc_filter[2]);
9271 tw32(MAC_HASH_REG_3, mc_filter[3]);
9274 if (rx_mode != tp->rx_mode) {
9275 tp->rx_mode = rx_mode;
9276 tw32_f(MAC_RX_MODE, rx_mode);
9281 static void tg3_set_rx_mode(struct net_device *dev)
9283 struct tg3 *tp = netdev_priv(dev);
9285 if (!netif_running(dev))
9288 tg3_full_lock(tp, 0);
9289 __tg3_set_rx_mode(dev);
9290 tg3_full_unlock(tp);
9293 #define TG3_REGDUMP_LEN (32 * 1024)
9295 static int tg3_get_regs_len(struct net_device *dev)
9297 return TG3_REGDUMP_LEN;
9300 static void tg3_get_regs(struct net_device *dev,
9301 struct ethtool_regs *regs, void *_p)
9304 struct tg3 *tp = netdev_priv(dev);
9310 memset(p, 0, TG3_REGDUMP_LEN);
9312 if (tp->link_config.phy_is_low_power)
9315 tg3_full_lock(tp, 0);
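/* The helpers below seek to each register's own offset within the dump
 * buffer before copying it, so the 32 KB register dump mirrors the layout
 * of the device's register space; ranges that are never read stay zeroed
 * by the memset above.
 */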
9317 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9318 #define GET_REG32_LOOP(base,len) \
9319 do { p = (u32 *)(orig_p + (base)); \
9320 for (i = 0; i < len; i += 4) \
9321 __GET_REG32((base) + i); \
9323 #define GET_REG32_1(reg) \
9324 do { p = (u32 *)(orig_p + (reg)); \
9325 __GET_REG32((reg)); \
9328 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9329 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9330 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9331 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9332 GET_REG32_1(SNDDATAC_MODE);
9333 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9334 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9335 GET_REG32_1(SNDBDC_MODE);
9336 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9337 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9338 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9339 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9340 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9341 GET_REG32_1(RCVDCC_MODE);
9342 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9343 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9344 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9345 GET_REG32_1(MBFREE_MODE);
9346 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9347 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9348 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9349 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9350 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9351 GET_REG32_1(RX_CPU_MODE);
9352 GET_REG32_1(RX_CPU_STATE);
9353 GET_REG32_1(RX_CPU_PGMCTR);
9354 GET_REG32_1(RX_CPU_HWBKPT);
9355 GET_REG32_1(TX_CPU_MODE);
9356 GET_REG32_1(TX_CPU_STATE);
9357 GET_REG32_1(TX_CPU_PGMCTR);
9358 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9359 GET_REG32_LOOP(FTQ_RESET, 0x120);
9360 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9361 GET_REG32_1(DMAC_MODE);
9362 GET_REG32_LOOP(GRC_MODE, 0x4c);
9363 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9364 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9367 #undef GET_REG32_LOOP
9370 tg3_full_unlock(tp);
9373 static int tg3_get_eeprom_len(struct net_device *dev)
9375 struct tg3 *tp = netdev_priv(dev);
9377 return tp->nvram_size;
9380 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9382 struct tg3 *tp = netdev_priv(dev);
9385 u32 i, offset, len, b_offset, b_count;
9388 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9391 if (tp->link_config.phy_is_low_power)
9394 offset = eeprom->offset;
9398 eeprom->magic = TG3_EEPROM_MAGIC;
9401 /* adjustments to start on required 4 byte boundary */
9402 b_offset = offset & 3;
9403 b_count = 4 - b_offset;
9404 if (b_count > len) {
9405 /* i.e. offset=1 len=2 */
9408 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9411 memcpy(data, ((char *)&val) + b_offset, b_count);
9414 eeprom->len += b_count;
9417 /* read bytes up to the last 4 byte boundary */
9418 pd = &data[eeprom->len];
9419 for (i = 0; i < (len - (len & 3)); i += 4) {
9420 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9425 memcpy(pd + i, &val, 4);
9430 /* read last bytes not ending on 4 byte boundary */
9431 pd = &data[eeprom->len];
9433 b_offset = offset + len - b_count;
9434 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9437 memcpy(pd, &val, b_count);
9438 eeprom->len += b_count;
9443 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9445 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9447 struct tg3 *tp = netdev_priv(dev);
9449 u32 offset, len, b_offset, odd_len;
9453 if (tp->link_config.phy_is_low_power)
9456 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9457 eeprom->magic != TG3_EEPROM_MAGIC)
9460 offset = eeprom->offset;
9463 if ((b_offset = (offset & 3))) {
9464 /* adjustments to start on required 4 byte boundary */
9465 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9476 /* adjustments to end on required 4 byte boundary */
9478 len = (len + 3) & ~3;
9479 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9485 if (b_offset || odd_len) {
9486 buf = kmalloc(len, GFP_KERNEL);
9490 memcpy(buf, &start, 4);
9492 memcpy(buf+len-4, &end, 4);
9493 memcpy(buf + b_offset, data, eeprom->len);
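/* When the write is not word-aligned at either end it is staged through a
 * kmalloc'ed buffer: the first and last 32-bit words read back from NVRAM
 * above preserve the neighbouring bytes around the caller's data before
 * the whole aligned block is written out.
 */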
9496 ret = tg3_nvram_write_block(tp, offset, len, buf);
9504 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9506 struct tg3 *tp = netdev_priv(dev);
9508 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9509 struct phy_device *phydev;
9510 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9512 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9513 return phy_ethtool_gset(phydev, cmd);
9516 cmd->supported = (SUPPORTED_Autoneg);
9518 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9519 cmd->supported |= (SUPPORTED_1000baseT_Half |
9520 SUPPORTED_1000baseT_Full);
9522 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9523 cmd->supported |= (SUPPORTED_100baseT_Half |
9524 SUPPORTED_100baseT_Full |
9525 SUPPORTED_10baseT_Half |
9526 SUPPORTED_10baseT_Full |
9528 cmd->port = PORT_TP;
9530 cmd->supported |= SUPPORTED_FIBRE;
9531 cmd->port = PORT_FIBRE;
9534 cmd->advertising = tp->link_config.advertising;
9535 if (netif_running(dev)) {
9536 cmd->speed = tp->link_config.active_speed;
9537 cmd->duplex = tp->link_config.active_duplex;
9539 cmd->phy_address = tp->phy_addr;
9540 cmd->transceiver = XCVR_INTERNAL;
9541 cmd->autoneg = tp->link_config.autoneg;
9547 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9549 struct tg3 *tp = netdev_priv(dev);
9551 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9552 struct phy_device *phydev;
9553 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9555 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9556 return phy_ethtool_sset(phydev, cmd);
9559 if (cmd->autoneg != AUTONEG_ENABLE &&
9560 cmd->autoneg != AUTONEG_DISABLE)
9563 if (cmd->autoneg == AUTONEG_DISABLE &&
9564 cmd->duplex != DUPLEX_FULL &&
9565 cmd->duplex != DUPLEX_HALF)
9568 if (cmd->autoneg == AUTONEG_ENABLE) {
9569 u32 mask = ADVERTISED_Autoneg |
9571 ADVERTISED_Asym_Pause;
9573 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9574 mask |= ADVERTISED_1000baseT_Half |
9575 ADVERTISED_1000baseT_Full;
9577 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9578 mask |= ADVERTISED_100baseT_Half |
9579 ADVERTISED_100baseT_Full |
9580 ADVERTISED_10baseT_Half |
9581 ADVERTISED_10baseT_Full |
9584 mask |= ADVERTISED_FIBRE;
9586 if (cmd->advertising & ~mask)
9589 mask &= (ADVERTISED_1000baseT_Half |
9590 ADVERTISED_1000baseT_Full |
9591 ADVERTISED_100baseT_Half |
9592 ADVERTISED_100baseT_Full |
9593 ADVERTISED_10baseT_Half |
9594 ADVERTISED_10baseT_Full);
9596 cmd->advertising &= mask;
9598 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9599 if (cmd->speed != SPEED_1000)
9602 if (cmd->duplex != DUPLEX_FULL)
9605 if (cmd->speed != SPEED_100 &&
9606 cmd->speed != SPEED_10)
9611 tg3_full_lock(tp, 0);
9613 tp->link_config.autoneg = cmd->autoneg;
9614 if (cmd->autoneg == AUTONEG_ENABLE) {
9615 tp->link_config.advertising = (cmd->advertising |
9616 ADVERTISED_Autoneg);
9617 tp->link_config.speed = SPEED_INVALID;
9618 tp->link_config.duplex = DUPLEX_INVALID;
9620 tp->link_config.advertising = 0;
9621 tp->link_config.speed = cmd->speed;
9622 tp->link_config.duplex = cmd->duplex;
9625 tp->link_config.orig_speed = tp->link_config.speed;
9626 tp->link_config.orig_duplex = tp->link_config.duplex;
9627 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9629 if (netif_running(dev))
9630 tg3_setup_phy(tp, 1);
9632 tg3_full_unlock(tp);
9637 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9639 struct tg3 *tp = netdev_priv(dev);
9641 strcpy(info->driver, DRV_MODULE_NAME);
9642 strcpy(info->version, DRV_MODULE_VERSION);
9643 strcpy(info->fw_version, tp->fw_ver);
9644 strcpy(info->bus_info, pci_name(tp->pdev));
9647 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9649 struct tg3 *tp = netdev_priv(dev);
9651 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9652 device_can_wakeup(&tp->pdev->dev))
9653 wol->supported = WAKE_MAGIC;
9657 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9658 device_can_wakeup(&tp->pdev->dev))
9659 wol->wolopts = WAKE_MAGIC;
9660 memset(&wol->sopass, 0, sizeof(wol->sopass));
9663 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9665 struct tg3 *tp = netdev_priv(dev);
9666 struct device *dp = &tp->pdev->dev;
9668 if (wol->wolopts & ~WAKE_MAGIC)
9670 if ((wol->wolopts & WAKE_MAGIC) &&
9671 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9674 spin_lock_bh(&tp->lock);
9675 if (wol->wolopts & WAKE_MAGIC) {
9676 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9677 device_set_wakeup_enable(dp, true);
9679 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9680 device_set_wakeup_enable(dp, false);
9682 spin_unlock_bh(&tp->lock);
9687 static u32 tg3_get_msglevel(struct net_device *dev)
9689 struct tg3 *tp = netdev_priv(dev);
9690 return tp->msg_enable;
9693 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9695 struct tg3 *tp = netdev_priv(dev);
9696 tp->msg_enable = value;
9699 static int tg3_set_tso(struct net_device *dev, u32 value)
9701 struct tg3 *tp = netdev_priv(dev);
9703 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9708 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9709 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9710 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9712 dev->features |= NETIF_F_TSO6;
9713 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9714 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9715 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9716 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9717 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9718 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9719 dev->features |= NETIF_F_TSO_ECN;
9721 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9723 return ethtool_op_set_tso(dev, value);
9726 static int tg3_nway_reset(struct net_device *dev)
9728 struct tg3 *tp = netdev_priv(dev);
9731 if (!netif_running(dev))
9734 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9737 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9738 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9740 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9744 spin_lock_bh(&tp->lock);
9746 tg3_readphy(tp, MII_BMCR, &bmcr);
9747 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9748 ((bmcr & BMCR_ANENABLE) ||
9749 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9750 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9754 spin_unlock_bh(&tp->lock);
9760 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9762 struct tg3 *tp = netdev_priv(dev);
9764 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9765 ering->rx_mini_max_pending = 0;
9766 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9767 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9769 ering->rx_jumbo_max_pending = 0;
9771 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9773 ering->rx_pending = tp->rx_pending;
9774 ering->rx_mini_pending = 0;
9775 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9776 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9778 ering->rx_jumbo_pending = 0;
9780 ering->tx_pending = tp->napi[0].tx_pending;
9783 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9785 struct tg3 *tp = netdev_priv(dev);
9786 int i, irq_sync = 0, err = 0;
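/* Sanity limits: each ring request must fit the fixed hardware ring sizes,
 * and the tx ring must be deeper than MAX_SKB_FRAGS (three times that when
 * the TSO workaround is in effect), presumably so a single maximally
 * fragmented packet can always be queued.
 */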
9788 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9789 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9790 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9791 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9792 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9793 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9796 if (netif_running(dev)) {
9802 tg3_full_lock(tp, irq_sync);
9804 tp->rx_pending = ering->rx_pending;
9806 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9807 tp->rx_pending > 63)
9808 tp->rx_pending = 63;
9809 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9811 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9812 tp->napi[i].tx_pending = ering->tx_pending;
9814 if (netif_running(dev)) {
9815 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9816 err = tg3_restart_hw(tp, 1);
9818 tg3_netif_start(tp);
9821 tg3_full_unlock(tp);
9823 if (irq_sync && !err)
9829 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9831 struct tg3 *tp = netdev_priv(dev);
9833 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9835 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9836 epause->rx_pause = 1;
9838 epause->rx_pause = 0;
9840 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9841 epause->tx_pause = 1;
9843 epause->tx_pause = 0;
9846 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9848 struct tg3 *tp = netdev_priv(dev);
9851 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9853 struct phy_device *phydev;
9855 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9857 if (!(phydev->supported & SUPPORTED_Pause) ||
9858 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
9859 ((epause->rx_pause && !epause->tx_pause) ||
9860 (!epause->rx_pause && epause->tx_pause))))
9863 tp->link_config.flowctrl = 0;
9864 if (epause->rx_pause) {
9865 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9867 if (epause->tx_pause) {
9868 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9869 newadv = ADVERTISED_Pause;
9871 newadv = ADVERTISED_Pause |
9872 ADVERTISED_Asym_Pause;
9873 } else if (epause->tx_pause) {
9874 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9875 newadv = ADVERTISED_Asym_Pause;
9879 if (epause->autoneg)
9880 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9882 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9884 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9885 u32 oldadv = phydev->advertising &
9886 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
9887 if (oldadv != newadv) {
9888 phydev->advertising &=
9889 ~(ADVERTISED_Pause |
9890 ADVERTISED_Asym_Pause);
9891 phydev->advertising |= newadv;
9892 if (phydev->autoneg) {
9894 * Always renegotiate the link to
9895 * inform our link partner of our
9896 * flow control settings, even if the
9897 * flow control is forced. Let
9898 * tg3_adjust_link() do the final
9899 * flow control setup.
9901 return phy_start_aneg(phydev);
9905 if (!epause->autoneg)
9906 tg3_setup_flow_control(tp, 0, 0);
9908 tp->link_config.orig_advertising &=
9909 ~(ADVERTISED_Pause |
9910 ADVERTISED_Asym_Pause);
9911 tp->link_config.orig_advertising |= newadv;
9916 if (netif_running(dev)) {
9921 tg3_full_lock(tp, irq_sync);
9923 if (epause->autoneg)
9924 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9926 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9927 if (epause->rx_pause)
9928 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9930 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9931 if (epause->tx_pause)
9932 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9934 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9936 if (netif_running(dev)) {
9937 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9938 err = tg3_restart_hw(tp, 1);
9940 tg3_netif_start(tp);
9943 tg3_full_unlock(tp);
9949 static u32 tg3_get_rx_csum(struct net_device *dev)
9951 struct tg3 *tp = netdev_priv(dev);
9952 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9955 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9957 struct tg3 *tp = netdev_priv(dev);
9959 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9965 spin_lock_bh(&tp->lock);
9967 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9969 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9970 spin_unlock_bh(&tp->lock);
9975 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9977 struct tg3 *tp = netdev_priv(dev);
9979 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9985 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9986 ethtool_op_set_tx_ipv6_csum(dev, data);
9988 ethtool_op_set_tx_csum(dev, data);
9993 static int tg3_get_sset_count(struct net_device *dev, int sset)
9997 return TG3_NUM_TEST;
9999 return TG3_NUM_STATS;
10001 return -EOPNOTSUPP;
10005 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10007 switch (stringset) {
10009 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10012 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10015 WARN_ON(1); /* we need a WARN() */
10020 static int tg3_phys_id(struct net_device *dev, u32 data)
10022 struct tg3 *tp = netdev_priv(dev);
10025 if (!netif_running(tp->dev))
10029 data = UINT_MAX / 2;
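/* ethtool passes 0 to mean "blink until interrupted", which is presumably
 * why data is set to a huge value here; each loop iteration toggles the
 * LED overrides and sleeps 500 ms, bailing out on a signal.
 */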
10031 for (i = 0; i < (data * 2); i++) {
10033 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10034 LED_CTRL_1000MBPS_ON |
10035 LED_CTRL_100MBPS_ON |
10036 LED_CTRL_10MBPS_ON |
10037 LED_CTRL_TRAFFIC_OVERRIDE |
10038 LED_CTRL_TRAFFIC_BLINK |
10039 LED_CTRL_TRAFFIC_LED);
10042 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10043 LED_CTRL_TRAFFIC_OVERRIDE);
10045 if (msleep_interruptible(500))
10048 tw32(MAC_LED_CTRL, tp->led_ctrl);
10052 static void tg3_get_ethtool_stats(struct net_device *dev,
10053 struct ethtool_stats *estats, u64 *tmp_stats)
10055 struct tg3 *tp = netdev_priv(dev);
10056 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10059 #define NVRAM_TEST_SIZE 0x100
10060 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10061 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10062 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10063 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10064 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10066 static int tg3_test_nvram(struct tg3 *tp)
10070 int i, j, k, err = 0, size;
10072 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10075 if (tg3_nvram_read(tp, 0, &magic) != 0)
10078 if (magic == TG3_EEPROM_MAGIC)
10079 size = NVRAM_TEST_SIZE;
10080 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10081 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10082 TG3_EEPROM_SB_FORMAT_1) {
10083 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10084 case TG3_EEPROM_SB_REVISION_0:
10085 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10087 case TG3_EEPROM_SB_REVISION_2:
10088 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10090 case TG3_EEPROM_SB_REVISION_3:
10091 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10098 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10099 size = NVRAM_SELFBOOT_HW_SIZE;
10103 buf = kmalloc(size, GFP_KERNEL);
10108 for (i = 0, j = 0; i < size; i += 4, j++) {
10109 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10116 /* Selfboot format */
10117 magic = be32_to_cpu(buf[0]);
10118 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10119 TG3_EEPROM_MAGIC_FW) {
10120 u8 *buf8 = (u8 *) buf, csum8 = 0;
10122 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10123 TG3_EEPROM_SB_REVISION_2) {
10124 /* For rev 2, the csum doesn't include the MBA. */
10125 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10127 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10130 for (i = 0; i < size; i++)
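/* The format-1 selfboot image evidently passes when the byte sum (csum8)
 * over the covered range is zero, i.e. the stored checksum byte is the
 * two's complement of the remaining bytes.
 */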
10143 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10144 TG3_EEPROM_MAGIC_HW) {
10145 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10146 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10147 u8 *buf8 = (u8 *) buf;
10149 /* Separate the parity bits and the data bytes. */
10150 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10151 if ((i == 0) || (i == 8)) {
10155 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10156 parity[k++] = buf8[i] & msk;
10158 } else if (i == 16) {
10162 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10163 parity[k++] = buf8[i] & msk;
10166 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10167 parity[k++] = buf8[i] & msk;
10170 data[j++] = buf8[i];
10174 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10175 u8 hw8 = hweight8(data[i]);
10177 if ((hw8 & 0x1) && parity[i])
10179 else if (!(hw8 & 0x1) && !parity[i])
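/* i.e. an odd-parity check: the stored parity bit must complement the data
 * byte's population count, and either mismatch above aborts the test.
 */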
10186 /* Bootstrap checksum at offset 0x10 */
10187 csum = calc_crc((unsigned char *) buf, 0x10);
10188 if (csum != be32_to_cpu(buf[0x10/4]))
10191 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10192 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10193 if (csum != be32_to_cpu(buf[0xfc/4]))
10203 #define TG3_SERDES_TIMEOUT_SEC 2
10204 #define TG3_COPPER_TIMEOUT_SEC 6
10206 static int tg3_test_link(struct tg3 *tp)
10210 if (!netif_running(tp->dev))
10213 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10214 max = TG3_SERDES_TIMEOUT_SEC;
10216 max = TG3_COPPER_TIMEOUT_SEC;
10218 for (i = 0; i < max; i++) {
10219 if (netif_carrier_ok(tp->dev))
10222 if (msleep_interruptible(1000))
10229 /* Only test the commonly used registers */
10230 static int tg3_test_registers(struct tg3 *tp)
10232 int i, is_5705, is_5750;
10233 u32 offset, read_mask, write_mask, val, save_val, read_val;
10237 #define TG3_FL_5705 0x1
10238 #define TG3_FL_NOT_5705 0x2
10239 #define TG3_FL_NOT_5788 0x4
10240 #define TG3_FL_NOT_5750 0x8
10244 /* MAC Control Registers */
10245 { MAC_MODE, TG3_FL_NOT_5705,
10246 0x00000000, 0x00ef6f8c },
10247 { MAC_MODE, TG3_FL_5705,
10248 0x00000000, 0x01ef6b8c },
10249 { MAC_STATUS, TG3_FL_NOT_5705,
10250 0x03800107, 0x00000000 },
10251 { MAC_STATUS, TG3_FL_5705,
10252 0x03800100, 0x00000000 },
10253 { MAC_ADDR_0_HIGH, 0x0000,
10254 0x00000000, 0x0000ffff },
10255 { MAC_ADDR_0_LOW, 0x0000,
10256 0x00000000, 0xffffffff },
10257 { MAC_RX_MTU_SIZE, 0x0000,
10258 0x00000000, 0x0000ffff },
10259 { MAC_TX_MODE, 0x0000,
10260 0x00000000, 0x00000070 },
10261 { MAC_TX_LENGTHS, 0x0000,
10262 0x00000000, 0x00003fff },
10263 { MAC_RX_MODE, TG3_FL_NOT_5705,
10264 0x00000000, 0x000007fc },
10265 { MAC_RX_MODE, TG3_FL_5705,
10266 0x00000000, 0x000007dc },
10267 { MAC_HASH_REG_0, 0x0000,
10268 0x00000000, 0xffffffff },
10269 { MAC_HASH_REG_1, 0x0000,
10270 0x00000000, 0xffffffff },
10271 { MAC_HASH_REG_2, 0x0000,
10272 0x00000000, 0xffffffff },
10273 { MAC_HASH_REG_3, 0x0000,
10274 0x00000000, 0xffffffff },
10276 /* Receive Data and Receive BD Initiator Control Registers. */
10277 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10278 0x00000000, 0xffffffff },
10279 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10280 0x00000000, 0xffffffff },
10281 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10282 0x00000000, 0x00000003 },
10283 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10284 0x00000000, 0xffffffff },
10285 { RCVDBDI_STD_BD+0, 0x0000,
10286 0x00000000, 0xffffffff },
10287 { RCVDBDI_STD_BD+4, 0x0000,
10288 0x00000000, 0xffffffff },
10289 { RCVDBDI_STD_BD+8, 0x0000,
10290 0x00000000, 0xffff0002 },
10291 { RCVDBDI_STD_BD+0xc, 0x0000,
10292 0x00000000, 0xffffffff },
10294 /* Receive BD Initiator Control Registers. */
10295 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10296 0x00000000, 0xffffffff },
10297 { RCVBDI_STD_THRESH, TG3_FL_5705,
10298 0x00000000, 0x000003ff },
10299 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10300 0x00000000, 0xffffffff },
10302 /* Host Coalescing Control Registers. */
10303 { HOSTCC_MODE, TG3_FL_NOT_5705,
10304 0x00000000, 0x00000004 },
10305 { HOSTCC_MODE, TG3_FL_5705,
10306 0x00000000, 0x000000f6 },
10307 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10308 0x00000000, 0xffffffff },
10309 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10310 0x00000000, 0x000003ff },
10311 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10312 0x00000000, 0xffffffff },
10313 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10314 0x00000000, 0x000003ff },
10315 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10316 0x00000000, 0xffffffff },
10317 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10318 0x00000000, 0x000000ff },
10319 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10320 0x00000000, 0xffffffff },
10321 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10322 0x00000000, 0x000000ff },
10323 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10324 0x00000000, 0xffffffff },
10325 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10326 0x00000000, 0xffffffff },
10327 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10328 0x00000000, 0xffffffff },
10329 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10330 0x00000000, 0x000000ff },
10331 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10332 0x00000000, 0xffffffff },
10333 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10334 0x00000000, 0x000000ff },
10335 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10336 0x00000000, 0xffffffff },
10337 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10338 0x00000000, 0xffffffff },
10339 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10340 0x00000000, 0xffffffff },
10341 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10342 0x00000000, 0xffffffff },
10343 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10344 0x00000000, 0xffffffff },
10345 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10346 0xffffffff, 0x00000000 },
10347 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10348 0xffffffff, 0x00000000 },
10350 /* Buffer Manager Control Registers. */
10351 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10352 0x00000000, 0x007fff80 },
10353 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10354 0x00000000, 0x007fffff },
10355 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10356 0x00000000, 0x0000003f },
10357 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10358 0x00000000, 0x000001ff },
10359 { BUFMGR_MB_HIGH_WATER, 0x0000,
10360 0x00000000, 0x000001ff },
10361 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10362 0xffffffff, 0x00000000 },
10363 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10364 0xffffffff, 0x00000000 },
10366 /* Mailbox Registers */
10367 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10368 0x00000000, 0x000001ff },
10369 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10370 0x00000000, 0x000001ff },
10371 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10372 0x00000000, 0x000007ff },
10373 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10374 0x00000000, 0x000001ff },
10376 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10379 is_5705 = is_5750 = 0;
10380 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10382 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10386 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10387 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10390 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10393 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10394 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10397 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10400 offset = (u32) reg_tbl[i].offset;
10401 read_mask = reg_tbl[i].read_mask;
10402 write_mask = reg_tbl[i].write_mask;
10404 /* Save the original register content */
10405 save_val = tr32(offset);
10407 /* Determine the read-only value. */
10408 read_val = save_val & read_mask;
10410 /* Write zero to the register, then make sure the read-only bits
10411 * are not changed and the read/write bits are all zeros.
10415 val = tr32(offset);
10417 /* Test the read-only and read/write bits. */
10418 if (((val & read_mask) != read_val) || (val & write_mask))
10421 /* Write ones to all the bits defined by RdMask and WrMask, then
10422 * make sure the read-only bits are not changed and the
10423 * read/write bits are all ones.
10425 tw32(offset, read_mask | write_mask);
10427 val = tr32(offset);
10429 /* Test the read-only bits. */
10430 if ((val & read_mask) != read_val)
10433 /* Test the read/write bits. */
10434 if ((val & write_mask) != write_mask)
10437 tw32(offset, save_val);
10443 if (netif_msg_hw(tp))
10444 netdev_err(tp->dev,
10445 "Register test failed at offset %x\n", offset);
10446 tw32(offset, save_val);
10450 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10452 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10456 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10457 for (j = 0; j < len; j += 4) {
10460 tg3_write_mem(tp, offset + j, test_pattern[i]);
10461 tg3_read_mem(tp, offset + j, &val);
10462 if (val != test_pattern[i])
10469 static int tg3_test_memory(struct tg3 *tp)
10471 static struct mem_entry {
10474 } mem_tbl_570x[] = {
10475 { 0x00000000, 0x00b50},
10476 { 0x00002000, 0x1c000},
10477 { 0xffffffff, 0x00000}
10478 }, mem_tbl_5705[] = {
10479 { 0x00000100, 0x0000c},
10480 { 0x00000200, 0x00008},
10481 { 0x00004000, 0x00800},
10482 { 0x00006000, 0x01000},
10483 { 0x00008000, 0x02000},
10484 { 0x00010000, 0x0e000},
10485 { 0xffffffff, 0x00000}
10486 }, mem_tbl_5755[] = {
10487 { 0x00000200, 0x00008},
10488 { 0x00004000, 0x00800},
10489 { 0x00006000, 0x00800},
10490 { 0x00008000, 0x02000},
10491 { 0x00010000, 0x0c000},
10492 { 0xffffffff, 0x00000}
10493 }, mem_tbl_5906[] = {
10494 { 0x00000200, 0x00008},
10495 { 0x00004000, 0x00400},
10496 { 0x00006000, 0x00400},
10497 { 0x00008000, 0x01000},
10498 { 0x00010000, 0x01000},
10499 { 0xffffffff, 0x00000}
10500 }, mem_tbl_5717[] = {
10501 { 0x00000200, 0x00008},
10502 { 0x00010000, 0x0a000},
10503 { 0x00020000, 0x13c00},
10504 { 0xffffffff, 0x00000}
10505 }, mem_tbl_57765[] = {
10506 { 0x00000200, 0x00008},
10507 { 0x00004000, 0x00800},
10508 { 0x00006000, 0x09800},
10509 { 0x00010000, 0x0a000},
10510 { 0xffffffff, 0x00000}
10512 struct mem_entry *mem_tbl;
10516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
10517 mem_tbl = mem_tbl_5717;
10518 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10519 mem_tbl = mem_tbl_57765;
10520 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10521 mem_tbl = mem_tbl_5755;
10522 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10523 mem_tbl = mem_tbl_5906;
10524 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10525 mem_tbl = mem_tbl_5705;
10527 mem_tbl = mem_tbl_570x;
10529 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10530 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10531 mem_tbl[i].len)) != 0)
10538 #define TG3_MAC_LOOPBACK 0
10539 #define TG3_PHY_LOOPBACK 1
10541 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10543 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10544 u32 desc_idx, coal_now;
10545 struct sk_buff *skb, *rx_skb;
10548 int num_pkts, tx_len, rx_len, i, err;
10549 struct tg3_rx_buffer_desc *desc;
10550 struct tg3_napi *tnapi, *rnapi;
10551 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10553 tnapi = &tp->napi[0];
10554 rnapi = &tp->napi[0];
10555 if (tp->irq_cnt > 1) {
10556 rnapi = &tp->napi[1];
10557 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10558 tnapi = &tp->napi[1];
10560 coal_now = tnapi->coal_now | rnapi->coal_now;
10562 if (loopback_mode == TG3_MAC_LOOPBACK) {
10563 /* HW errata - mac loopback fails in some cases on 5780.
10564 * Normal traffic and PHY loopback are not affected by
10567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10570 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10571 MAC_MODE_PORT_INT_LPBACK;
10572 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10573 mac_mode |= MAC_MODE_LINK_POLARITY;
10574 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10575 mac_mode |= MAC_MODE_PORT_MODE_MII;
10577 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10578 tw32(MAC_MODE, mac_mode);
10579 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10582 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10583 tg3_phy_fet_toggle_apd(tp, false);
10584 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10586 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10588 tg3_phy_toggle_automdix(tp, 0);
10590 tg3_writephy(tp, MII_BMCR, val);
10593 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10594 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10595 tg3_writephy(tp, MII_TG3_FET_PTEST,
10596 MII_TG3_FET_PTEST_FRC_TX_LINK |
10597 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10598 /* The write needs to be flushed for the AC131 */
10599 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10600 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10601 mac_mode |= MAC_MODE_PORT_MODE_MII;
10603 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10605 /* reset to prevent losing 1st rx packet intermittently */
10606 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10607 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10609 tw32_f(MAC_RX_MODE, tp->rx_mode);
10611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10612 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10613 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10614 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10615 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10616 mac_mode |= MAC_MODE_LINK_POLARITY;
10617 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10618 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10620 tw32(MAC_MODE, mac_mode);
10628 skb = netdev_alloc_skb(tp->dev, tx_len);
10632 tx_data = skb_put(skb, tx_len);
10633 memcpy(tx_data, tp->dev->dev_addr, 6);
10634 memset(tx_data + 6, 0x0, 8);
10636 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10638 for (i = 14; i < tx_len; i++)
10639 tx_data[i] = (u8) (i & 0xff);
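/* The test frame is addressed to the device's own MAC address (6 bytes),
 * followed by 8 bytes of zero padding and an incrementing byte pattern
 * that the receive side verifies below.
 */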
10641 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10642 if (pci_dma_mapping_error(tp->pdev, map)) {
10643 dev_kfree_skb(skb);
10647 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10652 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10656 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10661 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10662 tr32_mailbox(tnapi->prodmbox);
10666 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10667 for (i = 0; i < 35; i++) {
10668 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10673 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10674 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10675 if ((tx_idx == tnapi->tx_prod) &&
10676 (rx_idx == (rx_start_idx + num_pkts)))
10680 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10681 dev_kfree_skb(skb);
10683 if (tx_idx != tnapi->tx_prod)
10686 if (rx_idx != rx_start_idx + num_pkts)
10689 desc = &rnapi->rx_rcb[rx_start_idx];
10690 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10691 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10692 if (opaque_key != RXD_OPAQUE_RING_STD)
10695 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10696 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10699 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
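/* The length from the rx BD includes the trailing 4-byte FCS, so drop it
 * before comparing against the transmitted length.
 */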
10700 if (rx_len != tx_len)
10703 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10705 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10706 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10708 for (i = 14; i < tx_len; i++) {
10709 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10714 /* tg3_free_rings will unmap and free the rx_skb */
10719 #define TG3_MAC_LOOPBACK_FAILED 1
10720 #define TG3_PHY_LOOPBACK_FAILED 2
10721 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10722 TG3_PHY_LOOPBACK_FAILED)
10724 static int tg3_test_loopback(struct tg3 *tp)
10729 if (!netif_running(tp->dev))
10730 return TG3_LOOPBACK_FAILED;
10732 err = tg3_reset_hw(tp, 1);
10734 return TG3_LOOPBACK_FAILED;
10736 /* Turn off gphy autopowerdown. */
10737 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10738 tg3_phy_toggle_apd(tp, false);
10740 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10744 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10746 /* Wait for up to 40 microseconds to acquire lock. */
10747 for (i = 0; i < 4; i++) {
10748 status = tr32(TG3_CPMU_MUTEX_GNT);
10749 if (status == CPMU_MUTEX_GNT_DRIVER)
10754 if (status != CPMU_MUTEX_GNT_DRIVER)
10755 return TG3_LOOPBACK_FAILED;
10757 /* Turn off link-based power management. */
10758 cpmuctrl = tr32(TG3_CPMU_CTRL);
10759 tw32(TG3_CPMU_CTRL,
10760 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10761 CPMU_CTRL_LINK_AWARE_MODE));
10764 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10765 err |= TG3_MAC_LOOPBACK_FAILED;
10767 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10768 tw32(TG3_CPMU_CTRL, cpmuctrl);
10770 /* Release the mutex */
10771 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10774 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10775 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10776 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10777 err |= TG3_PHY_LOOPBACK_FAILED;
10780 /* Re-enable gphy autopowerdown. */
10781 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10782 tg3_phy_toggle_apd(tp, true);
10787 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10790 struct tg3 *tp = netdev_priv(dev);
10792 if (tp->link_config.phy_is_low_power)
10793 tg3_set_power_state(tp, PCI_D0);
10795 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10797 if (tg3_test_nvram(tp) != 0) {
10798 etest->flags |= ETH_TEST_FL_FAILED;
10801 if (tg3_test_link(tp) != 0) {
10802 etest->flags |= ETH_TEST_FL_FAILED;
10805 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10806 int err, err2 = 0, irq_sync = 0;
10808 if (netif_running(dev)) {
10810 tg3_netif_stop(tp);
10814 tg3_full_lock(tp, irq_sync);
10816 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10817 err = tg3_nvram_lock(tp);
10818 tg3_halt_cpu(tp, RX_CPU_BASE);
10819 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10820 tg3_halt_cpu(tp, TX_CPU_BASE);
10822 tg3_nvram_unlock(tp);
10824 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10827 if (tg3_test_registers(tp) != 0) {
10828 etest->flags |= ETH_TEST_FL_FAILED;
10831 if (tg3_test_memory(tp) != 0) {
10832 etest->flags |= ETH_TEST_FL_FAILED;
10835 if ((data[4] = tg3_test_loopback(tp)) != 0)
10836 etest->flags |= ETH_TEST_FL_FAILED;
10838 tg3_full_unlock(tp);
10840 if (tg3_test_interrupt(tp) != 0) {
10841 etest->flags |= ETH_TEST_FL_FAILED;
10845 tg3_full_lock(tp, 0);
10847 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10848 if (netif_running(dev)) {
10849 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10850 err2 = tg3_restart_hw(tp, 1);
10852 tg3_netif_start(tp);
10855 tg3_full_unlock(tp);
10857 if (irq_sync && !err2)
10860 if (tp->link_config.phy_is_low_power)
10861 tg3_set_power_state(tp, PCI_D3hot);
10865 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10867 struct mii_ioctl_data *data = if_mii(ifr);
10868 struct tg3 *tp = netdev_priv(dev);
10871 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10872 struct phy_device *phydev;
10873 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10875 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10876 return phy_mii_ioctl(phydev, data, cmd);
10881 data->phy_id = tp->phy_addr;
10884 case SIOCGMIIREG: {
10887 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10888 break; /* We have no PHY */
10890 if (tp->link_config.phy_is_low_power)
10893 spin_lock_bh(&tp->lock);
10894 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10895 spin_unlock_bh(&tp->lock);
10897 data->val_out = mii_regval;
10903 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10904 break; /* We have no PHY */
10906 if (tp->link_config.phy_is_low_power)
10909 spin_lock_bh(&tp->lock);
10910 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10911 spin_unlock_bh(&tp->lock);
10919 return -EOPNOTSUPP;
10922 #if TG3_VLAN_TAG_USED
10923 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10925 struct tg3 *tp = netdev_priv(dev);
10927 if (!netif_running(dev)) {
10932 tg3_netif_stop(tp);
10934 tg3_full_lock(tp, 0);
10938 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10939 __tg3_set_rx_mode(dev);
10941 tg3_netif_start(tp);
10943 tg3_full_unlock(tp);
10947 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10949 struct tg3 *tp = netdev_priv(dev);
10951 memcpy(ec, &tp->coal, sizeof(*ec));
10955 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10957 struct tg3 *tp = netdev_priv(dev);
10958 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10959 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10961 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10962 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10963 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10964 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10965 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
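/* On 5705-plus parts the irq-coalescing and stats-block intervals are not
 * supported, so their limits stay zero and any nonzero request for them is
 * rejected by the range checks below.
 */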
10968 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10969 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10970 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10971 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10972 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10973 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10974 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10975 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10976 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10977 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10980 /* No rx interrupts will be generated if both are zero */
10981 if ((ec->rx_coalesce_usecs == 0) &&
10982 (ec->rx_max_coalesced_frames == 0))
10985 /* No tx interrupts will be generated if both are zero */
10986 if ((ec->tx_coalesce_usecs == 0) &&
10987 (ec->tx_max_coalesced_frames == 0))
10990 /* Only copy relevant parameters, ignore all others. */
10991 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10992 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10993 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10994 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10995 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10996 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10997 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10998 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10999 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11001 if (netif_running(dev)) {
11002 tg3_full_lock(tp, 0);
11003 __tg3_set_coalesce(tp, &tp->coal);
11004 tg3_full_unlock(tp);
11009 static const struct ethtool_ops tg3_ethtool_ops = {
11010 .get_settings = tg3_get_settings,
11011 .set_settings = tg3_set_settings,
11012 .get_drvinfo = tg3_get_drvinfo,
11013 .get_regs_len = tg3_get_regs_len,
11014 .get_regs = tg3_get_regs,
11015 .get_wol = tg3_get_wol,
11016 .set_wol = tg3_set_wol,
11017 .get_msglevel = tg3_get_msglevel,
11018 .set_msglevel = tg3_set_msglevel,
11019 .nway_reset = tg3_nway_reset,
11020 .get_link = ethtool_op_get_link,
11021 .get_eeprom_len = tg3_get_eeprom_len,
11022 .get_eeprom = tg3_get_eeprom,
11023 .set_eeprom = tg3_set_eeprom,
11024 .get_ringparam = tg3_get_ringparam,
11025 .set_ringparam = tg3_set_ringparam,
11026 .get_pauseparam = tg3_get_pauseparam,
11027 .set_pauseparam = tg3_set_pauseparam,
11028 .get_rx_csum = tg3_get_rx_csum,
11029 .set_rx_csum = tg3_set_rx_csum,
11030 .set_tx_csum = tg3_set_tx_csum,
11031 .set_sg = ethtool_op_set_sg,
11032 .set_tso = tg3_set_tso,
11033 .self_test = tg3_self_test,
11034 .get_strings = tg3_get_strings,
11035 .phys_id = tg3_phys_id,
11036 .get_ethtool_stats = tg3_get_ethtool_stats,
11037 .get_coalesce = tg3_get_coalesce,
11038 .set_coalesce = tg3_set_coalesce,
11039 .get_sset_count = tg3_get_sset_count,
11042 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11044 u32 cursize, val, magic;
11046 tp->nvram_size = EEPROM_CHIP_SIZE;
11048 if (tg3_nvram_read(tp, 0, &magic) != 0)
11051 if ((magic != TG3_EEPROM_MAGIC) &&
11052 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11053 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11057 * Size the chip by reading offsets at increasing powers of two.
11058 * When we encounter our validation signature, we know the addressing
11059 * has wrapped around, and thus have our chip size.
11063 while (cursize < tp->nvram_size) {
11064 if (tg3_nvram_read(tp, cursize, &val) != 0)
11073 tp->nvram_size = cursize;
11076 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11080 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11081 tg3_nvram_read(tp, 0, &val) != 0)
11084 /* Selfboot format */
11085 if (val != TG3_EEPROM_MAGIC) {
11086 tg3_get_eeprom_size(tp);
11090 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11092 /* This is confusing. We want to operate on the
11093 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11094 * call will read from NVRAM and byteswap the data
11095 * according to the byteswapping settings for all
11096 * other register accesses. This ensures the data we
11097 * want will always reside in the lower 16-bits.
11098 * However, the data in NVRAM is in LE format, which
11099 * means the data from the NVRAM read will always be
11100 * opposite the endianness of the CPU. The 16-bit
11101 * byteswap then brings the data to CPU endianness.
11103 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
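/* e.g. a stored 16-bit value that comes out as 0x0080 (128) after the swap
 * yields tp->nvram_size = 128 * 1024 bytes; the units are KB.
 */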
11107 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11110 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11114 nvcfg1 = tr32(NVRAM_CFG1);
11115 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11116 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11118 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11119 tw32(NVRAM_CFG1, nvcfg1);
11122 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11123 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11124 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11125 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11126 tp->nvram_jedecnum = JEDEC_ATMEL;
11127 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11128 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11130 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11131 tp->nvram_jedecnum = JEDEC_ATMEL;
11132 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11134 case FLASH_VENDOR_ATMEL_EEPROM:
11135 tp->nvram_jedecnum = JEDEC_ATMEL;
11136 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11137 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11139 case FLASH_VENDOR_ST:
11140 tp->nvram_jedecnum = JEDEC_ST;
11141 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11142 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11144 case FLASH_VENDOR_SAIFUN:
11145 tp->nvram_jedecnum = JEDEC_SAIFUN;
11146 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11148 case FLASH_VENDOR_SST_SMALL:
11149 case FLASH_VENDOR_SST_LARGE:
11150 tp->nvram_jedecnum = JEDEC_SST;
11151 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11155 tp->nvram_jedecnum = JEDEC_ATMEL;
11156 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11157 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11161 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11163 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11164 case FLASH_5752PAGE_SIZE_256:
11165 tp->nvram_pagesize = 256;
11167 case FLASH_5752PAGE_SIZE_512:
11168 tp->nvram_pagesize = 512;
11170 case FLASH_5752PAGE_SIZE_1K:
11171 tp->nvram_pagesize = 1024;
11173 case FLASH_5752PAGE_SIZE_2K:
11174 tp->nvram_pagesize = 2048;
11176 case FLASH_5752PAGE_SIZE_4K:
11177 tp->nvram_pagesize = 4096;
11179 case FLASH_5752PAGE_SIZE_264:
11180 tp->nvram_pagesize = 264;
11182 case FLASH_5752PAGE_SIZE_528:
11183 tp->nvram_pagesize = 528;
11188 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11192 nvcfg1 = tr32(NVRAM_CFG1);
11194 /* NVRAM protection for TPM */
11195 if (nvcfg1 & (1 << 27))
11196 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11198 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11199 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11200 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11201 tp->nvram_jedecnum = JEDEC_ATMEL;
11202 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11204 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11205 tp->nvram_jedecnum = JEDEC_ATMEL;
11206 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11207 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11209 case FLASH_5752VENDOR_ST_M45PE10:
11210 case FLASH_5752VENDOR_ST_M45PE20:
11211 case FLASH_5752VENDOR_ST_M45PE40:
11212 tp->nvram_jedecnum = JEDEC_ST;
11213 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11214 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11218 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11219 tg3_nvram_get_pagesize(tp, nvcfg1);
11221 /* For eeprom, set pagesize to maximum eeprom size */
11222 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11224 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11225 tw32(NVRAM_CFG1, nvcfg1);
11229 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11231 u32 nvcfg1, protect = 0;
11233 nvcfg1 = tr32(NVRAM_CFG1);
11235 /* NVRAM protection for TPM */
11236 if (nvcfg1 & (1 << 27)) {
11237 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11241 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11243 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11244 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11245 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11246 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11247 tp->nvram_jedecnum = JEDEC_ATMEL;
11248 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11249 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11250 tp->nvram_pagesize = 264;
11251 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11252 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11253 tp->nvram_size = (protect ? 0x3e200 :
11254 TG3_NVRAM_SIZE_512KB);
11255 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11256 tp->nvram_size = (protect ? 0x1f200 :
11257 TG3_NVRAM_SIZE_256KB);
11259 tp->nvram_size = (protect ? 0x1f200 :
11260 TG3_NVRAM_SIZE_128KB);
11262 case FLASH_5752VENDOR_ST_M45PE10:
11263 case FLASH_5752VENDOR_ST_M45PE20:
11264 case FLASH_5752VENDOR_ST_M45PE40:
11265 tp->nvram_jedecnum = JEDEC_ST;
11266 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11267 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11268 tp->nvram_pagesize = 256;
11269 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11270 tp->nvram_size = (protect ?
11271 TG3_NVRAM_SIZE_64KB :
11272 TG3_NVRAM_SIZE_128KB);
11273 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11274 tp->nvram_size = (protect ?
11275 TG3_NVRAM_SIZE_64KB :
11276 TG3_NVRAM_SIZE_256KB);
11278 tp->nvram_size = (protect ?
11279 TG3_NVRAM_SIZE_128KB :
11280 TG3_NVRAM_SIZE_512KB);
11285 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11289 nvcfg1 = tr32(NVRAM_CFG1);
11291 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11292 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11293 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11294 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11295 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11296 tp->nvram_jedecnum = JEDEC_ATMEL;
11297 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11298 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11300 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11301 tw32(NVRAM_CFG1, nvcfg1);
11303 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11304 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11305 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11306 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11307 tp->nvram_jedecnum = JEDEC_ATMEL;
11308 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11309 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11310 tp->nvram_pagesize = 264;
11312 case FLASH_5752VENDOR_ST_M45PE10:
11313 case FLASH_5752VENDOR_ST_M45PE20:
11314 case FLASH_5752VENDOR_ST_M45PE40:
11315 tp->nvram_jedecnum = JEDEC_ST;
11316 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11317 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11318 tp->nvram_pagesize = 256;
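/* 5761 NVRAM detection: both the Atmel and ST parts here use 256-byte
 * pages. The total size is read from NVRAM_ADDR_LOCKOUT for protected
 * parts and otherwise inferred from the part type.
 */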
11323 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11325 u32 nvcfg1, protect = 0;
11327 nvcfg1 = tr32(NVRAM_CFG1);
11329 /* NVRAM protection for TPM */
11330 if (nvcfg1 & (1 << 27)) {
11331 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11335 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11337 case FLASH_5761VENDOR_ATMEL_ADB021D:
11338 case FLASH_5761VENDOR_ATMEL_ADB041D:
11339 case FLASH_5761VENDOR_ATMEL_ADB081D:
11340 case FLASH_5761VENDOR_ATMEL_ADB161D:
11341 case FLASH_5761VENDOR_ATMEL_MDB021D:
11342 case FLASH_5761VENDOR_ATMEL_MDB041D:
11343 case FLASH_5761VENDOR_ATMEL_MDB081D:
11344 case FLASH_5761VENDOR_ATMEL_MDB161D:
11345 tp->nvram_jedecnum = JEDEC_ATMEL;
11346 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11347 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11348 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11349 tp->nvram_pagesize = 256;
11351 case FLASH_5761VENDOR_ST_A_M45PE20:
11352 case FLASH_5761VENDOR_ST_A_M45PE40:
11353 case FLASH_5761VENDOR_ST_A_M45PE80:
11354 case FLASH_5761VENDOR_ST_A_M45PE16:
11355 case FLASH_5761VENDOR_ST_M_M45PE20:
11356 case FLASH_5761VENDOR_ST_M_M45PE40:
11357 case FLASH_5761VENDOR_ST_M_M45PE80:
11358 case FLASH_5761VENDOR_ST_M_M45PE16:
11359 tp->nvram_jedecnum = JEDEC_ST;
11360 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11361 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11362 tp->nvram_pagesize = 256;
11367 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11370 case FLASH_5761VENDOR_ATMEL_ADB161D:
11371 case FLASH_5761VENDOR_ATMEL_MDB161D:
11372 case FLASH_5761VENDOR_ST_A_M45PE16:
11373 case FLASH_5761VENDOR_ST_M_M45PE16:
11374 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11376 case FLASH_5761VENDOR_ATMEL_ADB081D:
11377 case FLASH_5761VENDOR_ATMEL_MDB081D:
11378 case FLASH_5761VENDOR_ST_A_M45PE80:
11379 case FLASH_5761VENDOR_ST_M_M45PE80:
11380 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11382 case FLASH_5761VENDOR_ATMEL_ADB041D:
11383 case FLASH_5761VENDOR_ATMEL_MDB041D:
11384 case FLASH_5761VENDOR_ST_A_M45PE40:
11385 case FLASH_5761VENDOR_ST_M_M45PE40:
11386 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11388 case FLASH_5761VENDOR_ATMEL_ADB021D:
11389 case FLASH_5761VENDOR_ATMEL_MDB021D:
11390 case FLASH_5761VENDOR_ST_A_M45PE20:
11391 case FLASH_5761VENDOR_ST_M_M45PE20:
11392 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
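/* The 5906 always carries a buffered Atmel EEPROM, so there is nothing
 * to probe from NVRAM_CFG1.
 */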
11398 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11400 tp->nvram_jedecnum = JEDEC_ATMEL;
11401 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11402 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
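/* 57780/57765 NVRAM detection: map the Atmel AT45DB and ST M45PE parts to
 * their sizes; an unrecognized vendor code is treated as "no NVRAM fitted".
 */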
11405 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11409 nvcfg1 = tr32(NVRAM_CFG1);
11411 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11412 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11413 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11414 tp->nvram_jedecnum = JEDEC_ATMEL;
11415 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11416 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11418 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11419 tw32(NVRAM_CFG1, nvcfg1);
11421 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11422 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11423 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11424 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11425 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11426 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11427 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11428 tp->nvram_jedecnum = JEDEC_ATMEL;
11429 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11430 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11432 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11433 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11434 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11435 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11436 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11438 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11439 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11440 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11442 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11443 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11444 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11448 case FLASH_5752VENDOR_ST_M45PE10:
11449 case FLASH_5752VENDOR_ST_M45PE20:
11450 case FLASH_5752VENDOR_ST_M45PE40:
11451 tp->nvram_jedecnum = JEDEC_ST;
11452 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11453 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11455 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11456 case FLASH_5752VENDOR_ST_M45PE10:
11457 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11459 case FLASH_5752VENDOR_ST_M45PE20:
11460 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11462 case FLASH_5752VENDOR_ST_M45PE40:
11463 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11468 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11472 tg3_nvram_get_pagesize(tp, nvcfg1);
11473 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11474 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
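/* 5717 NVRAM detection follows the same pattern: EEPROM, Atmel flash, or
 * ST flash, with unknown vendor codes marking the device as having no
 * NVRAM and non-264/528-byte pages disabling address translation.
 */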
11478 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11482 nvcfg1 = tr32(NVRAM_CFG1);
11484 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11485 case FLASH_5717VENDOR_ATMEL_EEPROM:
11486 case FLASH_5717VENDOR_MICRO_EEPROM:
11487 tp->nvram_jedecnum = JEDEC_ATMEL;
11488 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11489 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11491 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11492 tw32(NVRAM_CFG1, nvcfg1);
11494 case FLASH_5717VENDOR_ATMEL_MDB011D:
11495 case FLASH_5717VENDOR_ATMEL_ADB011B:
11496 case FLASH_5717VENDOR_ATMEL_ADB011D:
11497 case FLASH_5717VENDOR_ATMEL_MDB021D:
11498 case FLASH_5717VENDOR_ATMEL_ADB021B:
11499 case FLASH_5717VENDOR_ATMEL_ADB021D:
11500 case FLASH_5717VENDOR_ATMEL_45USPT:
11501 tp->nvram_jedecnum = JEDEC_ATMEL;
11502 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11503 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11505 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11506 case FLASH_5717VENDOR_ATMEL_MDB021D:
11507 case FLASH_5717VENDOR_ATMEL_ADB021B:
11508 case FLASH_5717VENDOR_ATMEL_ADB021D:
11509 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11512 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11516 case FLASH_5717VENDOR_ST_M_M25PE10:
11517 case FLASH_5717VENDOR_ST_A_M25PE10:
11518 case FLASH_5717VENDOR_ST_M_M45PE10:
11519 case FLASH_5717VENDOR_ST_A_M45PE10:
11520 case FLASH_5717VENDOR_ST_M_M25PE20:
11521 case FLASH_5717VENDOR_ST_A_M25PE20:
11522 case FLASH_5717VENDOR_ST_M_M45PE20:
11523 case FLASH_5717VENDOR_ST_A_M45PE20:
11524 case FLASH_5717VENDOR_ST_25USPT:
11525 case FLASH_5717VENDOR_ST_45USPT:
11526 tp->nvram_jedecnum = JEDEC_ST;
11527 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11528 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11530 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11531 case FLASH_5717VENDOR_ST_M_M25PE20:
11532 case FLASH_5717VENDOR_ST_A_M25PE20:
11533 case FLASH_5717VENDOR_ST_M_M45PE20:
11534 case FLASH_5717VENDOR_ST_A_M45PE20:
11535 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11538 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11543 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11547 tg3_nvram_get_pagesize(tp, nvcfg1);
11548 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11549 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11552 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
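/* tg3_nvram_init() resets the serial EEPROM state machine, enables auto
 * SEEPROM access and then, on NVRAM-equipped chips, takes the NVRAM lock
 * and runs the per-ASIC detection routine above; 5700/5701 simply fall
 * back to plain EEPROM sizing.
 */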
11553 static void __devinit tg3_nvram_init(struct tg3 *tp)
11555 tw32_f(GRC_EEPROM_ADDR,
11556 (EEPROM_ADDR_FSM_RESET |
11557 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11558 EEPROM_ADDR_CLKPERD_SHIFT)));
11562 /* Enable seeprom accesses. */
11563 tw32_f(GRC_LOCAL_CTRL,
11564 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11567 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11568 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11569 tp->tg3_flags |= TG3_FLAG_NVRAM;
11571 if (tg3_nvram_lock(tp)) {
11572 netdev_warn(tp->dev,
11573 "Cannot get nvram lock, %s failed\n",
11577 tg3_enable_nvram_access(tp);
11579 tp->nvram_size = 0;
11581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11582 tg3_get_5752_nvram_info(tp);
11583 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11584 tg3_get_5755_nvram_info(tp);
11585 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11586 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11588 tg3_get_5787_nvram_info(tp);
11589 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11590 tg3_get_5761_nvram_info(tp);
11591 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11592 tg3_get_5906_nvram_info(tp);
11593 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11595 tg3_get_57780_nvram_info(tp);
11596 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
11597 tg3_get_5717_nvram_info(tp);
11599 tg3_get_nvram_info(tp);
11601 if (tp->nvram_size == 0)
11602 tg3_get_nvram_size(tp);
11604 tg3_disable_nvram_access(tp);
11605 tg3_nvram_unlock(tp);
11608 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11610 tg3_get_eeprom_size(tp);
11614 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11615 u32 offset, u32 len, u8 *buf)
11620 for (i = 0; i < len; i += 4) {
11626 memcpy(&data, buf + i, 4);
11629 * The SEEPROM interface expects the data to always be opposite
11630 * the native endian format. We accomplish this by reversing
11631 * all the operations that would have been performed on the
11632 * data from a call to tg3_nvram_read_be32().
11634 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11636 val = tr32(GRC_EEPROM_ADDR);
11637 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11639 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11641 tw32(GRC_EEPROM_ADDR, val |
11642 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11643 (addr & EEPROM_ADDR_ADDR_MASK) |
11644 EEPROM_ADDR_START |
11645 EEPROM_ADDR_WRITE);
11647 for (j = 0; j < 1000; j++) {
11648 val = tr32(GRC_EEPROM_ADDR);
11650 if (val & EEPROM_ADDR_COMPLETE)
11654 if (!(val & EEPROM_ADDR_COMPLETE)) {
11663 /* offset and length are dword aligned */
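/* Unbuffered flash cannot be modified in place: read the enclosing page
 * into a scratch buffer, merge in the new data, erase the page, and then
 * stream the page back out with FIRST/LAST command framing.
 */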
11664 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11668 u32 pagesize = tp->nvram_pagesize;
11669 u32 pagemask = pagesize - 1;
11673 tmp = kmalloc(pagesize, GFP_KERNEL);
11679 u32 phy_addr, page_off, size;
11681 phy_addr = offset & ~pagemask;
11683 for (j = 0; j < pagesize; j += 4) {
11684 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11685 (__be32 *) (tmp + j));
11692 page_off = offset & pagemask;
11699 memcpy(tmp + page_off, buf, size);
11701 offset = offset + (pagesize - page_off);
11703 tg3_enable_nvram_access(tp);
11706 * Before we can erase the flash page, we need
11707 * to issue a special "write enable" command.
11709 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11711 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11714 /* Erase the target page */
11715 tw32(NVRAM_ADDR, phy_addr);
11717 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11718 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11720 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11723 /* Issue another write enable to start the write. */
11724 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11726 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11729 for (j = 0; j < pagesize; j += 4) {
11732 data = *((__be32 *) (tmp + j));
11734 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11736 tw32(NVRAM_ADDR, phy_addr + j);
11738 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11742 nvram_cmd |= NVRAM_CMD_FIRST;
11743 else if (j == (pagesize - 4))
11744 nvram_cmd |= NVRAM_CMD_LAST;
11746 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11753 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11754 tg3_nvram_exec_cmd(tp, nvram_cmd);
11761 /* offset and length are dword aligned */
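/* Buffered flash and EEPROM parts accept direct word writes; each page
 * (and the transfer as a whole) is framed with FIRST/LAST commands, and
 * some older chips with ST flash issue an extra write-enable before each
 * FIRST command.
 */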
11762 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11767 for (i = 0; i < len; i += 4, offset += 4) {
11768 u32 page_off, phy_addr, nvram_cmd;
11771 memcpy(&data, buf + i, 4);
11772 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11774 page_off = offset % tp->nvram_pagesize;
11776 phy_addr = tg3_nvram_phys_addr(tp, offset);
11778 tw32(NVRAM_ADDR, phy_addr);
11780 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11782 if (page_off == 0 || i == 0)
11783 nvram_cmd |= NVRAM_CMD_FIRST;
11784 if (page_off == (tp->nvram_pagesize - 4))
11785 nvram_cmd |= NVRAM_CMD_LAST;
11787 if (i == (len - 4))
11788 nvram_cmd |= NVRAM_CMD_LAST;
11790 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11791 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11792 (tp->nvram_jedecnum == JEDEC_ST) &&
11793 (nvram_cmd & NVRAM_CMD_FIRST)) {
11795 if ((ret = tg3_nvram_exec_cmd(tp,
11796 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11801 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11802 /* We always do complete word writes to eeprom. */
11803 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11806 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11812 /* offset and length are dword aligned */
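/* Top-level NVRAM write entry point: temporarily de-assert the GPIO-driven
 * eeprom write protect if it is in use, take the NVRAM lock and access
 * grant, dispatch to the buffered or unbuffered path (or the plain EEPROM
 * path on chips with no NVRAM), and then restore write protect.
 */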
11813 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11817 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11818 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11819 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11823 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11824 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11828 ret = tg3_nvram_lock(tp);
11832 tg3_enable_nvram_access(tp);
11833 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11834 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11835 tw32(NVRAM_WRITE1, 0x406);
11837 grc_mode = tr32(GRC_MODE);
11838 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11840 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11841 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11843 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11846 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11850 grc_mode = tr32(GRC_MODE);
11851 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11853 tg3_disable_nvram_access(tp);
11854 tg3_nvram_unlock(tp);
11857 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11858 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
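/* Fallback table mapping PCI subsystem IDs to PHY IDs for boards whose
 * EEPROM carries no usable signature; covers Broadcom, 3Com, Dell, Compaq,
 * and IBM designs.
 */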
11865 struct subsys_tbl_ent {
11866 u16 subsys_vendor, subsys_devid;
11870 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
11871 /* Broadcom boards. */
11872 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11873 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
11874 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11875 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
11876 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11877 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
11878 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11879 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
11880 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11881 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
11882 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11883 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
11884 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11885 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
11886 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11887 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
11888 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11889 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
11890 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11891 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
11892 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11893 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
11896 { TG3PCI_SUBVENDOR_ID_3COM,
11897 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
11898 { TG3PCI_SUBVENDOR_ID_3COM,
11899 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
11900 { TG3PCI_SUBVENDOR_ID_3COM,
11901 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
11902 { TG3PCI_SUBVENDOR_ID_3COM,
11903 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
11904 { TG3PCI_SUBVENDOR_ID_3COM,
11905 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
11908 { TG3PCI_SUBVENDOR_ID_DELL,
11909 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
11910 { TG3PCI_SUBVENDOR_ID_DELL,
11911 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
11912 { TG3PCI_SUBVENDOR_ID_DELL,
11913 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
11914 { TG3PCI_SUBVENDOR_ID_DELL,
11915 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
11917 /* Compaq boards. */
11918 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11919 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
11920 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11921 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
11922 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11923 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
11924 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11925 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
11926 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11927 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
11930 { TG3PCI_SUBVENDOR_ID_IBM,
11931 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
11934 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
11938 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11939 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11940 tp->pdev->subsystem_vendor) &&
11941 (subsys_id_to_phy_id[i].subsys_devid ==
11942 tp->pdev->subsystem_device))
11943 return &subsys_id_to_phy_id[i];
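/* Pull the bootcode-provided configuration out of NIC SRAM: PHY ID and
 * serdes/copper selection, LED mode, eeprom write protect, ASF/APE
 * enables, WoL capability, and the ASPM/RGMII quirk bits.
 */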
11948 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11953 /* On some early chips the SRAM cannot be accessed in D3hot state,
11954 * so we need to make sure we're in D0.
11956 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11957 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11958 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11961 /* Make sure register accesses (indirect or otherwise)
11962 * will function correctly.
11964 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11965 tp->misc_host_ctrl);
11967 /* The memory arbiter has to be enabled in order for SRAM accesses
11968 * to succeed. Normally on powerup the tg3 chip firmware will make
11969 * sure it is enabled, but other entities such as system netboot
11970 * code might disable it.
11972 val = tr32(MEMARB_MODE);
11973 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11975 tp->phy_id = TG3_PHY_ID_INVALID;
11976 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11978 /* Assume an onboard device and WOL capable by default. */
11979 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11982 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11983 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11984 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11986 val = tr32(VCPU_CFGSHDW);
11987 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11988 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11989 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11990 (val & VCPU_CFGSHDW_WOL_MAGPKT))
11991 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11995 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11996 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11997 u32 nic_cfg, led_cfg;
11998 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11999 int eeprom_phy_serdes = 0;
12001 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12002 tp->nic_sram_data_cfg = nic_cfg;
12004 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12005 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12006 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12007 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12008 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12009 (ver > 0) && (ver < 0x100))
12010 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12013 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12015 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12016 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12017 eeprom_phy_serdes = 1;
12019 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12020 if (nic_phy_id != 0) {
12021 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12022 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12024 eeprom_phy_id = (id1 >> 16) << 10;
12025 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12026 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12030 tp->phy_id = eeprom_phy_id;
12031 if (eeprom_phy_serdes) {
12032 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
12033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12034 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12036 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12039 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12040 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12041 SHASTA_EXT_LED_MODE_MASK);
12043 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12047 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12048 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12051 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12052 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12055 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12056 tp->led_ctrl = LED_CTRL_MODE_MAC;
12058 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12059 * read on some older 5700/5701 bootcode.
12061 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12063 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12065 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12069 case SHASTA_EXT_LED_SHARED:
12070 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12071 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12072 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12073 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12074 LED_CTRL_MODE_PHY_2);
12077 case SHASTA_EXT_LED_MAC:
12078 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12081 case SHASTA_EXT_LED_COMBO:
12082 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12083 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12084 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12085 LED_CTRL_MODE_PHY_2);
12090 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12092 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12093 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12095 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12096 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12098 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12099 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12100 if ((tp->pdev->subsystem_vendor ==
12101 PCI_VENDOR_ID_ARIMA) &&
12102 (tp->pdev->subsystem_device == 0x205a ||
12103 tp->pdev->subsystem_device == 0x2063))
12104 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12106 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12107 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12110 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12111 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12112 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12113 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12116 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12117 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12118 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12120 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
12121 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12122 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12124 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12125 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12126 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12128 if (cfg2 & (1 << 17))
12129 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
12131 /* serdes signal pre-emphasis in register 0x590 set by */
12132 /* bootcode if bit 18 is set */
12133 if (cfg2 & (1 << 18))
12134 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
12136 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12137 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12138 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12139 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
12141 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12144 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12145 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12146 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12149 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12150 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12151 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12152 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12153 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12154 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12157 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12158 device_set_wakeup_enable(&tp->pdev->dev,
12159 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12162 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12167 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12168 tw32(OTP_CTRL, cmd);
12170 /* Wait for up to 1 ms for command to execute. */
12171 for (i = 0; i < 100; i++) {
12172 val = tr32(OTP_STATUS);
12173 if (val & OTP_STATUS_CMD_DONE)
12178 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12181 /* Read the gphy configuration from the OTP region of the chip. The gphy
12182 * configuration is a 32-bit value that straddles the alignment boundary.
12183 * We do two 32-bit reads and then shift and merge the results.
12185 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12187 u32 bhalf_otp, thalf_otp;
12189 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12191 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12194 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12196 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12199 thalf_otp = tr32(OTP_READ_DATA);
12201 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12203 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12206 bhalf_otp = tr32(OTP_READ_DATA);
12208 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
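/* PHY probing order: hand off to phylib when TG3_FLG3_USE_PHYLIB is set,
 * avoid touching the MDIO bus while ASF/APE firmware owns the PHY, and
 * otherwise trust the live PHYSID registers, then the eeprom-supplied ID,
 * and finally the subsystem-ID table above.
 */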
12211 static int __devinit tg3_phy_probe(struct tg3 *tp)
12213 u32 hw_phy_id_1, hw_phy_id_2;
12214 u32 hw_phy_id, hw_phy_id_masked;
12217 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12218 return tg3_phy_init(tp);
12220 /* Reading the PHY ID register can conflict with ASF
12221 * firmware access to the PHY hardware.
12224 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12225 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12226 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12228 /* Now read the physical PHY_ID from the chip and verify
12229 * that it is sane. If it doesn't look good, we fall back
12230 * to the hard-coded table-based PHY_ID and, failing
12231 * that, to the value found in the eeprom area.
12233 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12234 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12236 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12237 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12238 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12240 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12243 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12244 tp->phy_id = hw_phy_id;
12245 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12246 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12248 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
12250 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12251 /* Do nothing, phy ID already set up in
12252 * tg3_get_eeprom_hw_cfg().
12255 struct subsys_tbl_ent *p;
12257 /* No eeprom signature? Try the hardcoded
12258 * subsys device table.
12260 p = tg3_lookup_by_subsys(tp);
12264 tp->phy_id = p->phy_id;
12266 tp->phy_id == TG3_PHY_ID_BCM8002)
12267 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12271 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
12272 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12273 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12274 u32 bmsr, adv_reg, tg3_ctrl, mask;
12276 tg3_readphy(tp, MII_BMSR, &bmsr);
12277 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12278 (bmsr & BMSR_LSTATUS))
12279 goto skip_phy_reset;
12281 err = tg3_phy_reset(tp);
12285 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12286 ADVERTISE_100HALF | ADVERTISE_100FULL |
12287 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12289 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
12290 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12291 MII_TG3_CTRL_ADV_1000_FULL);
12292 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12293 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12294 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12295 MII_TG3_CTRL_ENABLE_AS_MASTER);
12298 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12299 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12300 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12301 if (!tg3_copper_is_advertising_all(tp, mask)) {
12302 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12304 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12305 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12307 tg3_writephy(tp, MII_BMCR,
12308 BMCR_ANENABLE | BMCR_ANRESTART);
12310 tg3_phy_set_wirespeed(tp);
12312 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12313 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12314 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12318 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12319 err = tg3_init_5401phy_dsp(tp);
12323 err = tg3_init_5401phy_dsp(tp);
12326 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
12327 tp->link_config.advertising =
12328 (ADVERTISED_1000baseT_Half |
12329 ADVERTISED_1000baseT_Full |
12330 ADVERTISED_Autoneg |
12332 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
12333 tp->link_config.advertising &=
12334 ~(ADVERTISED_1000baseT_Half |
12335 ADVERTISED_1000baseT_Full);
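/* Fetch the VPD image either from NVRAM (when a valid EEPROM magic is
 * present) or through the PCI VPD capability, then parse the read-only
 * section for the board part number and, on boards whose manufacturer ID
 * is "1028" (Dell), a vendor firmware string.
 */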
12340 static void __devinit tg3_read_vpd(struct tg3 *tp)
12342 u8 vpd_data[TG3_NVM_VPD_LEN];
12343 unsigned int block_end, rosize, len;
12347 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12348 tg3_nvram_read(tp, 0x0, &magic))
12349 goto out_not_found;
12351 if (magic == TG3_EEPROM_MAGIC) {
12352 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12355 /* The data is in little-endian format in NVRAM.
12356 * Use the big-endian read routines to preserve
12357 * the byte order as it exists in NVRAM.
12359 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12360 goto out_not_found;
12362 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12366 unsigned int pos = 0;
12368 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12369 cnt = pci_read_vpd(tp->pdev, pos,
12370 TG3_NVM_VPD_LEN - pos,
12372 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12375 goto out_not_found;
12377 if (pos != TG3_NVM_VPD_LEN)
12378 goto out_not_found;
12381 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12382 PCI_VPD_LRDT_RO_DATA);
12384 goto out_not_found;
12386 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12387 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12388 i += PCI_VPD_LRDT_TAG_SIZE;
12390 if (block_end > TG3_NVM_VPD_LEN)
12391 goto out_not_found;
12393 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12394 PCI_VPD_RO_KEYWORD_MFR_ID);
12396 len = pci_vpd_info_field_size(&vpd_data[j]);
12398 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12399 if (j + len > block_end || len != 4 ||
12400 memcmp(&vpd_data[j], "1028", 4))
12403 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12404 PCI_VPD_RO_KEYWORD_VENDOR0);
12408 len = pci_vpd_info_field_size(&vpd_data[j]);
12410 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12411 if (j + len > block_end || len >= TG3_VER_SIZE)
12414 memcpy(tp->fw_ver, &vpd_data[j], len);
12415 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
12419 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12420 PCI_VPD_RO_KEYWORD_PARTNO);
12422 goto out_not_found;
12424 len = pci_vpd_info_field_size(&vpd_data[i]);
12426 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12427 if (len > TG3_BPN_SIZE ||
12428 (len + i) > TG3_NVM_VPD_LEN)
12429 goto out_not_found;
12431 memcpy(tp->board_part_number, &vpd_data[i], len);
12436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12437 strcpy(tp->board_part_number, "BCM95906");
12438 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12439 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12440 strcpy(tp->board_part_number, "BCM57780");
12441 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12442 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12443 strcpy(tp->board_part_number, "BCM57760");
12444 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12445 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12446 strcpy(tp->board_part_number, "BCM57790");
12447 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12448 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12449 strcpy(tp->board_part_number, "BCM57788");
12450 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12451 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12452 strcpy(tp->board_part_number, "BCM57761");
12453 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12454 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12455 strcpy(tp->board_part_number, "BCM57765");
12456 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12457 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12458 strcpy(tp->board_part_number, "BCM57781");
12459 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12460 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12461 strcpy(tp->board_part_number, "BCM57785");
12462 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12463 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12464 strcpy(tp->board_part_number, "BCM57791");
12465 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12466 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12467 strcpy(tp->board_part_number, "BCM57795");
12469 strcpy(tp->board_part_number, "none");
12472 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12476 if (tg3_nvram_read(tp, offset, &val) ||
12477 (val & 0xfc000000) != 0x0c000000 ||
12478 tg3_nvram_read(tp, offset + 4, &val) ||
12485 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12487 u32 val, offset, start, ver_offset;
12489 bool newver = false;
12491 if (tg3_nvram_read(tp, 0xc, &offset) ||
12492 tg3_nvram_read(tp, 0x4, &start))
12495 offset = tg3_nvram_logical_addr(tp, offset);
12497 if (tg3_nvram_read(tp, offset, &val))
12500 if ((val & 0xfc000000) == 0x0c000000) {
12501 if (tg3_nvram_read(tp, offset + 4, &val))
12508 dst_off = strlen(tp->fw_ver);
12511 if (TG3_VER_SIZE - dst_off < 16 ||
12512 tg3_nvram_read(tp, offset + 8, &ver_offset))
12515 offset = offset + ver_offset - start;
12516 for (i = 0; i < 16; i += 4) {
12518 if (tg3_nvram_read_be32(tp, offset + i, &v))
12521 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12526 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12529 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12530 TG3_NVM_BCVER_MAJSFT;
12531 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12532 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12533 "v%d.%02d", major, minor);
12537 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12539 u32 val, major, minor;
12541 /* Use native endian representation */
12542 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12545 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12546 TG3_NVM_HWSB_CFG1_MAJSFT;
12547 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12548 TG3_NVM_HWSB_CFG1_MINSFT;
12550 snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
12553 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12555 u32 offset, major, minor, build;
12557 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12559 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12562 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12563 case TG3_EEPROM_SB_REVISION_0:
12564 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12566 case TG3_EEPROM_SB_REVISION_2:
12567 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12569 case TG3_EEPROM_SB_REVISION_3:
12570 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12572 case TG3_EEPROM_SB_REVISION_4:
12573 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12575 case TG3_EEPROM_SB_REVISION_5:
12576 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12582 if (tg3_nvram_read(tp, offset, &val))
12585 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12586 TG3_EEPROM_SB_EDH_BLD_SHFT;
12587 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12588 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12589 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12591 if (minor > 99 || build > 26)
12594 offset = strlen(tp->fw_ver);
12595 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12596 " v%d.%02d", major, minor);
12599 offset = strlen(tp->fw_ver);
12600 if (offset < TG3_VER_SIZE - 1)
12601 tp->fw_ver[offset] = 'a' + build - 1;
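/* Walk the NVRAM directory for an ASF initialization image and, if one is
 * found and valid, append its four-word version string to tp->fw_ver.
 */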
12605 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12607 u32 val, offset, start;
12610 for (offset = TG3_NVM_DIR_START;
12611 offset < TG3_NVM_DIR_END;
12612 offset += TG3_NVM_DIRENT_SIZE) {
12613 if (tg3_nvram_read(tp, offset, &val))
12616 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12620 if (offset == TG3_NVM_DIR_END)
12623 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12624 start = 0x08000000;
12625 else if (tg3_nvram_read(tp, offset - 4, &start))
12628 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12629 !tg3_fw_img_is_valid(tp, offset) ||
12630 tg3_nvram_read(tp, offset + 8, &val))
12633 offset += val - start;
12635 vlen = strlen(tp->fw_ver);
12637 tp->fw_ver[vlen++] = ',';
12638 tp->fw_ver[vlen++] = ' ';
12640 for (i = 0; i < 4; i++) {
12642 if (tg3_nvram_read_be32(tp, offset, &v))
12645 offset += sizeof(v);
12647 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12648 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12652 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12657 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12662 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12663 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12666 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12667 if (apedata != APE_SEG_SIG_MAGIC)
12670 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12671 if (!(apedata & APE_FW_STATUS_READY))
12674 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12676 vlen = strlen(tp->fw_ver);
12678 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12679 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12680 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12681 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12682 (apedata & APE_FW_VERSION_BLDMSK));
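/* Assemble tp->fw_ver: bootcode, self-boot, or hardware self-boot version
 * info depending on the image magic, plus the management firmware version
 * when ASF is enabled and no VPD-supplied version is already present.
 */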
12685 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12688 bool vpd_vers = false;
12690 if (tp->fw_ver[0] != 0)
12693 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12694 strcat(tp->fw_ver, "sb");
12698 if (tg3_nvram_read(tp, 0, &val))
12701 if (val == TG3_EEPROM_MAGIC)
12702 tg3_read_bc_ver(tp);
12703 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12704 tg3_read_sb_ver(tp, val);
12705 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12706 tg3_read_hwsb_ver(tp);
12710 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12711 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
12714 tg3_read_mgmtfw_ver(tp);
12717 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12720 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
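/* tg3_get_invariants() runs once at probe time. It determines the exact
 * chip revision, the bus type (PCI, PCI-X, PCI Express) and host-bridge
 * quirks, and sets the large collection of workaround flags that the rest
 * of the driver keys off.
 */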
12722 static int __devinit tg3_get_invariants(struct tg3 *tp)
12724 static struct pci_device_id write_reorder_chipsets[] = {
12725 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12726 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12727 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12728 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12729 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12730 PCI_DEVICE_ID_VIA_8385_0) },
12734 u32 pci_state_reg, grc_misc_cfg;
12739 /* Force memory write invalidate off. If we leave it on,
12740 * then on 5700_BX chips we have to enable a workaround.
12741 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12742 * to match the cacheline size. The Broadcom driver has this
12743 * workaround but turns MWI off all the time, so it never uses
12744 * it. This seems to suggest that the workaround is insufficient.
12746 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12747 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12748 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12750 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12751 * has the register indirect write enable bit set before
12752 * we try to access any of the MMIO registers. It is also
12753 * critical that the PCI-X hw workaround situation is decided
12754 * before that.
12756 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12759 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12760 MISC_HOST_CTRL_CHIPREV_SHIFT);
12761 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12762 u32 prod_id_asic_rev;
12764 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12765 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12766 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
12767 pci_read_config_dword(tp->pdev,
12768 TG3PCI_GEN2_PRODID_ASICREV,
12769 &prod_id_asic_rev);
12770 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12771 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12772 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12773 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12774 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12775 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12776 pci_read_config_dword(tp->pdev,
12777 TG3PCI_GEN15_PRODID_ASICREV,
12778 &prod_id_asic_rev);
12780 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12781 &prod_id_asic_rev);
12783 tp->pci_chip_rev_id = prod_id_asic_rev;
12786 /* Wrong chip ID in 5752 A0. This code can be removed later
12787 * as A0 is not in production.
12789 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12790 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12792 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12793 * we need to disable memory and use config. cycles
12794 * only to access all registers. The 5702/03 chips
12795 * can mistakenly decode the special cycles from the
12796 * ICH chipsets as memory write cycles, causing corruption
12797 * of register and memory space. Only certain ICH bridges
12798 * will drive special cycles with non-zero data during the
12799 * address phase which can fall within the 5703's address
12800 * range. This is not an ICH bug as the PCI spec allows
12801 * non-zero address during special cycles. However, only
12802 * these ICH bridges are known to drive non-zero addresses
12803 * during special cycles.
12805 * Since special cycles do not cross PCI bridges, we only
12806 * enable this workaround if the 5703 is on the secondary
12807 * bus of these ICH bridges.
12809 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12810 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12811 static struct tg3_dev_id {
12815 } ich_chipsets[] = {
12816 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12818 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12820 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12822 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12826 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12827 struct pci_dev *bridge = NULL;
12829 while (pci_id->vendor != 0) {
12830 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12836 if (pci_id->rev != PCI_ANY_ID) {
12837 if (bridge->revision > pci_id->rev)
12840 if (bridge->subordinate &&
12841 (bridge->subordinate->number ==
12842 tp->pdev->bus->number)) {
12844 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12845 pci_dev_put(bridge);
12851 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12852 static struct tg3_dev_id {
12855 } bridge_chipsets[] = {
12856 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12857 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12860 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12861 struct pci_dev *bridge = NULL;
12863 while (pci_id->vendor != 0) {
12864 bridge = pci_get_device(pci_id->vendor,
12871 if (bridge->subordinate &&
12872 (bridge->subordinate->number <=
12873 tp->pdev->bus->number) &&
12874 (bridge->subordinate->subordinate >=
12875 tp->pdev->bus->number)) {
12876 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12877 pci_dev_put(bridge);
12883 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12884 * DMA addresses > 40-bit. This bridge may have additional
12885 * 57xx devices behind it in some 4-port NIC designs, for example.
12886 * Any tg3 device found behind the bridge will also need the 40-bit
12889 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12891 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12892 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12893 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12895 struct pci_dev *bridge = NULL;
12898 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12899 PCI_DEVICE_ID_SERVERWORKS_EPB,
12901 if (bridge && bridge->subordinate &&
12902 (bridge->subordinate->number <=
12903 tp->pdev->bus->number) &&
12904 (bridge->subordinate->subordinate >=
12905 tp->pdev->bus->number)) {
12906 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12907 pci_dev_put(bridge);
12913 /* Initialize misc host control in PCI block. */
12914 tp->misc_host_ctrl |= (misc_ctrl_reg &
12915 MISC_HOST_CTRL_CHIPREV);
12916 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12917 tp->misc_host_ctrl);
12919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
12920 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
12921 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12922 tp->pdev_peer = tg3_find_peer(tp);
12924 /* Intentionally exclude ASIC_REV_5906 */
12925 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12927 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12929 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12932 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12933 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12935 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12938 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12939 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12940 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12942 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12943 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12944 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12946 /* 5700 B0 chips do not support checksumming correctly due
12947 * to hardware bugs.
12949 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12950 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12952 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12953 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12954 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12955 tp->dev->features |= NETIF_F_IPV6_CSUM;
12958 /* Determine TSO capabilities */
12959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12961 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
12962 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12963 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12964 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12965 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12966 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
12968 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12969 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12970 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12971 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12972 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
12973 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
12974 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
12975 tp->fw_needed = FIRMWARE_TG3TSO5;
12977 tp->fw_needed = FIRMWARE_TG3TSO;
12982 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12983 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12984 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12985 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12986 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12987 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12988 tp->pdev_peer == tp->pdev))
12989 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12991 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12993 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12997 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
12998 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12999 tp->irq_max = TG3_IRQ_MAX_VECS;
13003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13005 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13006 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13007 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13008 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13011 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13012 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13013 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13015 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13016 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13017 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13018 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13020 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13023 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13024 if (tp->pcie_cap != 0) {
13027 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13029 pcie_set_readrq(tp->pdev, 4096);
13031 pci_read_config_word(tp->pdev,
13032 tp->pcie_cap + PCI_EXP_LNKCTL,
13034 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13036 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13037 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13039 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13040 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13041 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13042 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13043 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13045 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13046 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13047 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13048 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13049 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13050 if (!tp->pcix_cap) {
13051 dev_err(&tp->pdev->dev,
13052 "Cannot find PCI-X capability, aborting\n");
13056 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13057 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13060 /* If we have an AMD 762 or VIA K8T800 chipset, write
13061 * reordering to the mailbox registers done by the host
13062 * controller can cause major troubles. We read back from
13063 * every mailbox register write to force the writes to be
13064 * posted to the chip in order.
13066 if (pci_dev_present(write_reorder_chipsets) &&
13067 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13068 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13070 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13071 &tp->pci_cacheline_sz);
13072 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13073 &tp->pci_lat_timer);
13074 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13075 tp->pci_lat_timer < 64) {
13076 tp->pci_lat_timer = 64;
13077 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13078 tp->pci_lat_timer);
13081 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13082 /* 5700 BX chips need to have their TX producer index
13083 * mailboxes written twice to work around a bug.
13085 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13087 /* If we are in PCI-X mode, enable register write workaround.
13089 * The workaround is to use indirect register accesses
13090 * for all chip writes not to mailbox registers.
13092 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13095 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13097 /* The chip can have its power management PCI config
13098 * space registers clobbered due to this bug.
13099 * So explicitly force the chip into D0 here.
13101 pci_read_config_dword(tp->pdev,
13102 tp->pm_cap + PCI_PM_CTRL,
13104 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13105 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13106 pci_write_config_dword(tp->pdev,
13107 tp->pm_cap + PCI_PM_CTRL,
13110 /* Also, force SERR#/PERR# in PCI command. */
13111 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13112 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13113 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13117 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13118 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13119 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13120 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13122 /* Chip-specific fixup from Broadcom driver */
13123 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13124 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13125 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13126 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13129 /* Default fast path register access methods */
13130 tp->read32 = tg3_read32;
13131 tp->write32 = tg3_write32;
13132 tp->read32_mbox = tg3_read32;
13133 tp->write32_mbox = tg3_write32;
13134 tp->write32_tx_mbox = tg3_write32;
13135 tp->write32_rx_mbox = tg3_write32;
13137 /* Various workaround register access methods */
13138 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13139 tp->write32 = tg3_write_indirect_reg32;
13140 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13141 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13142 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13144 * Back-to-back register writes can cause problems on these
13145 * chips; the workaround is to read back all reg writes
13146 * except those to mailbox regs.
13148 * See tg3_write_indirect_reg32().
13150 tp->write32 = tg3_write_flush_reg32;
13153 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13154 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13155 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13156 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13157 tp->write32_rx_mbox = tg3_write_flush_reg32;
13160 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13161 tp->read32 = tg3_read_indirect_reg32;
13162 tp->write32 = tg3_write_indirect_reg32;
13163 tp->read32_mbox = tg3_read_indirect_mbox;
13164 tp->write32_mbox = tg3_write_indirect_mbox;
13165 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13166 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13171 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13172 pci_cmd &= ~PCI_COMMAND_MEMORY;
13173 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13176 tp->read32_mbox = tg3_read32_mbox_5906;
13177 tp->write32_mbox = tg3_write32_mbox_5906;
13178 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13179 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13182 if (tp->write32 == tg3_write_indirect_reg32 ||
13183 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13184 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13185 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13186 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13188 /* Get eeprom hw config before calling tg3_set_power_state().
13189 * In particular, the TG3_FLG2_IS_NIC flag must be
13190 * determined before calling tg3_set_power_state() so that
13191 * we know whether or not to switch out of Vaux power.
13192 * When the flag is clear, it means that GPIO1 is used for eeprom
13193 * write protect and also implies that it is a LOM where GPIOs
13194 * are not used to switch power.
13196 tg3_get_eeprom_hw_cfg(tp);
13198 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13199 /* Allow reads and writes to the
13200 * APE register and memory space.
13202 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13203 PCISTATE_ALLOW_APE_SHMEM_WR;
13204 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13209 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13210 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13212 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13213 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13214 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13216 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13217 * GPIO1 driven high will bring 5700's external PHY out of reset.
13218 * It is also used as eeprom write protect on LOMs.
13220 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13221 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13222 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13223 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13224 GRC_LCLCTRL_GPIO_OUTPUT1);
13225 /* Unused GPIO3 must be driven as output on 5752 because there
13226 * are no pull-up resistors on unused GPIO pins.
13228 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13229 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13232 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13233 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13234 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13236 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13237 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13238 /* Turn off the debug UART. */
13239 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13240 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13241 /* Keep VMain power. */
13242 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13243 GRC_LCLCTRL_GPIO_OUTPUT0;
13246 /* Force the chip into D0. */
13247 err = tg3_set_power_state(tp, PCI_D0);
13249 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13253 /* Derive initial jumbo mode from MTU assigned in
13254 * ether_setup() via the alloc_etherdev() call
13256 if (tp->dev->mtu > ETH_DATA_LEN &&
13257 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13258 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13260 /* Determine WakeOnLan speed to use. */
13261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13262 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13263 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13264 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13265 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13267 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13271 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
13273 /* A few boards don't want Ethernet@WireSpeed phy feature */
13274 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13275 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13276 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13277 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13278 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
13279 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
13280 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
13282 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13283 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13284 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
13285 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13286 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
13288 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13289 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
13290 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13291 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13292 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
13293 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
13294 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13295 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13296 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13298 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13299 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13300 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
13301 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13302 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
13304 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
13307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13308 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13309 tp->phy_otp = tg3_read_otp_phycfg(tp);
13310 if (tp->phy_otp == 0)
13311 tp->phy_otp = TG3_OTP_DEFAULT;
13314 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13315 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13317 tp->mi_mode = MAC_MI_MODE_BASE;
13319 tp->coalesce_mode = 0;
13320 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13321 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13322 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13325 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13326 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13328 err = tg3_mdio_init(tp);
13332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13333 (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
13334 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
13337 /* Initialize data/descriptor byte/word swapping. */
13338 val = tr32(GRC_MODE);
13339 val &= GRC_MODE_HOST_STACKUP;
13340 tw32(GRC_MODE, val | tp->grc_mode);
13342 tg3_switch_clocks(tp);
13344 /* Clear this out for sanity. */
13345 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13347 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13349 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13350 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13351 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13353 if (chiprevid == CHIPREV_ID_5701_A0 ||
13354 chiprevid == CHIPREV_ID_5701_B0 ||
13355 chiprevid == CHIPREV_ID_5701_B2 ||
13356 chiprevid == CHIPREV_ID_5701_B5) {
13357 void __iomem *sram_base;
13359 /* Write some dummy words into the SRAM status block
13360 * area and see whether they read back correctly. If the
13361 * read-back value is bad, force-enable the PCI-X workaround.
13363 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13365 writel(0x00000000, sram_base);
13366 writel(0x00000000, sram_base + 4);
13367 writel(0xffffffff, sram_base + 4);
13368 if (readl(sram_base) != 0x00000000)
13369 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
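/* If the 0xffffffff written to the adjacent word shows up at offset zero,
 * target-mode writes are being misdirected, so the workaround is forced on.
 */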
13374 tg3_nvram_init(tp);
13376 grc_misc_cfg = tr32(GRC_MISC_CFG);
13377 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13379 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13380 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13381 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13382 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13384 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13385 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13386 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13387 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13388 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13389 HOSTCC_MODE_CLRTICK_TXBD);
13391 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13392 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13393 tp->misc_host_ctrl);
13396 /* Preserve the APE MAC_MODE bits */
13397 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13398 tp->mac_mode = tr32(MAC_MODE) |
13399 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13401 tp->mac_mode = TG3_DEF_MAC_MODE;
13403 /* these are limited to 10/100 only */
13404 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13405 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13406 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13407 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13408 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13409 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13410 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13411 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13412 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13413 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13414 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13415 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13416 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13417 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13418 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13419 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13421 err = tg3_phy_probe(tp);
13423 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13424 /* ... but do not return immediately ... */
13429 tg3_read_fw_ver(tp);
13431 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13432 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13435 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13437 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13440 /* 5700 {AX,BX} chips have a broken status block link
13441 * change bit implementation, so we must use the
13442 * status register in those cases.
13444 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13445 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13447 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13449 /* The led_ctrl is set during tg3_phy_probe; here we might
13450 * have to force the link status polling mechanism based
13451 * upon subsystem IDs.
13453 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13455 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13456 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13457 TG3_FLAG_USE_LINKCHG_REG);
13460 /* For all SERDES we poll the MAC status register. */
13461 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13462 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13464 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13466 tp->rx_offset = NET_IP_ALIGN;
13467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13468 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
13471 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13473 /* Increment the rx prod index on the rx std ring by at most
13474 * 8 for these chips to work around hw errata.
13476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13479 tp->rx_std_max_post = 8;
13481 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13482 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13483 PCIE_PWR_MGMT_L1_THRESH_MSK;
13488 #ifdef CONFIG_SPARC
13489 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13491 struct net_device *dev = tp->dev;
13492 struct pci_dev *pdev = tp->pdev;
13493 struct device_node *dp = pci_device_to_OF_node(pdev);
13494 const unsigned char *addr;
13497 addr = of_get_property(dp, "local-mac-address", &len);
13498 if (addr && len == 6) {
13499 memcpy(dev->dev_addr, addr, 6);
13500 memcpy(dev->perm_addr, dev->dev_addr, 6);
13506 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13508 struct net_device *dev = tp->dev;
13510 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13511 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13516 static int __devinit tg3_get_device_address(struct tg3 *tp)
13518 struct net_device *dev = tp->dev;
13519 u32 hi, lo, mac_offset;
13522 #ifdef CONFIG_SPARC
13523 if (!tg3_get_macaddr_sparc(tp))
13528 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13529 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13530 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13532 if (tg3_nvram_lock(tp))
13533 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13535 tg3_nvram_unlock(tp);
13536 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13537 if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
13539 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13542 /* First try to get it from MAC address mailbox. */
13543 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
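/* 0x484b is ASCII 'H','K', apparently the bootcode's signature for a valid
 * MAC address in the mailbox; the low two bytes of 'hi' then carry the first
 * two address octets.
 */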
13544 if ((hi >> 16) == 0x484b) {
13545 dev->dev_addr[0] = (hi >> 8) & 0xff;
13546 dev->dev_addr[1] = (hi >> 0) & 0xff;
13548 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13549 dev->dev_addr[2] = (lo >> 24) & 0xff;
13550 dev->dev_addr[3] = (lo >> 16) & 0xff;
13551 dev->dev_addr[4] = (lo >> 8) & 0xff;
13552 dev->dev_addr[5] = (lo >> 0) & 0xff;
13554 /* Some old bootcode may report a 0 MAC address in SRAM */
13555 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13558 /* Next, try NVRAM. */
13559 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13560 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13561 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13562 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13563 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13565 /* Finally just fetch it out of the MAC control regs. */
13567 hi = tr32(MAC_ADDR_0_HIGH);
13568 lo = tr32(MAC_ADDR_0_LOW);
13570 dev->dev_addr[5] = lo & 0xff;
13571 dev->dev_addr[4] = (lo >> 8) & 0xff;
13572 dev->dev_addr[3] = (lo >> 16) & 0xff;
13573 dev->dev_addr[2] = (lo >> 24) & 0xff;
13574 dev->dev_addr[1] = hi & 0xff;
13575 dev->dev_addr[0] = (hi >> 8) & 0xff;
13579 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13580 #ifdef CONFIG_SPARC
13581 if (!tg3_get_default_macaddr_sparc(tp))
13586 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13590 #define BOUNDARY_SINGLE_CACHELINE 1
13591 #define BOUNDARY_MULTI_CACHELINE 2
13593 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13595 int cacheline_size;
13599 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13601 cacheline_size = 1024;
13603 cacheline_size = (int) byte * 4;
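/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, hence the multiply by
 * four; the 1024-byte value above appears to be the fallback when the
 * register reads back as zero.
 */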
13605 /* On 5703 and later chips, the boundary bits have no effect.
13608 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13609 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13610 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13613 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13614 goal = BOUNDARY_MULTI_CACHELINE;
13616 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13617 goal = BOUNDARY_SINGLE_CACHELINE;
13623 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13625 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
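/* 5717/57765 expose only a single cache-alignment disable bit rather than
 * the separate read/write boundary fields used on the older parts below.
 */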
13632 /* PCI controllers on most RISC systems tend to disconnect
13633 * when a device tries to burst across a cache-line boundary.
13634 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13636 * Unfortunately, for PCI-E there are only limited
13637 * write-side controls for this, and thus for reads
13638 * we will still get the disconnects. We'll also waste
13639 * these PCI cycles for both read and write for chips
13640 * other than 5700 and 5701, which do not implement the boundary bits.
13643 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13644 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13645 switch (cacheline_size) {
13650 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13651 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13652 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13654 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13655 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13660 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13661 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13665 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13666 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13669 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13670 switch (cacheline_size) {
13674 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13675 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13676 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13682 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13683 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13687 switch (cacheline_size) {
13689 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13690 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13691 DMA_RWCTRL_WRITE_BNDRY_16);
13696 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13697 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13698 DMA_RWCTRL_WRITE_BNDRY_32);
13703 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13704 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13705 DMA_RWCTRL_WRITE_BNDRY_64);
13710 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13711 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13712 DMA_RWCTRL_WRITE_BNDRY_128);
13717 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13718 DMA_RWCTRL_WRITE_BNDRY_256);
13721 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13722 DMA_RWCTRL_WRITE_BNDRY_512);
13726 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13727 DMA_RWCTRL_WRITE_BNDRY_1024);
13736 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13738 struct tg3_internal_buffer_desc test_desc;
13739 u32 sram_dma_descs;
13742 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13744 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13745 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13746 tw32(RDMAC_STATUS, 0);
13747 tw32(WDMAC_STATUS, 0);
13749 tw32(BUFMGR_MODE, 0);
13750 tw32(FTQ_RESET, 0);
13752 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13753 test_desc.addr_lo = buf_dma & 0xffffffff;
13754 test_desc.nic_mbuf = 0x00002100;
13755 test_desc.len = size;
13758 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13759 * the *second* time the tg3 driver was loaded after an unload.
13762 * Broadcom tells me:
13763 * ...the DMA engine is connected to the GRC block and a DMA
13764 * reset may affect the GRC block in some unpredictable way...
13765 * The behavior of resets to individual blocks has not been tested.
13767 * Broadcom noted the GRC reset will also reset all sub-components.
13770 test_desc.cqid_sqid = (13 << 8) | 2;
13772 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13775 test_desc.cqid_sqid = (16 << 8) | 7;
13777 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13780 test_desc.flags = 0x00000005;
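/* Copy the test descriptor into NIC SRAM one 32-bit word at a time through
 * the PCI config-space memory window (MEM_WIN_BASE_ADDR / MEM_WIN_DATA).
 */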
13782 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13785 val = *(((u32 *)&test_desc) + i);
13786 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13787 sram_dma_descs + (i * sizeof(u32)));
13788 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13790 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13793 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13795 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13798 for (i = 0; i < 40; i++) {
13802 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13804 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13805 if ((val & 0xffff) == sram_dma_descs) {
13816 #define TEST_BUFFER_SIZE 0x2000
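/* 0x2000 bytes (8 KiB); tg3_do_test_dma() moves this buffer to and from NIC
 * SRAM starting at offset 0x2100.
 */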
13818 static int __devinit tg3_test_dma(struct tg3 *tp)
13820 dma_addr_t buf_dma;
13821 u32 *buf, saved_dma_rwctrl;
13824 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13830 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13831 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13833 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13835 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13836 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13839 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13840 /* DMA read watermark not used on PCIE */
13841 tp->dma_rwctrl |= 0x00180000;
13842 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13843 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13845 tp->dma_rwctrl |= 0x003f0000;
13847 tp->dma_rwctrl |= 0x003f000f;
13849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13851 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13852 u32 read_water = 0x7;
13854 /* If the 5704 is behind the EPB bridge, we can
13855 * do the less restrictive ONE_DMA workaround for
13856 * better performance.
13858 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13859 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13860 tp->dma_rwctrl |= 0x8000;
13861 else if (ccval == 0x6 || ccval == 0x7)
13862 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13864 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13866 /* Set bit 23 to enable PCIX hw bug fix */
13868 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13869 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13871 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13872 /* 5780 always in PCIX mode */
13873 tp->dma_rwctrl |= 0x00144000;
13874 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13875 /* 5714 always in PCIX mode */
13876 tp->dma_rwctrl |= 0x00148000;
13878 tp->dma_rwctrl |= 0x001b000f;
13882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13884 tp->dma_rwctrl &= 0xfffffff0;
13886 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13887 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13888 /* Remove this if it causes problems for some boards. */
13889 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13891 /* On 5700/5701 chips, we need to set this bit.
13892 * Otherwise the chip will issue cacheline transactions
13893 * to streamable DMA memory without all the byte
13894 * enables turned on. This is an error on several
13895 * RISC PCI controllers, in particular sparc64.
13897 * On 5703/5704 chips, this bit has been reassigned
13898 * a different meaning. In particular, it is used
13899 * on those chips to enable a PCI-X workaround.
13901 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13904 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13907 /* Unneeded, already done by tg3_get_invariants. */
13908 tg3_switch_clocks(tp);
13911 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13912 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13915 /* It is best to perform the DMA test with the maximum write burst size
13916 * to expose the 5700/5701 write DMA bug.
13918 saved_dma_rwctrl = tp->dma_rwctrl;
13919 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13920 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
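/* The loop below fills the test buffer with a known pattern that the checks
 * further down compare against after each DMA transfer.
 */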
13925 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13928 /* Send the buffer to the chip. */
13929 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13931 dev_err(&tp->pdev->dev,
13932 "%s: Buffer write failed. err = %d\n",
13938 /* validate data reached card RAM correctly. */
13939 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13941 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13942 if (le32_to_cpu(val) != p[i]) {
13943 dev_err(&tp->pdev->dev,
13944 "%s: Buffer corrupted on device! "
13945 "(%d != %d)\n", __func__, val, i);
13946 /* ret = -ENODEV here? */
13951 /* Now read it back. */
13952 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13954 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
13955 "err = %d\n", __func__, ret);
13960 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13964 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13965 DMA_RWCTRL_WRITE_BNDRY_16) {
13966 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13967 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13968 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13971 dev_err(&tp->pdev->dev,
13972 "%s: Buffer corrupted on read back! "
13973 "(%d != %d)\n", __func__, p[i], i);
13979 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13985 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13986 DMA_RWCTRL_WRITE_BNDRY_16) {
13987 static struct pci_device_id dma_wait_state_chipsets[] = {
13988 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13989 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13993 /* DMA test passed without adjusting the DMA boundary;
13994 * now look for chipsets that are known to expose the
13995 * DMA bug without failing the test.
13997 if (pci_dev_present(dma_wait_state_chipsets)) {
13998 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13999 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14001 /* Safe to use the calculated DMA boundary. */
14002 tp->dma_rwctrl = saved_dma_rwctrl;
14005 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14009 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
14014 static void __devinit tg3_init_link_config(struct tg3 *tp)
14016 tp->link_config.advertising =
14017 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14018 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14019 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14020 ADVERTISED_Autoneg | ADVERTISED_MII);
14021 tp->link_config.speed = SPEED_INVALID;
14022 tp->link_config.duplex = DUPLEX_INVALID;
14023 tp->link_config.autoneg = AUTONEG_ENABLE;
14024 tp->link_config.active_speed = SPEED_INVALID;
14025 tp->link_config.active_duplex = DUPLEX_INVALID;
14026 tp->link_config.phy_is_low_power = 0;
14027 tp->link_config.orig_speed = SPEED_INVALID;
14028 tp->link_config.orig_duplex = DUPLEX_INVALID;
14029 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14032 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14036 tp->bufmgr_config.mbuf_read_dma_low_water =
14037 DEFAULT_MB_RDMA_LOW_WATER_5705;
14038 tp->bufmgr_config.mbuf_mac_rx_low_water =
14039 DEFAULT_MB_MACRX_LOW_WATER_57765;
14040 tp->bufmgr_config.mbuf_high_water =
14041 DEFAULT_MB_HIGH_WATER_57765;
14043 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14044 DEFAULT_MB_RDMA_LOW_WATER_5705;
14045 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14046 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14047 tp->bufmgr_config.mbuf_high_water_jumbo =
14048 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14049 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14050 tp->bufmgr_config.mbuf_read_dma_low_water =
14051 DEFAULT_MB_RDMA_LOW_WATER_5705;
14052 tp->bufmgr_config.mbuf_mac_rx_low_water =
14053 DEFAULT_MB_MACRX_LOW_WATER_5705;
14054 tp->bufmgr_config.mbuf_high_water =
14055 DEFAULT_MB_HIGH_WATER_5705;
14056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14057 tp->bufmgr_config.mbuf_mac_rx_low_water =
14058 DEFAULT_MB_MACRX_LOW_WATER_5906;
14059 tp->bufmgr_config.mbuf_high_water =
14060 DEFAULT_MB_HIGH_WATER_5906;
14063 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14064 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14065 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14066 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14067 tp->bufmgr_config.mbuf_high_water_jumbo =
14068 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14070 tp->bufmgr_config.mbuf_read_dma_low_water =
14071 DEFAULT_MB_RDMA_LOW_WATER;
14072 tp->bufmgr_config.mbuf_mac_rx_low_water =
14073 DEFAULT_MB_MACRX_LOW_WATER;
14074 tp->bufmgr_config.mbuf_high_water =
14075 DEFAULT_MB_HIGH_WATER;
14077 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14078 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14079 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14080 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14081 tp->bufmgr_config.mbuf_high_water_jumbo =
14082 DEFAULT_MB_HIGH_WATER_JUMBO;
14085 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14086 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14089 static char * __devinit tg3_phy_string(struct tg3 *tp)
14091 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14092 case TG3_PHY_ID_BCM5400: return "5400";
14093 case TG3_PHY_ID_BCM5401: return "5401";
14094 case TG3_PHY_ID_BCM5411: return "5411";
14095 case TG3_PHY_ID_BCM5701: return "5701";
14096 case TG3_PHY_ID_BCM5703: return "5703";
14097 case TG3_PHY_ID_BCM5704: return "5704";
14098 case TG3_PHY_ID_BCM5705: return "5705";
14099 case TG3_PHY_ID_BCM5750: return "5750";
14100 case TG3_PHY_ID_BCM5752: return "5752";
14101 case TG3_PHY_ID_BCM5714: return "5714";
14102 case TG3_PHY_ID_BCM5780: return "5780";
14103 case TG3_PHY_ID_BCM5755: return "5755";
14104 case TG3_PHY_ID_BCM5787: return "5787";
14105 case TG3_PHY_ID_BCM5784: return "5784";
14106 case TG3_PHY_ID_BCM5756: return "5722/5756";
14107 case TG3_PHY_ID_BCM5906: return "5906";
14108 case TG3_PHY_ID_BCM5761: return "5761";
14109 case TG3_PHY_ID_BCM5718C: return "5718C";
14110 case TG3_PHY_ID_BCM5718S: return "5718S";
14111 case TG3_PHY_ID_BCM57765: return "57765";
14112 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14113 case 0: return "serdes";
14114 default: return "unknown";
14118 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14120 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14121 strcpy(str, "PCI Express");
14123 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14124 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14126 strcpy(str, "PCIX:");
14128 if ((clock_ctrl == 7) ||
14129 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14130 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14131 strcat(str, "133MHz");
14132 else if (clock_ctrl == 0)
14133 strcat(str, "33MHz");
14134 else if (clock_ctrl == 2)
14135 strcat(str, "50MHz");
14136 else if (clock_ctrl == 4)
14137 strcat(str, "66MHz");
14138 else if (clock_ctrl == 6)
14139 strcat(str, "100MHz");
14141 strcpy(str, "PCI:");
14142 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14143 strcat(str, "66MHz");
14145 strcat(str, "33MHz");
14147 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14148 strcat(str, ":32-bit");
14150 strcat(str, ":64-bit");
14154 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14156 struct pci_dev *peer;
14157 unsigned int func, devnr = tp->pdev->devfn & ~7;
14159 for (func = 0; func < 8; func++) {
14160 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14161 if (peer && peer != tp->pdev)
14165 /* The 5704 can be configured in single-port mode; set peer to
14166 * tp->pdev in that case.
14174 * We don't need to keep the refcount elevated; there's no way
14175 * to remove one half of this device without removing the other
14182 static void __devinit tg3_init_coal(struct tg3 *tp)
14184 struct ethtool_coalesce *ec = &tp->coal;
14186 memset(ec, 0, sizeof(*ec));
14187 ec->cmd = ETHTOOL_GCOALESCE;
14188 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14189 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14190 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14191 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14192 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14193 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14194 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14195 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14196 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14198 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14199 HOSTCC_MODE_CLRTICK_TXBD)) {
14200 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14201 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14202 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14203 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14206 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14207 ec->rx_coalesce_usecs_irq = 0;
14208 ec->tx_coalesce_usecs_irq = 0;
14209 ec->stats_block_coalesce_usecs = 0;
14213 static const struct net_device_ops tg3_netdev_ops = {
14214 .ndo_open = tg3_open,
14215 .ndo_stop = tg3_close,
14216 .ndo_start_xmit = tg3_start_xmit,
14217 .ndo_get_stats = tg3_get_stats,
14218 .ndo_validate_addr = eth_validate_addr,
14219 .ndo_set_multicast_list = tg3_set_rx_mode,
14220 .ndo_set_mac_address = tg3_set_mac_addr,
14221 .ndo_do_ioctl = tg3_ioctl,
14222 .ndo_tx_timeout = tg3_tx_timeout,
14223 .ndo_change_mtu = tg3_change_mtu,
14224 #if TG3_VLAN_TAG_USED
14225 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14227 #ifdef CONFIG_NET_POLL_CONTROLLER
14228 .ndo_poll_controller = tg3_poll_controller,
14232 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14233 .ndo_open = tg3_open,
14234 .ndo_stop = tg3_close,
14235 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14236 .ndo_get_stats = tg3_get_stats,
14237 .ndo_validate_addr = eth_validate_addr,
14238 .ndo_set_multicast_list = tg3_set_rx_mode,
14239 .ndo_set_mac_address = tg3_set_mac_addr,
14240 .ndo_do_ioctl = tg3_ioctl,
14241 .ndo_tx_timeout = tg3_tx_timeout,
14242 .ndo_change_mtu = tg3_change_mtu,
14243 #if TG3_VLAN_TAG_USED
14244 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14246 #ifdef CONFIG_NET_POLL_CONTROLLER
14247 .ndo_poll_controller = tg3_poll_controller,
14251 static int __devinit tg3_init_one(struct pci_dev *pdev,
14252 const struct pci_device_id *ent)
14254 struct net_device *dev;
14256 int i, err, pm_cap;
14257 u32 sndmbx, rcvmbx, intmbx;
14259 u64 dma_mask, persist_dma_mask;
14261 printk_once(KERN_INFO "%s\n", version);
14263 err = pci_enable_device(pdev);
14265 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14269 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14271 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14272 goto err_out_disable_pdev;
14275 pci_set_master(pdev);
14277 /* Find power-management capability. */
14278 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14280 dev_err(&pdev->dev,
14281 "Cannot find Power Management capability, aborting\n");
14283 goto err_out_free_res;
14286 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14288 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14290 goto err_out_free_res;
14293 SET_NETDEV_DEV(dev, &pdev->dev);
14295 #if TG3_VLAN_TAG_USED
14296 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14299 tp = netdev_priv(dev);
14302 tp->pm_cap = pm_cap;
14303 tp->rx_mode = TG3_DEF_RX_MODE;
14304 tp->tx_mode = TG3_DEF_TX_MODE;
14307 tp->msg_enable = tg3_debug;
14309 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14311 /* The word/byte swap controls here control register access byte
14312 * swapping. DMA data byte swapping is controlled in the GRC_MODE register.
14315 tp->misc_host_ctrl =
14316 MISC_HOST_CTRL_MASK_PCI_INT |
14317 MISC_HOST_CTRL_WORD_SWAP |
14318 MISC_HOST_CTRL_INDIR_ACCESS |
14319 MISC_HOST_CTRL_PCISTATE_RW;
14321 /* The NONFRM (non-frame) byte/word swap controls take effect
14322 * on descriptor entries and anything else that isn't packet data.
14324 * The StrongARM chips on the board (one for tx, one for rx)
14325 * are running in big-endian mode.
14327 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14328 GRC_MODE_WSWAP_NONFRM_DATA);
14329 #ifdef __BIG_ENDIAN
14330 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14332 spin_lock_init(&tp->lock);
14333 spin_lock_init(&tp->indirect_lock);
14334 INIT_WORK(&tp->reset_task, tg3_reset_task);
14336 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14338 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14340 goto err_out_free_dev;
14343 tg3_init_link_config(tp);
14345 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14346 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14348 dev->ethtool_ops = &tg3_ethtool_ops;
14349 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14350 dev->irq = pdev->irq;
14352 err = tg3_get_invariants(tp);
14354 dev_err(&pdev->dev,
14355 "Problem fetching invariants of chip, aborting\n");
14356 goto err_out_iounmap;
14359 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14360 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
14361 dev->netdev_ops = &tg3_netdev_ops;
14363 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14366 /* The EPB bridge inside 5714, 5715, and 5780 and any
14367 * device behind the EPB cannot support DMA addresses > 40-bit.
14368 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14369 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14370 * do DMA address check in tg3_start_xmit().
14372 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14373 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14374 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14375 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14376 #ifdef CONFIG_HIGHMEM
14377 dma_mask = DMA_BIT_MASK(64);
14380 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
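/* dma_mask bounds streaming DMA mappings; persist_dma_mask is presumably the
 * coherent-allocation mask passed to pci_set_consistent_dma_mask() below,
 * which is why the two may differ on CONFIG_HIGHMEM systems.
 */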
14382 /* Configure DMA attributes. */
14383 if (dma_mask > DMA_BIT_MASK(32)) {
14384 err = pci_set_dma_mask(pdev, dma_mask);
14386 dev->features |= NETIF_F_HIGHDMA;
14387 err = pci_set_consistent_dma_mask(pdev,
14390 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14391 "DMA for consistent allocations\n");
14392 goto err_out_iounmap;
14396 if (err || dma_mask == DMA_BIT_MASK(32)) {
14397 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14399 dev_err(&pdev->dev,
14400 "No usable DMA configuration, aborting\n");
14401 goto err_out_iounmap;
14405 tg3_init_bufmgr_config(tp);
14407 /* Selectively allow TSO based on operating conditions */
14408 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14409 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14410 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14412 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14413 tp->fw_needed = NULL;
14416 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14417 tp->fw_needed = FIRMWARE_TG3;
14419 /* TSO is on by default on chips that support hardware TSO.
14420 * Firmware TSO on older chips gives lower performance, so it
14421 * is off by default, but can be enabled using ethtool.
14423 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14424 (dev->features & NETIF_F_IP_CSUM))
14425 dev->features |= NETIF_F_TSO;
14427 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14428 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14429 if (dev->features & NETIF_F_IPV6_CSUM)
14430 dev->features |= NETIF_F_TSO6;
14431 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14432 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14433 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14434 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14435 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14437 dev->features |= NETIF_F_TSO_ECN;
14440 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14441 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14442 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14443 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14444 tp->rx_pending = 63;
14447 err = tg3_get_device_address(tp);
14449 dev_err(&pdev->dev,
14450 "Could not obtain valid ethernet address, aborting\n");
14451 goto err_out_iounmap;
14454 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14455 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14456 if (!tp->aperegs) {
14457 dev_err(&pdev->dev,
14458 "Cannot map APE registers, aborting\n");
14460 goto err_out_iounmap;
14463 tg3_ape_lock_init(tp);
14465 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14466 tg3_read_dash_ver(tp);
14470 * Reset the chip in case the UNDI or EFI driver did not shut it down
14471 * cleanly. The DMA self test will enable WDMAC and we'd see (spurious)
14472 * pending DMA on the PCI bus at that point.
14474 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14475 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14476 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14477 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14480 err = tg3_test_dma(tp);
14482 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14483 goto err_out_apeunmap;
14486 /* flow control autonegotiation is default behavior */
14487 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14488 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14490 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14491 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14492 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14493 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14494 struct tg3_napi *tnapi = &tp->napi[i];
14497 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14499 tnapi->int_mbox = intmbx;
14505 tnapi->consmbox = rcvmbx;
14506 tnapi->prodmbox = sndmbx;
14509 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14510 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14512 tnapi->coal_now = HOSTCC_MODE_NOW;
14513 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14516 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14520 * If we support MSIX, we'll be using RSS. If we're using
14521 * RSS, the first vector only handles link interrupts and the
14522 * remaining vectors handle rx and tx interrupts. Reuse the
14523 mailbox values for the next iteration. The values we set up
14524 * above are still useful for the single vectored mode.
14539 pci_set_drvdata(pdev, dev);
14541 err = register_netdev(dev);
14543 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14544 goto err_out_apeunmap;
14547 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14548 tp->board_part_number,
14549 tp->pci_chip_rev_id,
14550 tg3_bus_string(tp, str),
14553 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14554 struct phy_device *phydev;
14555 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14557 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14558 phydev->drv->name, dev_name(&phydev->dev));
14560 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14561 "(WireSpeed[%d])\n", tg3_phy_string(tp),
14562 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14563 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14564 "10/100/1000Base-T")),
14565 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14567 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14568 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14569 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14570 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14571 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14572 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14573 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14575 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14576 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14582 iounmap(tp->aperegs);
14583 tp->aperegs = NULL;
14596 pci_release_regions(pdev);
14598 err_out_disable_pdev:
14599 pci_disable_device(pdev);
14600 pci_set_drvdata(pdev, NULL);
14604 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14606 struct net_device *dev = pci_get_drvdata(pdev);
14609 struct tg3 *tp = netdev_priv(dev);
14612 release_firmware(tp->fw);
14614 flush_scheduled_work();
14616 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14621 unregister_netdev(dev);
14623 iounmap(tp->aperegs);
14624 tp->aperegs = NULL;
14631 pci_release_regions(pdev);
14632 pci_disable_device(pdev);
14633 pci_set_drvdata(pdev, NULL);
14637 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14639 struct net_device *dev = pci_get_drvdata(pdev);
14640 struct tg3 *tp = netdev_priv(dev);
14641 pci_power_t target_state;
14644 /* PCI register 4 needs to be saved whether netif_running() or not.
14645 * MSI address and data need to be saved if using MSI and netif_running().
14648 pci_save_state(pdev);
14650 if (!netif_running(dev))
14653 flush_scheduled_work();
14655 tg3_netif_stop(tp);
14657 del_timer_sync(&tp->timer);
14659 tg3_full_lock(tp, 1);
14660 tg3_disable_ints(tp);
14661 tg3_full_unlock(tp);
14663 netif_device_detach(dev);
14665 tg3_full_lock(tp, 0);
14666 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14667 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14668 tg3_full_unlock(tp);
14670 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14672 err = tg3_set_power_state(tp, target_state);
14676 tg3_full_lock(tp, 0);
14678 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14679 err2 = tg3_restart_hw(tp, 1);
14683 tp->timer.expires = jiffies + tp->timer_offset;
14684 add_timer(&tp->timer);
14686 netif_device_attach(dev);
14687 tg3_netif_start(tp);
14690 tg3_full_unlock(tp);
14699 static int tg3_resume(struct pci_dev *pdev)
14701 struct net_device *dev = pci_get_drvdata(pdev);
14702 struct tg3 *tp = netdev_priv(dev);
14705 pci_restore_state(tp->pdev);
14707 if (!netif_running(dev))
14710 err = tg3_set_power_state(tp, PCI_D0);
14714 netif_device_attach(dev);
14716 tg3_full_lock(tp, 0);
14718 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14719 err = tg3_restart_hw(tp, 1);
14723 tp->timer.expires = jiffies + tp->timer_offset;
14724 add_timer(&tp->timer);
14726 tg3_netif_start(tp);
14729 tg3_full_unlock(tp);
14737 static struct pci_driver tg3_driver = {
14738 .name = DRV_MODULE_NAME,
14739 .id_table = tg3_pci_tbl,
14740 .probe = tg3_init_one,
14741 .remove = __devexit_p(tg3_remove_one),
14742 .suspend = tg3_suspend,
14743 .resume = tg3_resume
14746 static int __init tg3_init(void)
14748 return pci_register_driver(&tg3_driver);
14751 static void __exit tg3_cleanup(void)
14753 pci_unregister_driver(&tg3_driver);
14756 module_init(tg3_init);
14757 module_exit(tg3_cleanup);