2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2009 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/firmware.h>
45 #include <net/checksum.h>
48 #include <asm/system.h>
50 #include <asm/byteorder.h>
51 #include <asm/uaccess.h>
54 #include <asm/idprom.h>
61 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
62 #define TG3_VLAN_TAG_USED 1
63 #else
64 #define TG3_VLAN_TAG_USED 0
65 #endif
69 #define DRV_MODULE_NAME "tg3"
70 #define PFX DRV_MODULE_NAME ": "
71 #define DRV_MODULE_VERSION "3.103"
72 #define DRV_MODULE_RELDATE "November 2, 2009"
74 #define TG3_DEF_MAC_MODE 0
75 #define TG3_DEF_RX_MODE 0
76 #define TG3_DEF_TX_MODE 0
77 #define TG3_DEF_MSG_ENABLE \
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
90 #define TG3_TX_TIMEOUT (5 * HZ)
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
105 #define TG3_RSS_INDIR_TBL_SIZE 128
107 /* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
113 #define TG3_RX_RCB_RING_SIZE(tp) \
114 (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
115 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
117 #define TG3_TX_RING_SIZE 512
118 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
120 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_RING_SIZE)
122 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
123 TG3_RX_JUMBO_RING_SIZE)
124 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
125 TG3_RX_RCB_RING_SIZE(tp))
126 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
127 TG3_TX_RING_SIZE)
128 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
130 #define TG3_DMA_BYTE_ENAB 64
132 #define TG3_RX_STD_DMA_SZ 1536
133 #define TG3_RX_JMB_DMA_SZ 9046
135 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
137 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
138 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
140 #define TG3_RX_STD_BUFF_RING_SIZE \
141 (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
143 #define TG3_RX_JMB_BUFF_RING_SIZE \
144 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
146 /* minimum number of free TX descriptors required to wake up TX process */
147 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
149 #define TG3_RAW_IP_ALIGN 2
151 /* number of ETHTOOL_GSTATS u64's */
152 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
154 #define TG3_NUM_TEST 6
156 #define FIRMWARE_TG3 "tigon/tg3.bin"
157 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
158 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
160 static char version[] __devinitdata =
161 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
163 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
164 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
165 MODULE_LICENSE("GPL");
166 MODULE_VERSION(DRV_MODULE_VERSION);
167 MODULE_FIRMWARE(FIRMWARE_TG3);
168 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
169 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
171 #define TG3_RSS_MIN_NUM_MSIX_VECS 2
173 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
174 module_param(tg3_debug, int, 0);
175 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
177 static struct pci_device_id tg3_pci_tbl[] = {
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
244 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
245 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
246 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
247 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
248 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
249 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
250 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
254 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
256 static const struct {
257 const char string[ETH_GSTRING_LEN];
258 } ethtool_stats_keys[TG3_NUM_STATS] = {
261 { "rx_ucast_packets" },
262 { "rx_mcast_packets" },
263 { "rx_bcast_packets" },
265 { "rx_align_errors" },
266 { "rx_xon_pause_rcvd" },
267 { "rx_xoff_pause_rcvd" },
268 { "rx_mac_ctrl_rcvd" },
269 { "rx_xoff_entered" },
270 { "rx_frame_too_long_errors" },
272 { "rx_undersize_packets" },
273 { "rx_in_length_errors" },
274 { "rx_out_length_errors" },
275 { "rx_64_or_less_octet_packets" },
276 { "rx_65_to_127_octet_packets" },
277 { "rx_128_to_255_octet_packets" },
278 { "rx_256_to_511_octet_packets" },
279 { "rx_512_to_1023_octet_packets" },
280 { "rx_1024_to_1522_octet_packets" },
281 { "rx_1523_to_2047_octet_packets" },
282 { "rx_2048_to_4095_octet_packets" },
283 { "rx_4096_to_8191_octet_packets" },
284 { "rx_8192_to_9022_octet_packets" },
291 { "tx_flow_control" },
293 { "tx_single_collisions" },
294 { "tx_mult_collisions" },
296 { "tx_excessive_collisions" },
297 { "tx_late_collisions" },
298 { "tx_collide_2times" },
299 { "tx_collide_3times" },
300 { "tx_collide_4times" },
301 { "tx_collide_5times" },
302 { "tx_collide_6times" },
303 { "tx_collide_7times" },
304 { "tx_collide_8times" },
305 { "tx_collide_9times" },
306 { "tx_collide_10times" },
307 { "tx_collide_11times" },
308 { "tx_collide_12times" },
309 { "tx_collide_13times" },
310 { "tx_collide_14times" },
311 { "tx_collide_15times" },
312 { "tx_ucast_packets" },
313 { "tx_mcast_packets" },
314 { "tx_bcast_packets" },
315 { "tx_carrier_sense_errors" },
319 { "dma_writeq_full" },
320 { "dma_write_prioq_full" },
324 { "rx_threshold_hit" },
326 { "dma_readq_full" },
327 { "dma_read_prioq_full" },
328 { "tx_comp_queue_full" },
330 { "ring_set_send_prod_index" },
331 { "ring_status_update" },
333 { "nic_avoided_irqs" },
334 { "nic_tx_threshold_hit" }
337 static const struct {
338 const char string[ETH_GSTRING_LEN];
339 } ethtool_test_keys[TG3_NUM_TEST] = {
340 { "nvram test (online) " },
341 { "link test (online) " },
342 { "register test (offline)" },
343 { "memory test (offline)" },
344 { "loopback test (offline)" },
345 { "interrupt test (offline)" },
348 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
350 writel(val, tp->regs + off);
353 static u32 tg3_read32(struct tg3 *tp, u32 off)
355 return (readl(tp->regs + off));
358 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
360 writel(val, tp->aperegs + off);
363 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
365 return (readl(tp->aperegs + off));
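/* Indirect register access: go through the PCI config space
 * REG_BASE_ADDR/REG_DATA window, serialized by indirect_lock.
 */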
368 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
372 spin_lock_irqsave(&tp->indirect_lock, flags);
373 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
375 spin_unlock_irqrestore(&tp->indirect_lock, flags);
378 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
380 writel(val, tp->regs + off);
381 readl(tp->regs + off);
384 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
389 spin_lock_irqsave(&tp->indirect_lock, flags);
390 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
391 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
392 spin_unlock_irqrestore(&tp->indirect_lock, flags);
396 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
400 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
401 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
402 TG3_64BIT_REG_LOW, val);
405 if (off == TG3_RX_STD_PROD_IDX_REG) {
406 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
407 TG3_64BIT_REG_LOW, val);
411 spin_lock_irqsave(&tp->indirect_lock, flags);
412 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
413 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
414 spin_unlock_irqrestore(&tp->indirect_lock, flags);
416 /* In indirect mode when disabling interrupts, we also need
417 * to clear the interrupt bit in the GRC local ctrl register.
419 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
421 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
422 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
426 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
431 spin_lock_irqsave(&tp->indirect_lock, flags);
432 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
433 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
434 spin_unlock_irqrestore(&tp->indirect_lock, flags);
438 /* usec_wait specifies the wait time in usec when writing to certain registers
439 * where it is unsafe to read back the register without some delay.
440 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
441 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
443 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
445 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
446 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
447 /* Non-posted methods */
448 tp->write32(tp, off, val);
451 tg3_write32(tp, off, val);
456 /* Wait again after the read for the posted method to guarantee that
457 * the wait time is met.
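/* Write a mailbox register and read it back to flush the posted write,
 * unless a write-reorder or ICH workaround flag forbids the read-back.
 */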
463 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
465 tp->write32_mbox(tp, off, val);
466 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
467 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
468 tp->read32_mbox(tp, off);
471 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
473 void __iomem *mbox = tp->regs + off;
475 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
477 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
481 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
483 return (readl(tp->regs + off + GRCMBOX_BASE));
486 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
488 writel(val, tp->regs + off + GRCMBOX_BASE);
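/* Convenience wrappers around the per-chip register access methods
 * hung off of struct tg3.
 */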
491 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
492 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
493 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
494 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
495 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
497 #define tw32(reg,val) tp->write32(tp, reg, val)
498 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
499 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
500 #define tr32(reg) tp->read32(tp, reg)
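/* Read/write NIC on-chip SRAM through the memory window, or through
 * PCI config space on chips flagged TG3_FLAG_SRAM_USE_CONFIG.
 */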
502 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
506 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
507 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
510 spin_lock_irqsave(&tp->indirect_lock, flags);
511 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
512 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
513 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
515 /* Always leave this as zero. */
516 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
518 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
519 tw32_f(TG3PCI_MEM_WIN_DATA, val);
521 /* Always leave this as zero. */
522 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
524 spin_unlock_irqrestore(&tp->indirect_lock, flags);
527 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
531 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
532 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
537 spin_lock_irqsave(&tp->indirect_lock, flags);
538 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
539 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
540 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
542 /* Always leave this as zero. */
543 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
545 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
546 *val = tr32(TG3PCI_MEM_WIN_DATA);
548 /* Always leave this as zero. */
549 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
551 spin_unlock_irqrestore(&tp->indirect_lock, flags);
554 static void tg3_ape_lock_init(struct tg3 *tp)
558 /* Make sure the driver doesn't hold any stale locks. */
559 for (i = 0; i < 8; i++)
560 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
561 APE_LOCK_GRANT_DRIVER);
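/* Request a hardware lock shared with the APE management firmware;
 * poll the GRANT register for up to 1 ms and revoke the request on timeout.
 */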
564 static int tg3_ape_lock(struct tg3 *tp, int locknum)
570 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
574 case TG3_APE_LOCK_GRC:
575 case TG3_APE_LOCK_MEM:
583 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
585 /* Wait for up to 1 millisecond to acquire lock. */
586 for (i = 0; i < 100; i++) {
587 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
588 if (status == APE_LOCK_GRANT_DRIVER)
593 if (status != APE_LOCK_GRANT_DRIVER) {
594 /* Revoke the lock request. */
595 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
596 APE_LOCK_GRANT_DRIVER);
604 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
608 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
612 case TG3_APE_LOCK_GRC:
613 case TG3_APE_LOCK_MEM:
620 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
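/* Mask PCI interrupts in MISC_HOST_CTRL and write 1 to every interrupt
 * mailbox so the chip stops generating interrupts.
 */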
623 static void tg3_disable_ints(struct tg3 *tp)
627 tw32(TG3PCI_MISC_HOST_CTRL,
628 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
629 for (i = 0; i < tp->irq_max; i++)
630 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
633 static void tg3_enable_ints(struct tg3 *tp)
641 tw32(TG3PCI_MISC_HOST_CTRL,
642 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
644 for (i = 0; i < tp->irq_cnt; i++) {
645 struct tg3_napi *tnapi = &tp->napi[i];
646 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
647 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
648 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
650 coal_now |= tnapi->coal_now;
653 /* Force an initial interrupt */
654 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
655 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
656 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
658 tw32(HOSTCC_MODE, tp->coalesce_mode |
659 HOSTCC_MODE_ENABLE | coal_now);
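/* Check the status block for a link change or for new RX/TX completions
 * owned by this NAPI instance.
 */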
662 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
664 struct tg3 *tp = tnapi->tp;
665 struct tg3_hw_status *sblk = tnapi->hw_status;
666 unsigned int work_exists = 0;
668 /* check for phy events */
669 if (!(tp->tg3_flags &
670 (TG3_FLAG_USE_LINKCHG_REG |
671 TG3_FLAG_POLL_SERDES))) {
672 if (sblk->status & SD_STATUS_LINK_CHG)
675 /* check for RX/TX work to do */
676 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
677 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
684 * similar to tg3_enable_ints, but it accurately determines whether there
685 * is new work pending and can return without flushing the PIO write
686 * which reenables interrupts
688 static void tg3_int_reenable(struct tg3_napi *tnapi)
690 struct tg3 *tp = tnapi->tp;
692 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
695 /* When doing tagged status, this work check is unnecessary.
696 * The last_tag we write above tells the chip which piece of
697 * work we've completed.
699 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
701 tw32(HOSTCC_MODE, tp->coalesce_mode |
702 HOSTCC_MODE_ENABLE | tnapi->coal_now);
705 static void tg3_napi_disable(struct tg3 *tp)
709 for (i = tp->irq_cnt - 1; i >= 0; i--)
710 napi_disable(&tp->napi[i].napi);
713 static void tg3_napi_enable(struct tg3 *tp)
717 for (i = 0; i < tp->irq_cnt; i++)
718 napi_enable(&tp->napi[i].napi);
721 static inline void tg3_netif_stop(struct tg3 *tp)
723 tp->dev->trans_start = jiffies; /* prevent tx timeout */
724 tg3_napi_disable(tp);
725 netif_tx_disable(tp->dev);
728 static inline void tg3_netif_start(struct tg3 *tp)
730 /* NOTE: unconditional netif_tx_wake_all_queues is only
731 * appropriate so long as all callers are assured to
732 * have free tx slots (such as after tg3_init_hw)
734 netif_tx_wake_all_queues(tp->dev);
737 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
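/* Reprogram the core clock selection in TG3PCI_CLOCK_CTRL; not needed
 * on CPMU-equipped or 5780-class chips.
 */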
741 static void tg3_switch_clocks(struct tg3 *tp)
746 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
747 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
750 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
752 orig_clock_ctrl = clock_ctrl;
753 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
754 CLOCK_CTRL_CLKRUN_OENABLE |
756 tp->pci_clock_ctrl = clock_ctrl;
758 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
759 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
760 tw32_wait_f(TG3PCI_CLOCK_CTRL,
761 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
763 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
764 tw32_wait_f(TG3PCI_CLOCK_CTRL,
766 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
768 tw32_wait_f(TG3PCI_CLOCK_CTRL,
769 clock_ctrl | (CLOCK_CTRL_ALTCLK),
772 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
775 #define PHY_BUSY_LOOPS 5000
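/* MII management read through the MAC's MI_COM interface; hardware
 * auto-polling is paused while the frame completes.
 */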
777 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
783 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
785 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
791 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
792 MI_COM_PHY_ADDR_MASK);
793 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
794 MI_COM_REG_ADDR_MASK);
795 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
797 tw32_f(MAC_MI_COM, frame_val);
799 loops = PHY_BUSY_LOOPS;
802 frame_val = tr32(MAC_MI_COM);
804 if ((frame_val & MI_COM_BUSY) == 0) {
806 frame_val = tr32(MAC_MI_COM);
814 *val = frame_val & MI_COM_DATA_MASK;
818 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
819 tw32_f(MAC_MI_MODE, tp->mi_mode);
826 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
832 if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
833 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
836 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
838 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
842 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
843 MI_COM_PHY_ADDR_MASK);
844 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
845 MI_COM_REG_ADDR_MASK);
846 frame_val |= (val & MI_COM_DATA_MASK);
847 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
849 tw32_f(MAC_MI_COM, frame_val);
851 loops = PHY_BUSY_LOOPS;
854 frame_val = tr32(MAC_MI_COM);
855 if ((frame_val & MI_COM_BUSY) == 0) {
857 frame_val = tr32(MAC_MI_COM);
867 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
868 tw32_f(MAC_MI_MODE, tp->mi_mode);
875 static int tg3_bmcr_reset(struct tg3 *tp)
880 /* OK, reset it, and poll the BMCR_RESET bit until it
881 * clears or we time out.
883 phy_control = BMCR_RESET;
884 err = tg3_writephy(tp, MII_BMCR, phy_control);
890 err = tg3_readphy(tp, MII_BMCR, &phy_control);
894 if ((phy_control & BMCR_RESET) == 0) {
906 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
908 struct tg3 *tp = bp->priv;
911 spin_lock_bh(&tp->lock);
913 if (tg3_readphy(tp, reg, &val))
916 spin_unlock_bh(&tp->lock);
921 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
923 struct tg3 *tp = bp->priv;
926 spin_lock_bh(&tp->lock);
928 if (tg3_writephy(tp, reg, val))
931 spin_unlock_bh(&tp->lock);
936 static int tg3_mdio_reset(struct mii_bus *bp)
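/* Match the 5785 MAC's PHY configuration (LED modes, RGMII in-band
 * signalling) to the attached PHY.
 */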
941 static void tg3_mdio_config_5785(struct tg3 *tp)
944 struct phy_device *phydev;
946 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
947 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
948 case TG3_PHY_ID_BCM50610:
949 case TG3_PHY_ID_BCM50610M:
950 val = MAC_PHYCFG2_50610_LED_MODES;
952 case TG3_PHY_ID_BCMAC131:
953 val = MAC_PHYCFG2_AC131_LED_MODES;
955 case TG3_PHY_ID_RTL8211C:
956 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
958 case TG3_PHY_ID_RTL8201E:
959 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
965 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
966 tw32(MAC_PHYCFG2, val);
968 val = tr32(MAC_PHYCFG1);
969 val &= ~(MAC_PHYCFG1_RGMII_INT |
970 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
971 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
972 tw32(MAC_PHYCFG1, val);
977 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
978 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
979 MAC_PHYCFG2_FMODE_MASK_MASK |
980 MAC_PHYCFG2_GMODE_MASK_MASK |
981 MAC_PHYCFG2_ACT_MASK_MASK |
982 MAC_PHYCFG2_QUAL_MASK_MASK |
983 MAC_PHYCFG2_INBAND_ENABLE;
985 tw32(MAC_PHYCFG2, val);
987 val = tr32(MAC_PHYCFG1);
988 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
989 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
990 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
991 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
992 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
993 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
994 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
996 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
997 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
998 tw32(MAC_PHYCFG1, val);
1000 val = tr32(MAC_EXT_RGMII_MODE);
1001 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1002 MAC_RGMII_MODE_RX_QUALITY |
1003 MAC_RGMII_MODE_RX_ACTIVITY |
1004 MAC_RGMII_MODE_RX_ENG_DET |
1005 MAC_RGMII_MODE_TX_ENABLE |
1006 MAC_RGMII_MODE_TX_LOWPWR |
1007 MAC_RGMII_MODE_TX_RESET);
1008 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
1009 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1010 val |= MAC_RGMII_MODE_RX_INT_B |
1011 MAC_RGMII_MODE_RX_QUALITY |
1012 MAC_RGMII_MODE_RX_ACTIVITY |
1013 MAC_RGMII_MODE_RX_ENG_DET;
1014 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1015 val |= MAC_RGMII_MODE_TX_ENABLE |
1016 MAC_RGMII_MODE_TX_LOWPWR |
1017 MAC_RGMII_MODE_TX_RESET;
1019 tw32(MAC_EXT_RGMII_MODE, val);
1022 static void tg3_mdio_start(struct tg3 *tp)
1024 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1025 tw32_f(MAC_MI_MODE, tp->mi_mode);
1028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
1029 u32 funcnum, is_serdes;
1031 funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
1037 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1041 tp->phy_addr = TG3_PHY_MII_ADDR;
1043 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1045 tg3_mdio_config_5785(tp);
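/* Register an MDIO bus for phylib-managed PHYs and apply per-PHY
 * interface and dev_flags settings once the PHY is probed.
 */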
1048 static int tg3_mdio_init(struct tg3 *tp)
1052 struct phy_device *phydev;
1056 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1057 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1060 tp->mdio_bus = mdiobus_alloc();
1061 if (tp->mdio_bus == NULL)
1064 tp->mdio_bus->name = "tg3 mdio bus";
1065 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1066 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1067 tp->mdio_bus->priv = tp;
1068 tp->mdio_bus->parent = &tp->pdev->dev;
1069 tp->mdio_bus->read = &tg3_mdio_read;
1070 tp->mdio_bus->write = &tg3_mdio_write;
1071 tp->mdio_bus->reset = &tg3_mdio_reset;
1072 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1073 tp->mdio_bus->irq = &tp->mdio_irq[0];
1075 for (i = 0; i < PHY_MAX_ADDR; i++)
1076 tp->mdio_bus->irq[i] = PHY_POLL;
1078 /* The bus registration will look for all the PHYs on the mdio bus.
1079 * Unfortunately, it does not ensure the PHY is powered up before
1080 * accessing the PHY ID registers. A chip reset is the
1081 * quickest way to bring the device back to an operational state.
1083 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1086 i = mdiobus_register(tp->mdio_bus);
1088 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1090 mdiobus_free(tp->mdio_bus);
1094 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1096 if (!phydev || !phydev->drv) {
1097 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1098 mdiobus_unregister(tp->mdio_bus);
1099 mdiobus_free(tp->mdio_bus);
1103 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1104 case TG3_PHY_ID_BCM57780:
1105 phydev->interface = PHY_INTERFACE_MODE_GMII;
1106 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1108 case TG3_PHY_ID_BCM50610:
1109 case TG3_PHY_ID_BCM50610M:
1110 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1111 PHY_BRCM_RX_REFCLK_UNUSED |
1112 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1113 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1114 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1115 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1116 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1117 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1118 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1119 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1121 case TG3_PHY_ID_RTL8211C:
1122 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1124 case TG3_PHY_ID_RTL8201E:
1125 case TG3_PHY_ID_BCMAC131:
1126 phydev->interface = PHY_INTERFACE_MODE_MII;
1127 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1128 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
1132 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1135 tg3_mdio_config_5785(tp);
1140 static void tg3_mdio_fini(struct tg3 *tp)
1142 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1143 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1144 mdiobus_unregister(tp->mdio_bus);
1145 mdiobus_free(tp->mdio_bus);
1149 /* tp->lock is held. */
1150 static inline void tg3_generate_fw_event(struct tg3 *tp)
1154 val = tr32(GRC_RX_CPU_EVENT);
1155 val |= GRC_RX_CPU_DRIVER_EVENT;
1156 tw32_f(GRC_RX_CPU_EVENT, val);
1158 tp->last_event_jiffies = jiffies;
1161 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1163 /* tp->lock is held. */
1164 static void tg3_wait_for_event_ack(struct tg3 *tp)
1167 unsigned int delay_cnt;
1170 /* If enough time has passed, no wait is necessary. */
1171 time_remain = (long)(tp->last_event_jiffies + 1 +
1172 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1174 if (time_remain < 0)
1177 /* Check if we can shorten the wait time. */
1178 delay_cnt = jiffies_to_usecs(time_remain);
1179 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1180 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1181 delay_cnt = (delay_cnt >> 3) + 1;
1183 for (i = 0; i < delay_cnt; i++) {
1184 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1190 /* tp->lock is held. */
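/* Pass the current link and advertisement state to the management
 * firmware through the shared command mailbox.
 */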
1191 static void tg3_ump_link_report(struct tg3 *tp)
1196 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1197 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1200 tg3_wait_for_event_ack(tp);
1202 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1204 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1207 if (!tg3_readphy(tp, MII_BMCR, &reg))
1209 if (!tg3_readphy(tp, MII_BMSR, &reg))
1210 val |= (reg & 0xffff);
1211 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1214 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1216 if (!tg3_readphy(tp, MII_LPA, &reg))
1217 val |= (reg & 0xffff);
1218 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1221 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1222 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1224 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1225 val |= (reg & 0xffff);
1227 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1229 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1233 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1235 tg3_generate_fw_event(tp);
1238 static void tg3_link_report(struct tg3 *tp)
1240 if (!netif_carrier_ok(tp->dev)) {
1241 if (netif_msg_link(tp))
1242 printk(KERN_INFO PFX "%s: Link is down.\n",
1244 tg3_ump_link_report(tp);
1245 } else if (netif_msg_link(tp)) {
1246 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1248 (tp->link_config.active_speed == SPEED_1000 ?
1250 (tp->link_config.active_speed == SPEED_100 ?
1252 (tp->link_config.active_duplex == DUPLEX_FULL ?
1255 printk(KERN_INFO PFX
1256 "%s: Flow control is %s for TX and %s for RX.\n",
1258 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1260 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1262 tg3_ump_link_report(tp);
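/* Map the requested flow control mode onto copper (1000BASE-T) pause
 * advertisement bits.
 */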
1266 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1270 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1271 miireg = ADVERTISE_PAUSE_CAP;
1272 else if (flow_ctrl & FLOW_CTRL_TX)
1273 miireg = ADVERTISE_PAUSE_ASYM;
1274 else if (flow_ctrl & FLOW_CTRL_RX)
1275 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1282 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1286 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1287 miireg = ADVERTISE_1000XPAUSE;
1288 else if (flow_ctrl & FLOW_CTRL_TX)
1289 miireg = ADVERTISE_1000XPSE_ASYM;
1290 else if (flow_ctrl & FLOW_CTRL_RX)
1291 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1298 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1302 if (lcladv & ADVERTISE_1000XPAUSE) {
1303 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1304 if (rmtadv & LPA_1000XPAUSE)
1305 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1306 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1309 if (rmtadv & LPA_1000XPAUSE)
1310 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1312 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1313 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1320 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1324 u32 old_rx_mode = tp->rx_mode;
1325 u32 old_tx_mode = tp->tx_mode;
1327 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1328 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1330 autoneg = tp->link_config.autoneg;
1332 if (autoneg == AUTONEG_ENABLE &&
1333 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1334 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1335 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1337 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1339 flowctrl = tp->link_config.flowctrl;
1341 tp->link_config.active_flowctrl = flowctrl;
1343 if (flowctrl & FLOW_CTRL_RX)
1344 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1346 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1348 if (old_rx_mode != tp->rx_mode)
1349 tw32_f(MAC_RX_MODE, tp->rx_mode);
1351 if (flowctrl & FLOW_CTRL_TX)
1352 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1354 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1356 if (old_tx_mode != tp->tx_mode)
1357 tw32_f(MAC_TX_MODE, tp->tx_mode);
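/* phylib link-change callback: fold the PHY's speed/duplex/pause state
 * into the MAC mode registers and report link changes.
 */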
1360 static void tg3_adjust_link(struct net_device *dev)
1362 u8 oldflowctrl, linkmesg = 0;
1363 u32 mac_mode, lcl_adv, rmt_adv;
1364 struct tg3 *tp = netdev_priv(dev);
1365 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1367 spin_lock_bh(&tp->lock);
1369 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1370 MAC_MODE_HALF_DUPLEX);
1372 oldflowctrl = tp->link_config.active_flowctrl;
1378 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1379 mac_mode |= MAC_MODE_PORT_MODE_MII;
1380 else if (phydev->speed == SPEED_1000 ||
1381 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1382 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1384 mac_mode |= MAC_MODE_PORT_MODE_MII;
1386 if (phydev->duplex == DUPLEX_HALF)
1387 mac_mode |= MAC_MODE_HALF_DUPLEX;
1389 lcl_adv = tg3_advert_flowctrl_1000T(
1390 tp->link_config.flowctrl);
1393 rmt_adv = LPA_PAUSE_CAP;
1394 if (phydev->asym_pause)
1395 rmt_adv |= LPA_PAUSE_ASYM;
1398 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1400 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1402 if (mac_mode != tp->mac_mode) {
1403 tp->mac_mode = mac_mode;
1404 tw32_f(MAC_MODE, tp->mac_mode);
1408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1409 if (phydev->speed == SPEED_10)
1411 MAC_MI_STAT_10MBPS_MODE |
1412 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1414 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1417 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1418 tw32(MAC_TX_LENGTHS,
1419 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1420 (6 << TX_LENGTHS_IPG_SHIFT) |
1421 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1423 tw32(MAC_TX_LENGTHS,
1424 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1425 (6 << TX_LENGTHS_IPG_SHIFT) |
1426 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1428 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1429 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1430 phydev->speed != tp->link_config.active_speed ||
1431 phydev->duplex != tp->link_config.active_duplex ||
1432 oldflowctrl != tp->link_config.active_flowctrl)
1435 tp->link_config.active_speed = phydev->speed;
1436 tp->link_config.active_duplex = phydev->duplex;
1438 spin_unlock_bh(&tp->lock);
1441 tg3_link_report(tp);
1444 static int tg3_phy_init(struct tg3 *tp)
1446 struct phy_device *phydev;
1448 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1451 /* Bring the PHY back to a known state. */
1454 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1456 /* Attach the MAC to the PHY. */
1457 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1458 phydev->dev_flags, phydev->interface);
1459 if (IS_ERR(phydev)) {
1460 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1461 return PTR_ERR(phydev);
1464 /* Mask with MAC supported features. */
1465 switch (phydev->interface) {
1466 case PHY_INTERFACE_MODE_GMII:
1467 case PHY_INTERFACE_MODE_RGMII:
1468 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1469 phydev->supported &= (PHY_GBIT_FEATURES |
1471 SUPPORTED_Asym_Pause);
1475 case PHY_INTERFACE_MODE_MII:
1476 phydev->supported &= (PHY_BASIC_FEATURES |
1478 SUPPORTED_Asym_Pause);
1481 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1485 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1487 phydev->advertising = phydev->supported;
1492 static void tg3_phy_start(struct tg3 *tp)
1494 struct phy_device *phydev;
1496 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1499 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1501 if (tp->link_config.phy_is_low_power) {
1502 tp->link_config.phy_is_low_power = 0;
1503 phydev->speed = tp->link_config.orig_speed;
1504 phydev->duplex = tp->link_config.orig_duplex;
1505 phydev->autoneg = tp->link_config.orig_autoneg;
1506 phydev->advertising = tp->link_config.orig_advertising;
1511 phy_start_aneg(phydev);
1514 static void tg3_phy_stop(struct tg3 *tp)
1516 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1519 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1522 static void tg3_phy_fini(struct tg3 *tp)
1524 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1525 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1526 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1530 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1532 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1533 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1536 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1540 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1543 tg3_writephy(tp, MII_TG3_FET_TEST,
1544 phytest | MII_TG3_FET_SHADOW_EN);
1545 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1547 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1549 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1550 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1552 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
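/* Enable or disable the PHY's auto power-down feature via its shadow
 * registers; FET-style PHYs use a different shadow map.
 */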
1556 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1560 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
1563 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1564 tg3_phy_fet_toggle_apd(tp, enable);
1568 reg = MII_TG3_MISC_SHDW_WREN |
1569 MII_TG3_MISC_SHDW_SCR5_SEL |
1570 MII_TG3_MISC_SHDW_SCR5_LPED |
1571 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1572 MII_TG3_MISC_SHDW_SCR5_SDTL |
1573 MII_TG3_MISC_SHDW_SCR5_C125OE;
1574 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1575 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1577 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1580 reg = MII_TG3_MISC_SHDW_WREN |
1581 MII_TG3_MISC_SHDW_APD_SEL |
1582 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1584 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1586 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
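/* Turn automatic MDI/MDI-X crossover on or off through the AUX control
 * (or FET shadow) register.
 */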
1589 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1593 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1594 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1597 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1600 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1601 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1603 tg3_writephy(tp, MII_TG3_FET_TEST,
1604 ephy | MII_TG3_FET_SHADOW_EN);
1605 if (!tg3_readphy(tp, reg, &phy)) {
1607 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1609 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1610 tg3_writephy(tp, reg, phy);
1612 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1615 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1616 MII_TG3_AUXCTL_SHDWSEL_MISC;
1617 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1618 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1620 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1622 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1623 phy |= MII_TG3_AUXCTL_MISC_WREN;
1624 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1629 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1633 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1636 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1637 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1638 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1639 (val | (1 << 15) | (1 << 4)));
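/* Copy factory trim values from OTP into the PHY DSP registers
 * (AGC target, filter, VDAC and offset adjustments).
 */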
1642 static void tg3_phy_apply_otp(struct tg3 *tp)
1651 /* Enable SM_DSP clock and tx 6dB coding. */
1652 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1653 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1654 MII_TG3_AUXCTL_ACTL_TX_6DB;
1655 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1657 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1658 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1659 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1661 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1662 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1663 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1665 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1666 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1667 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1669 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1670 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1672 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1673 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1675 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1676 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1677 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1679 /* Turn off SM_DSP clock. */
1680 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1681 MII_TG3_AUXCTL_ACTL_TX_6DB;
1682 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1685 static int tg3_wait_macro_done(struct tg3 *tp)
1692 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1693 if ((tmp32 & 0x1000) == 0)
1703 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1705 static const u32 test_pat[4][6] = {
1706 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1707 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1708 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1709 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1713 for (chan = 0; chan < 4; chan++) {
1716 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1717 (chan * 0x2000) | 0x0200);
1718 tg3_writephy(tp, 0x16, 0x0002);
1720 for (i = 0; i < 6; i++)
1721 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1724 tg3_writephy(tp, 0x16, 0x0202);
1725 if (tg3_wait_macro_done(tp)) {
1730 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1731 (chan * 0x2000) | 0x0200);
1732 tg3_writephy(tp, 0x16, 0x0082);
1733 if (tg3_wait_macro_done(tp)) {
1738 tg3_writephy(tp, 0x16, 0x0802);
1739 if (tg3_wait_macro_done(tp)) {
1744 for (i = 0; i < 6; i += 2) {
1747 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1748 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1749 tg3_wait_macro_done(tp)) {
1755 if (low != test_pat[chan][i] ||
1756 high != test_pat[chan][i+1]) {
1757 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1758 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1759 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1769 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1773 for (chan = 0; chan < 4; chan++) {
1776 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1777 (chan * 0x2000) | 0x0200);
1778 tg3_writephy(tp, 0x16, 0x0002);
1779 for (i = 0; i < 6; i++)
1780 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1781 tg3_writephy(tp, 0x16, 0x0202);
1782 if (tg3_wait_macro_done(tp))
1789 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1791 u32 reg32, phy9_orig;
1792 int retries, do_phy_reset, err;
1798 err = tg3_bmcr_reset(tp);
1804 /* Disable transmitter and interrupt. */
1805 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1809 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1811 /* Set full-duplex, 1000 mbps. */
1812 tg3_writephy(tp, MII_BMCR,
1813 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1815 /* Set to master mode. */
1816 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1819 tg3_writephy(tp, MII_TG3_CTRL,
1820 (MII_TG3_CTRL_AS_MASTER |
1821 MII_TG3_CTRL_ENABLE_AS_MASTER));
1823 /* Enable SM_DSP_CLOCK and 6dB. */
1824 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1826 /* Block the PHY control access. */
1827 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1828 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1830 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1833 } while (--retries);
1835 err = tg3_phy_reset_chanpat(tp);
1839 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1840 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1842 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1843 tg3_writephy(tp, 0x16, 0x0000);
1845 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1847 /* Set Extended packet length bit for jumbo frames */
1848 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1851 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1854 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1856 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1858 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1865 /* This will reset the tigon3 PHY if there is no valid
1866 * link unless the FORCE argument is non-zero.
1868 static int tg3_phy_reset(struct tg3 *tp)
1874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1877 val = tr32(GRC_MISC_CFG);
1878 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1881 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1882 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1886 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1887 netif_carrier_off(tp->dev);
1888 tg3_link_report(tp);
1891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1893 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1894 err = tg3_phy_reset_5703_4_5(tp);
1901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1902 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1903 cpmuctrl = tr32(TG3_CPMU_CTRL);
1904 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1906 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1909 err = tg3_bmcr_reset(tp);
1913 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1916 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1917 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1919 tw32(TG3_CPMU_CTRL, cpmuctrl);
1922 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1923 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1926 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1927 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1928 CPMU_LSPD_1000MB_MACCLK_12_5) {
1929 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1931 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1935 tg3_phy_apply_otp(tp);
1937 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1938 tg3_phy_toggle_apd(tp, true);
1940 tg3_phy_toggle_apd(tp, false);
1943 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1944 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1945 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1946 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1947 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1948 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1949 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1951 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1952 tg3_writephy(tp, 0x1c, 0x8d68);
1953 tg3_writephy(tp, 0x1c, 0x8d68);
1955 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1956 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1957 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1958 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1959 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1960 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1961 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1962 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1963 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1965 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1966 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1967 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1968 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1969 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1970 tg3_writephy(tp, MII_TG3_TEST1,
1971 MII_TG3_TEST1_TRIM_EN | 0x4);
1973 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1974 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1976 /* Set Extended packet length bit (bit 14) on all chips that */
1977 /* support jumbo frames */
1978 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1979 /* Cannot do read-modify-write on 5401 */
1980 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1981 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1984 /* Set bit 14 with read-modify-write to preserve other bits */
1985 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1986 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1987 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1990 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1991 * jumbo frames transmission.
1993 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1996 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1997 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1998 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2002 /* adjust output voltage */
2003 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2006 tg3_phy_toggle_automdix(tp, 1);
2007 tg3_phy_set_wirespeed(tp);
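/* Drive the GPIOs that control auxiliary power, coordinating with the
 * peer port on dual-port devices so a port using WOL or ASF keeps power.
 */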
2011 static void tg3_frob_aux_power(struct tg3 *tp)
2013 struct tg3 *tp_peer = tp;
2015 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
2018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2020 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2021 struct net_device *dev_peer;
2023 dev_peer = pci_get_drvdata(tp->pdev_peer);
2024 /* remove_one() may have been run on the peer. */
2028 tp_peer = netdev_priv(dev_peer);
2031 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2032 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2033 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2034 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2037 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2038 (GRC_LCLCTRL_GPIO_OE0 |
2039 GRC_LCLCTRL_GPIO_OE1 |
2040 GRC_LCLCTRL_GPIO_OE2 |
2041 GRC_LCLCTRL_GPIO_OUTPUT0 |
2042 GRC_LCLCTRL_GPIO_OUTPUT1),
2044 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2045 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2046 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2047 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2048 GRC_LCLCTRL_GPIO_OE1 |
2049 GRC_LCLCTRL_GPIO_OE2 |
2050 GRC_LCLCTRL_GPIO_OUTPUT0 |
2051 GRC_LCLCTRL_GPIO_OUTPUT1 |
2053 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2055 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2056 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2058 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2059 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2062 u32 grc_local_ctrl = 0;
2064 if (tp_peer != tp &&
2065 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2068 /* Workaround to prevent overdrawing Amps. */
2069 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2071 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2072 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2073 grc_local_ctrl, 100);
2076 /* On 5753 and variants, GPIO2 cannot be used. */
2077 no_gpio2 = tp->nic_sram_data_cfg &
2078 NIC_SRAM_DATA_CFG_NO_GPIO2;
2080 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2081 GRC_LCLCTRL_GPIO_OE1 |
2082 GRC_LCLCTRL_GPIO_OE2 |
2083 GRC_LCLCTRL_GPIO_OUTPUT1 |
2084 GRC_LCLCTRL_GPIO_OUTPUT2;
2086 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2087 GRC_LCLCTRL_GPIO_OUTPUT2);
2089 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2090 grc_local_ctrl, 100);
2092 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2094 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2095 grc_local_ctrl, 100);
2098 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2099 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2100 grc_local_ctrl, 100);
2104 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2105 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2106 if (tp_peer != tp &&
2107 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2110 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2111 (GRC_LCLCTRL_GPIO_OE1 |
2112 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2114 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2115 GRC_LCLCTRL_GPIO_OE1, 100);
2117 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2118 (GRC_LCLCTRL_GPIO_OE1 |
2119 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2124 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2126 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2128 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2129 if (speed != SPEED_10)
2131 } else if (speed == SPEED_10)
2137 static int tg3_setup_phy(struct tg3 *, int);
2139 #define RESET_KIND_SHUTDOWN 0
2140 #define RESET_KIND_INIT 1
2141 #define RESET_KIND_SUSPEND 2
2143 static void tg3_write_sig_post_reset(struct tg3 *, int);
2144 static int tg3_halt_cpu(struct tg3 *, u32);
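/* Quiesce and power down the PHY where it is safe to do so; serdes and
 * FET PHYs take different paths, and some chips must never see BMCR_PDOWN.
 */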
2146 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2150 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2152 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2153 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2156 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2157 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2158 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2163 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2165 val = tr32(GRC_MISC_CFG);
2166 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2169 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2171 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2174 tg3_writephy(tp, MII_ADVERTISE, 0);
2175 tg3_writephy(tp, MII_BMCR,
2176 BMCR_ANENABLE | BMCR_ANRESTART);
2178 tg3_writephy(tp, MII_TG3_FET_TEST,
2179 phytest | MII_TG3_FET_SHADOW_EN);
2180 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2181 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2183 MII_TG3_FET_SHDW_AUXMODE4,
2186 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2189 } else if (do_low_power) {
2190 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2191 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2193 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2194 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2195 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2196 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2197 MII_TG3_AUXCTL_PCTL_VREG_11V);
2200 /* The PHY should not be powered down on some chips because
2203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2204 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2205 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2206 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2209 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2210 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2211 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2212 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2213 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2214 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2217 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
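/* Note: BMCR_PDOWN above is the standard MII power-down bit in the basic
* mode control register.  The chip-specific branches earlier in this
* function either keep the PHY powered to avoid hardware bugs or use
* dedicated low-power paths (the 5906 EPHY IDDQ bit, the FET
* shadow-register AUXMODE4 setting).
*/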
2220 /* tp->lock is held. */
2221 static int tg3_nvram_lock(struct tg3 *tp)
2223 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2226 if (tp->nvram_lock_cnt == 0) {
2227 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2228 for (i = 0; i < 8000; i++) {
2229 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2234 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2238 tp->nvram_lock_cnt++;
2243 /* tp->lock is held. */
2244 static void tg3_nvram_unlock(struct tg3 *tp)
2246 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2247 if (tp->nvram_lock_cnt > 0)
2248 tp->nvram_lock_cnt--;
2249 if (tp->nvram_lock_cnt == 0)
2250 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
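/* A note on the two helpers above: the NVRAM software-arbitration grant is
* reference counted.  tg3_nvram_lock() only requests the hardware arbiter
* (SWARB_REQ_SET1, then polling for SWARB_GNT1) when nvram_lock_cnt goes
* from 0 to 1, and tg3_nvram_unlock() only releases it (SWARB_REQ_CLR1)
* when the count drops back to 0, so nested acquisitions under tp->lock
* are cheap.
*/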
2254 /* tp->lock is held. */
2255 static void tg3_enable_nvram_access(struct tg3 *tp)
2257 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2258 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2259 u32 nvaccess = tr32(NVRAM_ACCESS);
2261 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2265 /* tp->lock is held. */
2266 static void tg3_disable_nvram_access(struct tg3 *tp)
2268 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2269 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2270 u32 nvaccess = tr32(NVRAM_ACCESS);
2272 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2276 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2277 u32 offset, u32 *val)
2282 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2285 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2286 EEPROM_ADDR_DEVID_MASK |
2288 tw32(GRC_EEPROM_ADDR,
2290 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2291 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2292 EEPROM_ADDR_ADDR_MASK) |
2293 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2295 for (i = 0; i < 1000; i++) {
2296 tmp = tr32(GRC_EEPROM_ADDR);
2298 if (tmp & EEPROM_ADDR_COMPLETE)
2302 if (!(tmp & EEPROM_ADDR_COMPLETE))
2305 tmp = tr32(GRC_EEPROM_DATA);
2308 * The data will always be opposite the native endian
2309 * format. Perform a blind byteswap to compensate.
*/
*val = swab32(tmp);
2316 #define NVRAM_CMD_TIMEOUT 10000
2318 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2322 tw32(NVRAM_CMD, nvram_cmd);
2323 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2325 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2331 if (i == NVRAM_CMD_TIMEOUT)
2337 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2339 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2340 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2341 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2342 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2343 (tp->nvram_jedecnum == JEDEC_ATMEL))
2345 addr = ((addr / tp->nvram_pagesize) <<
2346 ATMEL_AT45DB0X1B_PAGE_POS) +
2347 (addr % tp->nvram_pagesize);
2352 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2354 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2355 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2356 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2357 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2358 (tp->nvram_jedecnum == JEDEC_ATMEL))
2360 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2361 tp->nvram_pagesize) +
2362 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
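/* Worked example for the two translations above (values are illustrative:
* the Atmel AT45DB0X1B parts use an odd page size such as 264 bytes with
* ATMEL_AT45DB0X1B_PAGE_POS page-address bits, e.g. 9):
*
*   logical offset 1000  =  page 3, byte 208        (1000 = 3*264 + 208)
*   physical address     =  (3 << 9) + 208 = 1744
*
* tg3_nvram_logical_addr() applies the inverse mapping, so round-tripping
* an address through both helpers returns the original value.
*/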
2367 /* NOTE: Data read in from NVRAM is byteswapped according to
2368 * the byteswapping settings for all other register accesses.
2369 * tg3 devices are BE devices, so on a BE machine, the data
2370 * returned will be exactly as it is seen in NVRAM. On a LE
2371 * machine, the 32-bit value will be byteswapped.
2373 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2377 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2378 return tg3_nvram_read_using_eeprom(tp, offset, val);
2380 offset = tg3_nvram_phys_addr(tp, offset);
2382 if (offset > NVRAM_ADDR_MSK)
2385 ret = tg3_nvram_lock(tp);
2389 tg3_enable_nvram_access(tp);
2391 tw32(NVRAM_ADDR, offset);
2392 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2393 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2396 *val = tr32(NVRAM_RDDATA);
2398 tg3_disable_nvram_access(tp);
2400 tg3_nvram_unlock(tp);
2405 /* Ensures NVRAM data is in bytestream format. */
2406 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2409 int res = tg3_nvram_read(tp, offset, &v);
2411 *val = cpu_to_be32(v);
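/* Illustrative reading of the NOTE above: if four consecutive NVRAM bytes
* are de ad be ef, tg3_nvram_read() returns the u32 0xdeadbeef on either
* host type (so the bytes land in host memory in NVRAM order only on a
* big-endian machine), while tg3_nvram_read_be32() yields a __be32 whose
* in-memory bytes are de ad be ef on both, which is what callers that
* treat NVRAM contents as a byte stream want.
*/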
2415 /* tp->lock is held. */
2416 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2418 u32 addr_high, addr_low;
2421 addr_high = ((tp->dev->dev_addr[0] << 8) |
2422 tp->dev->dev_addr[1]);
2423 addr_low = ((tp->dev->dev_addr[2] << 24) |
2424 (tp->dev->dev_addr[3] << 16) |
2425 (tp->dev->dev_addr[4] << 8) |
2426 (tp->dev->dev_addr[5] << 0));
2427 for (i = 0; i < 4; i++) {
2428 if (i == 1 && skip_mac_1)
2430 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2431 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2435 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2436 for (i = 0; i < 12; i++) {
2437 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2438 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2442 addr_high = (tp->dev->dev_addr[0] +
2443 tp->dev->dev_addr[1] +
2444 tp->dev->dev_addr[2] +
2445 tp->dev->dev_addr[3] +
2446 tp->dev->dev_addr[4] +
2447 tp->dev->dev_addr[5]) &
2448 TX_BACKOFF_SEED_MASK;
2449 tw32(MAC_TX_BACKOFF_SEED, addr_high);
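/* Packing example for the register writes above: for the station address
* 00:10:18:01:02:03, addr_high = 0x00000010 (bytes 0-1) and
* addr_low = 0x18010203 (bytes 2-5).  The same pair is written to all four
* MAC_ADDR slots (and to the twelve extended slots on 5703/5704), and the
* byte sum masked with TX_BACKOFF_SEED_MASK seeds MAC_TX_BACKOFF_SEED.
*/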
2452 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2455 bool device_should_wake, do_low_power;
2457 /* Make sure register accesses (indirect or otherwise)
2458 * will function correctly.
2460 pci_write_config_dword(tp->pdev,
2461 TG3PCI_MISC_HOST_CTRL,
2462 tp->misc_host_ctrl);
2466 pci_enable_wake(tp->pdev, state, false);
2467 pci_set_power_state(tp->pdev, PCI_D0);
2469 /* Switch out of Vaux if it is a NIC */
2470 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2471 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2481 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2482 tp->dev->name, state);
2486 /* Restore the CLKREQ setting. */
2487 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2490 pci_read_config_word(tp->pdev,
2491 tp->pcie_cap + PCI_EXP_LNKCTL,
2493 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2494 pci_write_config_word(tp->pdev,
2495 tp->pcie_cap + PCI_EXP_LNKCTL,
2499 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2500 tw32(TG3PCI_MISC_HOST_CTRL,
2501 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2503 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2504 device_may_wakeup(&tp->pdev->dev) &&
2505 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2507 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2508 do_low_power = false;
2509 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2510 !tp->link_config.phy_is_low_power) {
2511 struct phy_device *phydev;
2512 u32 phyid, advertising;
2514 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2516 tp->link_config.phy_is_low_power = 1;
2518 tp->link_config.orig_speed = phydev->speed;
2519 tp->link_config.orig_duplex = phydev->duplex;
2520 tp->link_config.orig_autoneg = phydev->autoneg;
2521 tp->link_config.orig_advertising = phydev->advertising;
2523 advertising = ADVERTISED_TP |
2525 ADVERTISED_Autoneg |
2526 ADVERTISED_10baseT_Half;
2528 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2529 device_should_wake) {
2530 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2532 ADVERTISED_100baseT_Half |
2533 ADVERTISED_100baseT_Full |
2534 ADVERTISED_10baseT_Full;
2536 advertising |= ADVERTISED_10baseT_Full;
2539 phydev->advertising = advertising;
2541 phy_start_aneg(phydev);
2543 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2544 if (phyid != TG3_PHY_ID_BCMAC131) {
2545 phyid &= TG3_PHY_OUI_MASK;
2546 if (phyid == TG3_PHY_OUI_1 ||
2547 phyid == TG3_PHY_OUI_2 ||
2548 phyid == TG3_PHY_OUI_3)
2549 do_low_power = true;
2553 do_low_power = true;
2555 if (tp->link_config.phy_is_low_power == 0) {
2556 tp->link_config.phy_is_low_power = 1;
2557 tp->link_config.orig_speed = tp->link_config.speed;
2558 tp->link_config.orig_duplex = tp->link_config.duplex;
2559 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2562 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2563 tp->link_config.speed = SPEED_10;
2564 tp->link_config.duplex = DUPLEX_HALF;
2565 tp->link_config.autoneg = AUTONEG_ENABLE;
2566 tg3_setup_phy(tp, 0);
2570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2573 val = tr32(GRC_VCPU_EXT_CTRL);
2574 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2575 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2579 for (i = 0; i < 200; i++) {
2580 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2581 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2586 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2587 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2588 WOL_DRV_STATE_SHUTDOWN |
2592 if (device_should_wake) {
2595 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2597 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2601 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2602 mac_mode = MAC_MODE_PORT_MODE_GMII;
2604 mac_mode = MAC_MODE_PORT_MODE_MII;
2606 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2607 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2609 u32 speed = (tp->tg3_flags &
2610 TG3_FLAG_WOL_SPEED_100MB) ?
2611 SPEED_100 : SPEED_10;
2612 if (tg3_5700_link_polarity(tp, speed))
2613 mac_mode |= MAC_MODE_LINK_POLARITY;
2615 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2618 mac_mode = MAC_MODE_PORT_MODE_TBI;
2621 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2622 tw32(MAC_LED_CTRL, tp->led_ctrl);
2624 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2625 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2626 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2627 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2628 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2629 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2631 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2632 mac_mode |= tp->mac_mode &
2633 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2634 if (mac_mode & MAC_MODE_APE_TX_EN)
2635 mac_mode |= MAC_MODE_TDE_ENABLE;
2638 tw32_f(MAC_MODE, mac_mode);
2641 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2645 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2646 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2647 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2650 base_val = tp->pci_clock_ctrl;
2651 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2652 CLOCK_CTRL_TXCLK_DISABLE);
2654 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2655 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2656 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2657 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2658 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2660 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2661 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2662 u32 newbits1, newbits2;
2664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2665 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2666 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2667 CLOCK_CTRL_TXCLK_DISABLE |
2669 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2670 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2671 newbits1 = CLOCK_CTRL_625_CORE;
2672 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2674 newbits1 = CLOCK_CTRL_ALTCLK;
2675 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2678 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2681 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2684 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2689 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2690 CLOCK_CTRL_TXCLK_DISABLE |
2691 CLOCK_CTRL_44MHZ_CORE);
2693 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2696 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2697 tp->pci_clock_ctrl | newbits3, 40);
2701 if (!(device_should_wake) &&
2702 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2703 tg3_power_down_phy(tp, do_low_power);
2705 tg3_frob_aux_power(tp);
2707 /* Workaround for unstable PLL clock */
2708 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2709 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2710 u32 val = tr32(0x7d00);
2712 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2714 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2717 err = tg3_nvram_lock(tp);
2718 tg3_halt_cpu(tp, RX_CPU_BASE);
2720 tg3_nvram_unlock(tp);
2724 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2726 if (device_should_wake)
2727 pci_enable_wake(tp->pdev, state, true);
2729 /* Finally, set the new power state. */
2730 pci_set_power_state(tp->pdev, state);
2735 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2737 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2738 case MII_TG3_AUX_STAT_10HALF:
2740 *duplex = DUPLEX_HALF;
2743 case MII_TG3_AUX_STAT_10FULL:
2745 *duplex = DUPLEX_FULL;
2748 case MII_TG3_AUX_STAT_100HALF:
2750 *duplex = DUPLEX_HALF;
2753 case MII_TG3_AUX_STAT_100FULL:
2755 *duplex = DUPLEX_FULL;
2758 case MII_TG3_AUX_STAT_1000HALF:
2759 *speed = SPEED_1000;
2760 *duplex = DUPLEX_HALF;
2763 case MII_TG3_AUX_STAT_1000FULL:
2764 *speed = SPEED_1000;
2765 *duplex = DUPLEX_FULL;
2769 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2770 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2772 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2776 *speed = SPEED_INVALID;
2777 *duplex = DUPLEX_INVALID;
2782 static void tg3_phy_copper_begin(struct tg3 *tp)
2787 if (tp->link_config.phy_is_low_power) {
2788 /* Entering low power mode. Disable gigabit and
2789 * 100baseT advertisements.
2791 tg3_writephy(tp, MII_TG3_CTRL, 0);
2793 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2794 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2795 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2796 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2798 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2799 } else if (tp->link_config.speed == SPEED_INVALID) {
2800 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2801 tp->link_config.advertising &=
2802 ~(ADVERTISED_1000baseT_Half |
2803 ADVERTISED_1000baseT_Full);
2805 new_adv = ADVERTISE_CSMA;
2806 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2807 new_adv |= ADVERTISE_10HALF;
2808 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2809 new_adv |= ADVERTISE_10FULL;
2810 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2811 new_adv |= ADVERTISE_100HALF;
2812 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2813 new_adv |= ADVERTISE_100FULL;
2815 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2817 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2819 if (tp->link_config.advertising &
2820 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2822 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2823 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2824 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2825 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2826 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2827 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2828 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2829 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2830 MII_TG3_CTRL_ENABLE_AS_MASTER);
2831 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2833 tg3_writephy(tp, MII_TG3_CTRL, 0);
2836 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2837 new_adv |= ADVERTISE_CSMA;
2839 /* Asking for a specific link mode. */
2840 if (tp->link_config.speed == SPEED_1000) {
2841 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2843 if (tp->link_config.duplex == DUPLEX_FULL)
2844 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2846 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2847 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2848 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2849 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2850 MII_TG3_CTRL_ENABLE_AS_MASTER);
2852 if (tp->link_config.speed == SPEED_100) {
2853 if (tp->link_config.duplex == DUPLEX_FULL)
2854 new_adv |= ADVERTISE_100FULL;
2856 new_adv |= ADVERTISE_100HALF;
2858 if (tp->link_config.duplex == DUPLEX_FULL)
2859 new_adv |= ADVERTISE_10FULL;
2861 new_adv |= ADVERTISE_10HALF;
2863 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2868 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2871 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2872 tp->link_config.speed != SPEED_INVALID) {
2873 u32 bmcr, orig_bmcr;
2875 tp->link_config.active_speed = tp->link_config.speed;
2876 tp->link_config.active_duplex = tp->link_config.duplex;
2879 switch (tp->link_config.speed) {
2885 bmcr |= BMCR_SPEED100;
2889 bmcr |= TG3_BMCR_SPEED1000;
2893 if (tp->link_config.duplex == DUPLEX_FULL)
2894 bmcr |= BMCR_FULLDPLX;
2896 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2897 (bmcr != orig_bmcr)) {
2898 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2899 for (i = 0; i < 1500; i++) {
2903 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2904 tg3_readphy(tp, MII_BMSR, &tmp))
2906 if (!(tmp & BMSR_LSTATUS)) {
2911 tg3_writephy(tp, MII_BMCR, bmcr);
2915 tg3_writephy(tp, MII_BMCR,
2916 BMCR_ANENABLE | BMCR_ANRESTART);
2920 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2924 /* Turn off tap power management. */
2925 /* Set Extended packet length bit */
2926 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
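/* The writes below follow the usual indirect DSP access pattern for this
* PHY: the DSP register number goes to MII_TG3_DSP_ADDRESS and the value
* to MII_TG3_DSP_RW_PORT, one pair per register.
*/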
2928 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2929 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2931 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2932 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2934 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2935 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2937 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2938 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2940 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2941 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2948 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2950 u32 adv_reg, all_mask = 0;
2952 if (mask & ADVERTISED_10baseT_Half)
2953 all_mask |= ADVERTISE_10HALF;
2954 if (mask & ADVERTISED_10baseT_Full)
2955 all_mask |= ADVERTISE_10FULL;
2956 if (mask & ADVERTISED_100baseT_Half)
2957 all_mask |= ADVERTISE_100HALF;
2958 if (mask & ADVERTISED_100baseT_Full)
2959 all_mask |= ADVERTISE_100FULL;
2961 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2964 if ((adv_reg & all_mask) != all_mask)
2966 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2970 if (mask & ADVERTISED_1000baseT_Half)
2971 all_mask |= ADVERTISE_1000HALF;
2972 if (mask & ADVERTISED_1000baseT_Full)
2973 all_mask |= ADVERTISE_1000FULL;
2975 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2978 if ((tg3_ctrl & all_mask) != all_mask)
2984 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2988 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2991 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2992 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2994 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2995 if (curadv != reqadv)
2998 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2999 tg3_readphy(tp, MII_LPA, rmtadv);
3001 /* Reprogram the advertisement register, even if it
3002 * does not affect the current link. If the link
3003 * gets renegotiated in the future, we can save an
3004 * additional renegotiation cycle by advertising
3005 * it correctly in the first place.
3007 if (curadv != reqadv) {
3008 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3009 ADVERTISE_PAUSE_ASYM);
3010 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3017 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3019 int current_link_up;
3021 u32 lcl_adv, rmt_adv;
3029 (MAC_STATUS_SYNC_CHANGED |
3030 MAC_STATUS_CFG_CHANGED |
3031 MAC_STATUS_MI_COMPLETION |
3032 MAC_STATUS_LNKSTATE_CHANGED));
3035 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3037 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3041 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3043 /* Some third-party PHYs need to be reset on link going
* down.
*/
3046 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3049 netif_carrier_ok(tp->dev)) {
3050 tg3_readphy(tp, MII_BMSR, &bmsr);
3051 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3052 !(bmsr & BMSR_LSTATUS))
3058 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3059 tg3_readphy(tp, MII_BMSR, &bmsr);
3060 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3061 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3064 if (!(bmsr & BMSR_LSTATUS)) {
3065 err = tg3_init_5401phy_dsp(tp);
3069 tg3_readphy(tp, MII_BMSR, &bmsr);
3070 for (i = 0; i < 1000; i++) {
3072 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3073 (bmsr & BMSR_LSTATUS)) {
3079 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3080 !(bmsr & BMSR_LSTATUS) &&
3081 tp->link_config.active_speed == SPEED_1000) {
3082 err = tg3_phy_reset(tp);
3084 err = tg3_init_5401phy_dsp(tp);
3089 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3090 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3091 /* 5701 {A0,B0} CRC bug workaround */
3092 tg3_writephy(tp, 0x15, 0x0a75);
3093 tg3_writephy(tp, 0x1c, 0x8c68);
3094 tg3_writephy(tp, 0x1c, 0x8d68);
3095 tg3_writephy(tp, 0x1c, 0x8c68);
3098 /* Clear pending interrupts... */
3099 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3100 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3102 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3103 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3104 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3105 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3108 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3109 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3110 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3111 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3113 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3116 current_link_up = 0;
3117 current_speed = SPEED_INVALID;
3118 current_duplex = DUPLEX_INVALID;
3120 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3123 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3124 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3125 if (!(val & (1 << 10))) {
3127 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3133 for (i = 0; i < 100; i++) {
3134 tg3_readphy(tp, MII_BMSR, &bmsr);
3135 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3136 (bmsr & BMSR_LSTATUS))
3141 if (bmsr & BMSR_LSTATUS) {
3144 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3145 for (i = 0; i < 2000; i++) {
3147 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3152 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3157 for (i = 0; i < 200; i++) {
3158 tg3_readphy(tp, MII_BMCR, &bmcr);
3159 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3161 if (bmcr && bmcr != 0x7fff)
3169 tp->link_config.active_speed = current_speed;
3170 tp->link_config.active_duplex = current_duplex;
3172 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3173 if ((bmcr & BMCR_ANENABLE) &&
3174 tg3_copper_is_advertising_all(tp,
3175 tp->link_config.advertising)) {
3176 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3178 current_link_up = 1;
3181 if (!(bmcr & BMCR_ANENABLE) &&
3182 tp->link_config.speed == current_speed &&
3183 tp->link_config.duplex == current_duplex &&
3184 tp->link_config.flowctrl ==
3185 tp->link_config.active_flowctrl) {
3186 current_link_up = 1;
3190 if (current_link_up == 1 &&
3191 tp->link_config.active_duplex == DUPLEX_FULL)
3192 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3196 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3199 tg3_phy_copper_begin(tp);
3201 tg3_readphy(tp, MII_BMSR, &tmp);
3202 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3203 (tmp & BMSR_LSTATUS))
3204 current_link_up = 1;
3207 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3208 if (current_link_up == 1) {
3209 if (tp->link_config.active_speed == SPEED_100 ||
3210 tp->link_config.active_speed == SPEED_10)
3211 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3213 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3214 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3215 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3217 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3219 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3220 if (tp->link_config.active_duplex == DUPLEX_HALF)
3221 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3224 if (current_link_up == 1 &&
3225 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3226 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3228 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3231 /* ??? Without this setting Netgear GA302T PHY does not
3232 * ??? send/receive packets...
3234 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3235 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3236 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3237 tw32_f(MAC_MI_MODE, tp->mi_mode);
3241 tw32_f(MAC_MODE, tp->mac_mode);
3244 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3245 /* Polled via timer. */
3246 tw32_f(MAC_EVENT, 0);
3248 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3253 current_link_up == 1 &&
3254 tp->link_config.active_speed == SPEED_1000 &&
3255 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3256 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3259 (MAC_STATUS_SYNC_CHANGED |
3260 MAC_STATUS_CFG_CHANGED));
3263 NIC_SRAM_FIRMWARE_MBOX,
3264 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3267 /* Prevent send BD corruption. */
3268 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3269 u16 oldlnkctl, newlnkctl;
3271 pci_read_config_word(tp->pdev,
3272 tp->pcie_cap + PCI_EXP_LNKCTL,
3274 if (tp->link_config.active_speed == SPEED_100 ||
3275 tp->link_config.active_speed == SPEED_10)
3276 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3278 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3279 if (newlnkctl != oldlnkctl)
3280 pci_write_config_word(tp->pdev,
3281 tp->pcie_cap + PCI_EXP_LNKCTL,
3285 if (current_link_up != netif_carrier_ok(tp->dev)) {
3286 if (current_link_up)
3287 netif_carrier_on(tp->dev);
3289 netif_carrier_off(tp->dev);
3290 tg3_link_report(tp);
3296 struct tg3_fiber_aneginfo {
3298 #define ANEG_STATE_UNKNOWN 0
3299 #define ANEG_STATE_AN_ENABLE 1
3300 #define ANEG_STATE_RESTART_INIT 2
3301 #define ANEG_STATE_RESTART 3
3302 #define ANEG_STATE_DISABLE_LINK_OK 4
3303 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3304 #define ANEG_STATE_ABILITY_DETECT 6
3305 #define ANEG_STATE_ACK_DETECT_INIT 7
3306 #define ANEG_STATE_ACK_DETECT 8
3307 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3308 #define ANEG_STATE_COMPLETE_ACK 10
3309 #define ANEG_STATE_IDLE_DETECT_INIT 11
3310 #define ANEG_STATE_IDLE_DETECT 12
3311 #define ANEG_STATE_LINK_OK 13
3312 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3313 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3316 #define MR_AN_ENABLE 0x00000001
3317 #define MR_RESTART_AN 0x00000002
3318 #define MR_AN_COMPLETE 0x00000004
3319 #define MR_PAGE_RX 0x00000008
3320 #define MR_NP_LOADED 0x00000010
3321 #define MR_TOGGLE_TX 0x00000020
3322 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3323 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3324 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3325 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3326 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3327 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3328 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3329 #define MR_TOGGLE_RX 0x00002000
3330 #define MR_NP_RX 0x00004000
3332 #define MR_LINK_OK 0x80000000
3334 unsigned long link_time, cur_time;
3336 u32 ability_match_cfg;
3337 int ability_match_count;
3339 char ability_match, idle_match, ack_match;
3341 u32 txconfig, rxconfig;
3342 #define ANEG_CFG_NP 0x00000080
3343 #define ANEG_CFG_ACK 0x00000040
3344 #define ANEG_CFG_RF2 0x00000020
3345 #define ANEG_CFG_RF1 0x00000010
3346 #define ANEG_CFG_PS2 0x00000001
3347 #define ANEG_CFG_PS1 0x00008000
3348 #define ANEG_CFG_HD 0x00004000
3349 #define ANEG_CFG_FD 0x00002000
3350 #define ANEG_CFG_INVAL 0x00001f06
3355 #define ANEG_TIMER_ENAB 2
3356 #define ANEG_FAILED -1
3358 #define ANEG_STATE_SETTLE_TIME 10000
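/* The software autonegotiation below essentially follows the 1000BASE-X
* arbitration state machine of IEEE 802.3 Clause 37: AN_ENABLE restarts
* the process, RESTART transmits break-link (all-zero) code words for the
* settle time, ABILITY_DETECT sends our base config word until a stable
* partner word is seen, ACK_DETECT adds the ACK bit and waits for a
* matching acknowledgement, COMPLETE_ACK latches the partner abilities
* into the MR_* flags (which mirror the clause-37 "mr_" management
* variables), and IDLE_DETECT waits for idles before declaring LINK_OK.
*/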
3360 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3361 struct tg3_fiber_aneginfo *ap)
3364 unsigned long delta;
3368 if (ap->state == ANEG_STATE_UNKNOWN) {
3372 ap->ability_match_cfg = 0;
3373 ap->ability_match_count = 0;
3374 ap->ability_match = 0;
3380 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3381 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3383 if (rx_cfg_reg != ap->ability_match_cfg) {
3384 ap->ability_match_cfg = rx_cfg_reg;
3385 ap->ability_match = 0;
3386 ap->ability_match_count = 0;
3388 if (++ap->ability_match_count > 1) {
3389 ap->ability_match = 1;
3390 ap->ability_match_cfg = rx_cfg_reg;
3393 if (rx_cfg_reg & ANEG_CFG_ACK)
3401 ap->ability_match_cfg = 0;
3402 ap->ability_match_count = 0;
3403 ap->ability_match = 0;
3409 ap->rxconfig = rx_cfg_reg;
3413 case ANEG_STATE_UNKNOWN:
3414 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3415 ap->state = ANEG_STATE_AN_ENABLE;
3418 case ANEG_STATE_AN_ENABLE:
3419 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3420 if (ap->flags & MR_AN_ENABLE) {
3423 ap->ability_match_cfg = 0;
3424 ap->ability_match_count = 0;
3425 ap->ability_match = 0;
3429 ap->state = ANEG_STATE_RESTART_INIT;
3431 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3435 case ANEG_STATE_RESTART_INIT:
3436 ap->link_time = ap->cur_time;
3437 ap->flags &= ~(MR_NP_LOADED);
3439 tw32(MAC_TX_AUTO_NEG, 0);
3440 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3441 tw32_f(MAC_MODE, tp->mac_mode);
3444 ret = ANEG_TIMER_ENAB;
3445 ap->state = ANEG_STATE_RESTART;
3448 case ANEG_STATE_RESTART:
3449 delta = ap->cur_time - ap->link_time;
3450 if (delta > ANEG_STATE_SETTLE_TIME) {
3451 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3453 ret = ANEG_TIMER_ENAB;
3457 case ANEG_STATE_DISABLE_LINK_OK:
3461 case ANEG_STATE_ABILITY_DETECT_INIT:
3462 ap->flags &= ~(MR_TOGGLE_TX);
3463 ap->txconfig = ANEG_CFG_FD;
3464 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3465 if (flowctrl & ADVERTISE_1000XPAUSE)
3466 ap->txconfig |= ANEG_CFG_PS1;
3467 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3468 ap->txconfig |= ANEG_CFG_PS2;
3469 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3470 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3471 tw32_f(MAC_MODE, tp->mac_mode);
3474 ap->state = ANEG_STATE_ABILITY_DETECT;
3477 case ANEG_STATE_ABILITY_DETECT:
3478 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3479 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3483 case ANEG_STATE_ACK_DETECT_INIT:
3484 ap->txconfig |= ANEG_CFG_ACK;
3485 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3486 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3487 tw32_f(MAC_MODE, tp->mac_mode);
3490 ap->state = ANEG_STATE_ACK_DETECT;
3493 case ANEG_STATE_ACK_DETECT:
3494 if (ap->ack_match != 0) {
3495 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3496 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3497 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3499 ap->state = ANEG_STATE_AN_ENABLE;
3501 } else if (ap->ability_match != 0 &&
3502 ap->rxconfig == 0) {
3503 ap->state = ANEG_STATE_AN_ENABLE;
3507 case ANEG_STATE_COMPLETE_ACK_INIT:
3508 if (ap->rxconfig & ANEG_CFG_INVAL) {
3512 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3513 MR_LP_ADV_HALF_DUPLEX |
3514 MR_LP_ADV_SYM_PAUSE |
3515 MR_LP_ADV_ASYM_PAUSE |
3516 MR_LP_ADV_REMOTE_FAULT1 |
3517 MR_LP_ADV_REMOTE_FAULT2 |
3518 MR_LP_ADV_NEXT_PAGE |
3521 if (ap->rxconfig & ANEG_CFG_FD)
3522 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3523 if (ap->rxconfig & ANEG_CFG_HD)
3524 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3525 if (ap->rxconfig & ANEG_CFG_PS1)
3526 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3527 if (ap->rxconfig & ANEG_CFG_PS2)
3528 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3529 if (ap->rxconfig & ANEG_CFG_RF1)
3530 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3531 if (ap->rxconfig & ANEG_CFG_RF2)
3532 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3533 if (ap->rxconfig & ANEG_CFG_NP)
3534 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3536 ap->link_time = ap->cur_time;
3538 ap->flags ^= (MR_TOGGLE_TX);
3539 if (ap->rxconfig & 0x0008)
3540 ap->flags |= MR_TOGGLE_RX;
3541 if (ap->rxconfig & ANEG_CFG_NP)
3542 ap->flags |= MR_NP_RX;
3543 ap->flags |= MR_PAGE_RX;
3545 ap->state = ANEG_STATE_COMPLETE_ACK;
3546 ret = ANEG_TIMER_ENAB;
3549 case ANEG_STATE_COMPLETE_ACK:
3550 if (ap->ability_match != 0 &&
3551 ap->rxconfig == 0) {
3552 ap->state = ANEG_STATE_AN_ENABLE;
3555 delta = ap->cur_time - ap->link_time;
3556 if (delta > ANEG_STATE_SETTLE_TIME) {
3557 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3558 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3560 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3561 !(ap->flags & MR_NP_RX)) {
3562 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3570 case ANEG_STATE_IDLE_DETECT_INIT:
3571 ap->link_time = ap->cur_time;
3572 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3573 tw32_f(MAC_MODE, tp->mac_mode);
3576 ap->state = ANEG_STATE_IDLE_DETECT;
3577 ret = ANEG_TIMER_ENAB;
3580 case ANEG_STATE_IDLE_DETECT:
3581 if (ap->ability_match != 0 &&
3582 ap->rxconfig == 0) {
3583 ap->state = ANEG_STATE_AN_ENABLE;
3586 delta = ap->cur_time - ap->link_time;
3587 if (delta > ANEG_STATE_SETTLE_TIME) {
3588 /* XXX another gem from the Broadcom driver :( */
3589 ap->state = ANEG_STATE_LINK_OK;
3593 case ANEG_STATE_LINK_OK:
3594 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3598 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3599 /* ??? unimplemented */
3602 case ANEG_STATE_NEXT_PAGE_WAIT:
3603 /* ??? unimplemented */
3614 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3617 struct tg3_fiber_aneginfo aninfo;
3618 int status = ANEG_FAILED;
3622 tw32_f(MAC_TX_AUTO_NEG, 0);
3624 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3625 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3628 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3631 memset(&aninfo, 0, sizeof(aninfo));
3632 aninfo.flags |= MR_AN_ENABLE;
3633 aninfo.state = ANEG_STATE_UNKNOWN;
3634 aninfo.cur_time = 0;
3636 while (++tick < 195000) {
3637 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3638 if (status == ANEG_DONE || status == ANEG_FAILED)
3644 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3645 tw32_f(MAC_MODE, tp->mac_mode);
3648 *txflags = aninfo.txconfig;
3649 *rxflags = aninfo.flags;
3651 if (status == ANEG_DONE &&
3652 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3653 MR_LP_ADV_FULL_DUPLEX)))
3659 static void tg3_init_bcm8002(struct tg3 *tp)
3661 u32 mac_status = tr32(MAC_STATUS);
3664 /* Reset when initializing for the first time or when we have a link. */
3665 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3666 !(mac_status & MAC_STATUS_PCS_SYNCED))
3669 /* Set PLL lock range. */
3670 tg3_writephy(tp, 0x16, 0x8007);
3673 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3675 /* Wait for reset to complete. */
3676 /* XXX schedule_timeout() ... */
3677 for (i = 0; i < 500; i++)
3680 /* Config mode; select PMA/Ch 1 regs. */
3681 tg3_writephy(tp, 0x10, 0x8411);
3683 /* Enable auto-lock and comdet, select txclk for tx. */
3684 tg3_writephy(tp, 0x11, 0x0a10);
3686 tg3_writephy(tp, 0x18, 0x00a0);
3687 tg3_writephy(tp, 0x16, 0x41ff);
3689 /* Assert and deassert POR. */
3690 tg3_writephy(tp, 0x13, 0x0400);
3692 tg3_writephy(tp, 0x13, 0x0000);
3694 tg3_writephy(tp, 0x11, 0x0a50);
3696 tg3_writephy(tp, 0x11, 0x0a10);
3698 /* Wait for signal to stabilize */
3699 /* XXX schedule_timeout() ... */
3700 for (i = 0; i < 15000; i++)
3703 /* Deselect the channel register so we can read the PHYID
* later.
*/
3706 tg3_writephy(tp, 0x10, 0x8011);
3709 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3712 u32 sg_dig_ctrl, sg_dig_status;
3713 u32 serdes_cfg, expected_sg_dig_ctrl;
3714 int workaround, port_a;
3715 int current_link_up;
3718 expected_sg_dig_ctrl = 0;
3721 current_link_up = 0;
3723 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3724 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3726 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3729 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3730 /* preserve bits 20-23 for voltage regulator */
3731 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3734 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3736 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3737 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3739 u32 val = serdes_cfg;
3745 tw32_f(MAC_SERDES_CFG, val);
3748 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3750 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3751 tg3_setup_flow_control(tp, 0, 0);
3752 current_link_up = 1;
3757 /* Want auto-negotiation. */
3758 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3760 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3761 if (flowctrl & ADVERTISE_1000XPAUSE)
3762 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3763 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3764 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3766 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3767 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3768 tp->serdes_counter &&
3769 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3770 MAC_STATUS_RCVD_CFG)) ==
3771 MAC_STATUS_PCS_SYNCED)) {
3772 tp->serdes_counter--;
3773 current_link_up = 1;
3778 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3779 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3781 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3783 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3784 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3785 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3786 MAC_STATUS_SIGNAL_DET)) {
3787 sg_dig_status = tr32(SG_DIG_STATUS);
3788 mac_status = tr32(MAC_STATUS);
3790 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3791 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3792 u32 local_adv = 0, remote_adv = 0;
3794 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3795 local_adv |= ADVERTISE_1000XPAUSE;
3796 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3797 local_adv |= ADVERTISE_1000XPSE_ASYM;
3799 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3800 remote_adv |= LPA_1000XPAUSE;
3801 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3802 remote_adv |= LPA_1000XPAUSE_ASYM;
3804 tg3_setup_flow_control(tp, local_adv, remote_adv);
3805 current_link_up = 1;
3806 tp->serdes_counter = 0;
3807 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3808 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3809 if (tp->serdes_counter)
3810 tp->serdes_counter--;
3813 u32 val = serdes_cfg;
3820 tw32_f(MAC_SERDES_CFG, val);
3823 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3826 /* Link parallel detection - link is up */
3827 /* only if we have PCS_SYNC and not */
3828 /* receiving config code words */
3829 mac_status = tr32(MAC_STATUS);
3830 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3831 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3832 tg3_setup_flow_control(tp, 0, 0);
3833 current_link_up = 1;
3835 TG3_FLG2_PARALLEL_DETECT;
3836 tp->serdes_counter =
3837 SERDES_PARALLEL_DET_TIMEOUT;
3839 goto restart_autoneg;
3843 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3844 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3848 return current_link_up;
3851 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3853 int current_link_up = 0;
3855 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3858 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3859 u32 txflags, rxflags;
3862 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3863 u32 local_adv = 0, remote_adv = 0;
3865 if (txflags & ANEG_CFG_PS1)
3866 local_adv |= ADVERTISE_1000XPAUSE;
3867 if (txflags & ANEG_CFG_PS2)
3868 local_adv |= ADVERTISE_1000XPSE_ASYM;
3870 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3871 remote_adv |= LPA_1000XPAUSE;
3872 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3873 remote_adv |= LPA_1000XPAUSE_ASYM;
3875 tg3_setup_flow_control(tp, local_adv, remote_adv);
3877 current_link_up = 1;
3879 for (i = 0; i < 30; i++) {
3882 (MAC_STATUS_SYNC_CHANGED |
3883 MAC_STATUS_CFG_CHANGED));
3885 if ((tr32(MAC_STATUS) &
3886 (MAC_STATUS_SYNC_CHANGED |
3887 MAC_STATUS_CFG_CHANGED)) == 0)
3891 mac_status = tr32(MAC_STATUS);
3892 if (current_link_up == 0 &&
3893 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3894 !(mac_status & MAC_STATUS_RCVD_CFG))
3895 current_link_up = 1;
3897 tg3_setup_flow_control(tp, 0, 0);
3899 /* Forcing 1000FD link up. */
3900 current_link_up = 1;
3902 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3905 tw32_f(MAC_MODE, tp->mac_mode);
3910 return current_link_up;
3913 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3916 u16 orig_active_speed;
3917 u8 orig_active_duplex;
3919 int current_link_up;
3922 orig_pause_cfg = tp->link_config.active_flowctrl;
3923 orig_active_speed = tp->link_config.active_speed;
3924 orig_active_duplex = tp->link_config.active_duplex;
3926 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3927 netif_carrier_ok(tp->dev) &&
3928 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3929 mac_status = tr32(MAC_STATUS);
3930 mac_status &= (MAC_STATUS_PCS_SYNCED |
3931 MAC_STATUS_SIGNAL_DET |
3932 MAC_STATUS_CFG_CHANGED |
3933 MAC_STATUS_RCVD_CFG);
3934 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3935 MAC_STATUS_SIGNAL_DET)) {
3936 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3937 MAC_STATUS_CFG_CHANGED));
3942 tw32_f(MAC_TX_AUTO_NEG, 0);
3944 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3945 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3946 tw32_f(MAC_MODE, tp->mac_mode);
3949 if (tp->phy_id == PHY_ID_BCM8002)
3950 tg3_init_bcm8002(tp);
3952 /* Enable link change event even when serdes polling. */
3953 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3956 current_link_up = 0;
3957 mac_status = tr32(MAC_STATUS);
3959 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3960 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3962 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3964 tp->napi[0].hw_status->status =
3965 (SD_STATUS_UPDATED |
3966 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3968 for (i = 0; i < 100; i++) {
3969 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3970 MAC_STATUS_CFG_CHANGED));
3972 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3973 MAC_STATUS_CFG_CHANGED |
3974 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3978 mac_status = tr32(MAC_STATUS);
3979 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3980 current_link_up = 0;
3981 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3982 tp->serdes_counter == 0) {
3983 tw32_f(MAC_MODE, (tp->mac_mode |
3984 MAC_MODE_SEND_CONFIGS));
3986 tw32_f(MAC_MODE, tp->mac_mode);
3990 if (current_link_up == 1) {
3991 tp->link_config.active_speed = SPEED_1000;
3992 tp->link_config.active_duplex = DUPLEX_FULL;
3993 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3994 LED_CTRL_LNKLED_OVERRIDE |
3995 LED_CTRL_1000MBPS_ON));
3997 tp->link_config.active_speed = SPEED_INVALID;
3998 tp->link_config.active_duplex = DUPLEX_INVALID;
3999 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4000 LED_CTRL_LNKLED_OVERRIDE |
4001 LED_CTRL_TRAFFIC_OVERRIDE));
4004 if (current_link_up != netif_carrier_ok(tp->dev)) {
4005 if (current_link_up)
4006 netif_carrier_on(tp->dev);
4008 netif_carrier_off(tp->dev);
4009 tg3_link_report(tp);
4011 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4012 if (orig_pause_cfg != now_pause_cfg ||
4013 orig_active_speed != tp->link_config.active_speed ||
4014 orig_active_duplex != tp->link_config.active_duplex)
4015 tg3_link_report(tp);
4021 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4023 int current_link_up, err = 0;
4027 u32 local_adv, remote_adv;
4029 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4030 tw32_f(MAC_MODE, tp->mac_mode);
4036 (MAC_STATUS_SYNC_CHANGED |
4037 MAC_STATUS_CFG_CHANGED |
4038 MAC_STATUS_MI_COMPLETION |
4039 MAC_STATUS_LNKSTATE_CHANGED));
4045 current_link_up = 0;
4046 current_speed = SPEED_INVALID;
4047 current_duplex = DUPLEX_INVALID;
4049 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4050 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4052 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4053 bmsr |= BMSR_LSTATUS;
4055 bmsr &= ~BMSR_LSTATUS;
4058 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4060 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4061 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4062 /* do nothing, just check for link up at the end */
4063 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4066 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4067 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4068 ADVERTISE_1000XPAUSE |
4069 ADVERTISE_1000XPSE_ASYM |
4072 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4074 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4075 new_adv |= ADVERTISE_1000XHALF;
4076 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4077 new_adv |= ADVERTISE_1000XFULL;
4079 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4080 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4081 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4082 tg3_writephy(tp, MII_BMCR, bmcr);
4084 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4085 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4086 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4093 bmcr &= ~BMCR_SPEED1000;
4094 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4096 if (tp->link_config.duplex == DUPLEX_FULL)
4097 new_bmcr |= BMCR_FULLDPLX;
4099 if (new_bmcr != bmcr) {
4100 /* BMCR_SPEED1000 is a reserved bit that needs
4101 * to be set on write.
4103 new_bmcr |= BMCR_SPEED1000;
4105 /* Force a linkdown */
4106 if (netif_carrier_ok(tp->dev)) {
4109 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4110 adv &= ~(ADVERTISE_1000XFULL |
4111 ADVERTISE_1000XHALF |
4113 tg3_writephy(tp, MII_ADVERTISE, adv);
4114 tg3_writephy(tp, MII_BMCR, bmcr |
4118 netif_carrier_off(tp->dev);
4120 tg3_writephy(tp, MII_BMCR, new_bmcr);
4122 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4123 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4124 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4126 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4127 bmsr |= BMSR_LSTATUS;
4129 bmsr &= ~BMSR_LSTATUS;
4131 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4135 if (bmsr & BMSR_LSTATUS) {
4136 current_speed = SPEED_1000;
4137 current_link_up = 1;
4138 if (bmcr & BMCR_FULLDPLX)
4139 current_duplex = DUPLEX_FULL;
4141 current_duplex = DUPLEX_HALF;
4146 if (bmcr & BMCR_ANENABLE) {
4149 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4150 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4151 common = local_adv & remote_adv;
4152 if (common & (ADVERTISE_1000XHALF |
4153 ADVERTISE_1000XFULL)) {
4154 if (common & ADVERTISE_1000XFULL)
4155 current_duplex = DUPLEX_FULL;
4157 current_duplex = DUPLEX_HALF;
4160 current_link_up = 0;
4164 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4165 tg3_setup_flow_control(tp, local_adv, remote_adv);
4167 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4168 if (tp->link_config.active_duplex == DUPLEX_HALF)
4169 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4171 tw32_f(MAC_MODE, tp->mac_mode);
4174 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4176 tp->link_config.active_speed = current_speed;
4177 tp->link_config.active_duplex = current_duplex;
4179 if (current_link_up != netif_carrier_ok(tp->dev)) {
4180 if (current_link_up)
4181 netif_carrier_on(tp->dev);
4183 netif_carrier_off(tp->dev);
4184 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4186 tg3_link_report(tp);
4191 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4193 if (tp->serdes_counter) {
4194 /* Give autoneg time to complete. */
4195 tp->serdes_counter--;
4198 if (!netif_carrier_ok(tp->dev) &&
4199 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4202 tg3_readphy(tp, MII_BMCR, &bmcr);
4203 if (bmcr & BMCR_ANENABLE) {
4206 /* Select shadow register 0x1f */
4207 tg3_writephy(tp, 0x1c, 0x7c00);
4208 tg3_readphy(tp, 0x1c, &phy1);
4210 /* Select expansion interrupt status register */
4211 tg3_writephy(tp, 0x17, 0x0f01);
4212 tg3_readphy(tp, 0x15, &phy2);
4213 tg3_readphy(tp, 0x15, &phy2);
4215 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4216 /* We have signal detect and not receiving
4217 * config code words, link is up by parallel
* detection.
*/
4221 bmcr &= ~BMCR_ANENABLE;
4222 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4223 tg3_writephy(tp, MII_BMCR, bmcr);
4224 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4228 else if (netif_carrier_ok(tp->dev) &&
4229 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4230 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4233 /* Select expansion interrupt status register */
4234 tg3_writephy(tp, 0x17, 0x0f01);
4235 tg3_readphy(tp, 0x15, &phy2);
4239 /* Config code words received, turn on autoneg. */
4240 tg3_readphy(tp, MII_BMCR, &bmcr);
4241 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4243 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4249 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4253 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4254 err = tg3_setup_fiber_phy(tp, force_reset);
4255 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4256 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4258 err = tg3_setup_copper_phy(tp, force_reset);
4261 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4264 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4265 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4267 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4272 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4273 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4274 tw32(GRC_MISC_CFG, val);
4277 if (tp->link_config.active_speed == SPEED_1000 &&
4278 tp->link_config.active_duplex == DUPLEX_HALF)
4279 tw32(MAC_TX_LENGTHS,
4280 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4281 (6 << TX_LENGTHS_IPG_SHIFT) |
4282 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4284 tw32(MAC_TX_LENGTHS,
4285 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4286 (6 << TX_LENGTHS_IPG_SHIFT) |
4287 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4289 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4290 if (netif_carrier_ok(tp->dev)) {
4291 tw32(HOSTCC_STAT_COAL_TICKS,
4292 tp->coal.stats_block_coalesce_usecs);
4294 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4298 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4299 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4300 if (!netif_carrier_ok(tp->dev))
4301 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4304 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4305 tw32(PCIE_PWR_MGMT_THRESH, val);
4311 /* This is called whenever we suspect that the system chipset is re-
4312 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4313 * is bogus tx completions. We try to recover by setting the
4314 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
* in the workqueue context (tg3_reset_task).
*/
4317 static void tg3_tx_recover(struct tg3 *tp)
4319 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4320 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4322 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4323 "mapped I/O cycles to the network device, attempting to "
4324 "recover. Please report the problem to the driver maintainer "
4325 "and include system chipset information.\n", tp->dev->name);
4327 spin_lock(&tp->lock);
4328 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4329 spin_unlock(&tp->lock);
4332 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4335 return tnapi->tx_pending -
4336 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
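/* Illustrative sketch (not part of the driver): tg3_tx_avail() works with
* free-running producer/consumer indices and a power-of-two ring size, so
* the masked subtraction counts in-flight descriptors correctly even
* across wraparound.  For example, with a 512-entry ring, tx_prod == 5 and
* tx_cons == 510 give (5 - 510) & 511 == 7 in flight, leaving
* tx_pending - 7 slots available.
*/
static inline u32 tg3_tx_in_flight_example(u32 prod, u32 cons, u32 ring_size)
{
	/* ring_size must be a power of two for the mask to be valid */
	return (prod - cons) & (ring_size - 1);
}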
4339 /* Tigon3 never reports partial packet sends. So we do not
4340 * need special logic to handle SKBs that have not had all
4341 * of their frags sent yet, like SunGEM does.
4343 static void tg3_tx(struct tg3_napi *tnapi)
4345 struct tg3 *tp = tnapi->tp;
4346 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4347 u32 sw_idx = tnapi->tx_cons;
4348 struct netdev_queue *txq;
4349 int index = tnapi - tp->napi;
4351 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
4354 txq = netdev_get_tx_queue(tp->dev, index);
4356 while (sw_idx != hw_idx) {
4357 struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4358 struct sk_buff *skb = ri->skb;
4361 if (unlikely(skb == NULL)) {
4366 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4370 sw_idx = NEXT_TX(sw_idx);
4372 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4373 ri = &tnapi->tx_buffers[sw_idx];
4374 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4376 sw_idx = NEXT_TX(sw_idx);
4381 if (unlikely(tx_bug)) {
4387 tnapi->tx_cons = sw_idx;
4389 /* Need to make the tx_cons update visible to tg3_start_xmit()
4390 * before checking for netif_queue_stopped(). Without the
4391 * memory barrier, there is a small possibility that tg3_start_xmit()
4392 * will miss it and cause the queue to be stopped forever.
*/
smp_mb();
4396 if (unlikely(netif_tx_queue_stopped(txq) &&
4397 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4398 __netif_tx_lock(txq, smp_processor_id());
4399 if (netif_tx_queue_stopped(txq) &&
4400 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4401 netif_tx_wake_queue(txq);
4402 __netif_tx_unlock(txq);
4406 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4411 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4412 map_sz, PCI_DMA_FROMDEVICE);
4413 dev_kfree_skb_any(ri->skb);
4417 /* Returns size of skb allocated or < 0 on error.
4419 * We only need to fill in the address because the other members
4420 * of the RX descriptor are invariant, see tg3_init_rings.
4422 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4423 * posting buffers we only dirty the first cache line of the RX
4424 * descriptor (containing the address). Whereas for the RX status
4425 * buffers the cpu only reads the last cacheline of the RX descriptor
4426 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4428 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4429 u32 opaque_key, u32 dest_idx_unmasked)
4431 struct tg3_rx_buffer_desc *desc;
4432 struct ring_info *map, *src_map;
4433 struct sk_buff *skb;
4435 int skb_size, dest_idx;
4438 switch (opaque_key) {
4439 case RXD_OPAQUE_RING_STD:
4440 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4441 desc = &tpr->rx_std[dest_idx];
4442 map = &tpr->rx_std_buffers[dest_idx];
4443 skb_size = tp->rx_pkt_map_sz;
4446 case RXD_OPAQUE_RING_JUMBO:
4447 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4448 desc = &tpr->rx_jmb[dest_idx].std;
4449 map = &tpr->rx_jmb_buffers[dest_idx];
4450 skb_size = TG3_RX_JMB_MAP_SZ;
4457 /* Do not overwrite any of the map or rp information
4458 * until we are sure we can commit to a new buffer.
4460 * Callers depend upon this behavior and assume that
4461 * we leave everything unchanged if we fail.
4463 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4467 skb_reserve(skb, tp->rx_offset);
4469 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4470 PCI_DMA_FROMDEVICE);
4471 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4477 pci_unmap_addr_set(map, mapping, mapping);
4479 desc->addr_hi = ((u64)mapping >> 32);
4480 desc->addr_lo = ((u64)mapping & 0xffffffff);
4485 /* We only need to move the address over because the other
4486 * members of the RX descriptor are invariant. See notes above
4487 * tg3_alloc_rx_skb for full details.
4489 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4490 struct tg3_rx_prodring_set *dpr,
4491 u32 opaque_key, int src_idx,
4492 u32 dest_idx_unmasked)
4494 struct tg3 *tp = tnapi->tp;
4495 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4496 struct ring_info *src_map, *dest_map;
4498 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4500 switch (opaque_key) {
4501 case RXD_OPAQUE_RING_STD:
4502 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4503 dest_desc = &dpr->rx_std[dest_idx];
4504 dest_map = &dpr->rx_std_buffers[dest_idx];
4505 src_desc = &spr->rx_std[src_idx];
4506 src_map = &spr->rx_std_buffers[src_idx];
4509 case RXD_OPAQUE_RING_JUMBO:
4510 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4511 dest_desc = &dpr->rx_jmb[dest_idx].std;
4512 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4513 src_desc = &spr->rx_jmb[src_idx].std;
4514 src_map = &spr->rx_jmb_buffers[src_idx];
4521 dest_map->skb = src_map->skb;
4522 pci_unmap_addr_set(dest_map, mapping,
4523 pci_unmap_addr(src_map, mapping));
4524 dest_desc->addr_hi = src_desc->addr_hi;
4525 dest_desc->addr_lo = src_desc->addr_lo;
4526 src_map->skb = NULL;
4529 /* The RX ring scheme is composed of multiple rings which post fresh
4530 * buffers to the chip, and one special ring the chip uses to report
4531 * status back to the host.
4533 * The special ring reports the status of received packets to the
4534 * host. The chip does not write into the original descriptor the
4535 * RX buffer was obtained from. The chip simply takes the original
4536 * descriptor as provided by the host, updates the status and length
4537 * field, then writes this into the next status ring entry.
4539 * Each ring the host uses to post buffers to the chip is described
4540 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4541 * it is first placed into the on-chip ram. When the packet's length
4542 * is known, it walks down the TG3_BDINFO entries to select the ring.
4543 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4544 * which is within the range of the new packet's length is chosen.
4546 * The "separate ring for rx status" scheme may sound queer, but it makes
4547 * sense from a cache coherency perspective. If only the host writes
4548 * to the buffer post rings, and only the chip writes to the rx status
4549 * rings, then cache lines never move beyond shared-modified state.
4550 * If both the host and chip were to write into the same ring, cache line
4551 * eviction could occur since both entities want it in an exclusive state.
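 *
 * Roughly, the receive path in tg3_rx() below works like this: the chip
 * advances the return-ring producer index in the status block; tg3_rx()
 * walks its own consumer index up to that producer index, and for each
 * completed entry either hands the existing buffer up the stack (posting
 * a fresh one in its place) or copies a small packet and recycles the
 * original buffer.  Finally the new consumer index is ACKed and the
 * standard/jumbo producer mailboxes are written so the chip sees the
 * replenished buffers.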
4553 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4555 struct tg3 *tp = tnapi->tp;
4556 u32 work_mask, rx_std_posted = 0;
4557 u32 std_prod_idx, jmb_prod_idx;
4558 u32 sw_idx = tnapi->rx_rcb_ptr;
4561 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4563 hw_idx = *(tnapi->rx_rcb_prod_idx);
4565 * We need to order the read of hw_idx and the read of
4566 * the opaque cookie.
4571 std_prod_idx = tpr->rx_std_prod_idx;
4572 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4573 while (sw_idx != hw_idx && budget > 0) {
4574 struct ring_info *ri;
4575 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4577 struct sk_buff *skb;
4578 dma_addr_t dma_addr;
4579 u32 opaque_key, desc_idx, *post_ptr;
4581 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4582 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4583 if (opaque_key == RXD_OPAQUE_RING_STD) {
4584 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4585 dma_addr = pci_unmap_addr(ri, mapping);
4587 post_ptr = &std_prod_idx;
4589 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4590 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4591 dma_addr = pci_unmap_addr(ri, mapping);
4593 post_ptr = &jmb_prod_idx;
4595 goto next_pkt_nopost;
4597 work_mask |= opaque_key;
4599 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4600 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4602 tg3_recycle_rx(tnapi, tpr, opaque_key,
4603 desc_idx, *post_ptr);
4605 /* Other statistics are kept track of by the card. */
4606 tp->net_stats.rx_dropped++;
4610 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
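		/* Large packets (above RX_COPY_THRESHOLD) are passed up in the
		 * original buffer and a fresh skb is allocated for the ring;
		 * small packets are copied into a new skb so the original ring
		 * buffer can be recycled cheaply.
		 */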
4613 if (len > RX_COPY_THRESHOLD
4614 && tp->rx_offset == NET_IP_ALIGN
4615 /* rx_offset will likely not equal NET_IP_ALIGN
4616 * if this is a 5701 card running in PCI-X mode
4617 * [see tg3_get_invariants()]
4622 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4629 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4630 PCI_DMA_FROMDEVICE);
4634 struct sk_buff *copy_skb;
4636 tg3_recycle_rx(tnapi, tpr, opaque_key,
4637 desc_idx, *post_ptr);
4639 copy_skb = netdev_alloc_skb(tp->dev,
4640 len + TG3_RAW_IP_ALIGN);
4641 if (copy_skb == NULL)
4642 goto drop_it_no_recycle;
4644 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4645 skb_put(copy_skb, len);
4646 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4647 skb_copy_from_linear_data(skb, copy_skb->data, len);
4648 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4650 /* We'll reuse the original ring buffer. */
4654 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4655 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4656 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4657 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4658 skb->ip_summed = CHECKSUM_UNNECESSARY;
4660 skb->ip_summed = CHECKSUM_NONE;
4662 skb->protocol = eth_type_trans(skb, tp->dev);
4664 if (len > (tp->dev->mtu + ETH_HLEN) &&
4665 skb->protocol != htons(ETH_P_8021Q)) {
4670 #if TG3_VLAN_TAG_USED
4671 if (tp->vlgrp != NULL &&
4672 desc->type_flags & RXD_FLAG_VLAN) {
4673 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4674 desc->err_vlan & RXD_VLAN_MASK, skb);
4677 napi_gro_receive(&tnapi->napi, skb);
4685 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
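			/* Enough standard buffers have been replenished in this
			 * pass; kick the producer mailbox now and clear the flag
			 * so the refill code at the bottom of this function does
			 * not write it a second time.
			 */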
4686 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4687 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
4688 work_mask &= ~RXD_OPAQUE_RING_STD;
4693 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4695 /* Refresh hw_idx to see if there is new work */
4696 if (sw_idx == hw_idx) {
4697 hw_idx = *(tnapi->rx_rcb_prod_idx);
4702 /* ACK the status ring. */
4703 tnapi->rx_rcb_ptr = sw_idx;
4704 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4706 /* Refill RX ring(s). */
4707 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
4708 if (work_mask & RXD_OPAQUE_RING_STD) {
4709 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4710 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4711 tpr->rx_std_prod_idx);
4713 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4714 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4715 TG3_RX_JUMBO_RING_SIZE;
4716 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4717 tpr->rx_jmb_prod_idx);
4720 } else if (work_mask) {
4721 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4722 * updated before the producer indices can be updated.
4726 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4727 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4729 napi_schedule(&tp->napi[1].napi);
4735 static void tg3_poll_link(struct tg3 *tp)
4737 /* handle link change and other phy events */
4738 if (!(tp->tg3_flags &
4739 (TG3_FLAG_USE_LINKCHG_REG |
4740 TG3_FLAG_POLL_SERDES))) {
4741 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4743 if (sblk->status & SD_STATUS_LINK_CHG) {
4744 sblk->status = SD_STATUS_UPDATED |
4745 (sblk->status & ~SD_STATUS_LINK_CHG);
4746 spin_lock(&tp->lock);
4747 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4749 (MAC_STATUS_SYNC_CHANGED |
4750 MAC_STATUS_CFG_CHANGED |
4751 MAC_STATUS_MI_COMPLETION |
4752 MAC_STATUS_LNKSTATE_CHANGED));
4755 tg3_setup_phy(tp, 0);
4756 spin_unlock(&tp->lock);
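/* Move newly posted buffers from the source producer ring set 'spr' to the
 * destination set 'dpr': the ring_info entries are memcpy'd in chunks and
 * the descriptor addresses copied over, with the source consumer and
 * destination producer indexes advancing modulo the ring size so a wrap is
 * simply handled on the next pass.  tg3_poll_work() uses this to gather
 * buffers replenished by the other RSS vectors into the first producer
 * ring set, whose indexes are what get written to the hardware mailboxes.
 */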
4761 static void tg3_rx_prodring_xfer(struct tg3 *tp,
4762 struct tg3_rx_prodring_set *dpr,
4763 struct tg3_rx_prodring_set *spr)
4765 u32 si, di, cpycnt, src_prod_idx;
4769 src_prod_idx = spr->rx_std_prod_idx;
4771 /* Make sure updates to the rx_std_buffers[] entries and the
4772 * standard producer index are seen in the correct order.
4776 if (spr->rx_std_cons_idx == src_prod_idx)
4779 if (spr->rx_std_cons_idx < src_prod_idx)
4780 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4782 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4784 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4786 si = spr->rx_std_cons_idx;
4787 di = dpr->rx_std_prod_idx;
4789 memcpy(&dpr->rx_std_buffers[di],
4790 &spr->rx_std_buffers[si],
4791 cpycnt * sizeof(struct ring_info));
4793 for (i = 0; i < cpycnt; i++, di++, si++) {
4794 struct tg3_rx_buffer_desc *sbd, *dbd;
4795 sbd = &spr->rx_std[si];
4796 dbd = &dpr->rx_std[di];
4797 dbd->addr_hi = sbd->addr_hi;
4798 dbd->addr_lo = sbd->addr_lo;
4801 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4803 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4808 src_prod_idx = spr->rx_jmb_prod_idx;
4810 /* Make sure updates to the rx_jmb_buffers[] entries and
4811 * the jumbo producer index are seen in the correct order.
4815 if (spr->rx_jmb_cons_idx == src_prod_idx)
4818 if (spr->rx_jmb_cons_idx < src_prod_idx)
4819 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4821 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4823 cpycnt = min(cpycnt,
4824 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4826 si = spr->rx_jmb_cons_idx;
4827 di = dpr->rx_jmb_prod_idx;
4829 memcpy(&dpr->rx_jmb_buffers[di],
4830 &spr->rx_jmb_buffers[si],
4831 cpycnt * sizeof(struct ring_info));
4833 for (i = 0; i < cpycnt; i++, di++, si++) {
4834 struct tg3_rx_buffer_desc *sbd, *dbd;
4835 sbd = &spr->rx_jmb[si].std;
4836 dbd = &dpr->rx_jmb[di].std;
4837 dbd->addr_hi = sbd->addr_hi;
4838 dbd->addr_lo = sbd->addr_lo;
4841 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4842 TG3_RX_JUMBO_RING_SIZE;
4843 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4844 TG3_RX_JUMBO_RING_SIZE;
4848 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4850 struct tg3 *tp = tnapi->tp;
4852 /* run TX completion thread */
4853 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4855 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4859 /* run RX thread, within the bounds set by NAPI.
4860 * All RX "locking" is done by ensuring outside
4861 * code synchronizes with tg3->napi.poll()
4863 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4864 work_done += tg3_rx(tnapi, budget - work_done);
4866 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4868 u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
4869 u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
4871 for (i = 2; i < tp->irq_cnt; i++)
4872 tg3_rx_prodring_xfer(tp, tnapi->prodring,
4873 tp->napi[i].prodring);
4877 if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
4878 u32 mbox = TG3_RX_STD_PROD_IDX_REG;
4879 tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
4882 if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
4883 u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
4884 tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
4893 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4895 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4896 struct tg3 *tp = tnapi->tp;
4898 struct tg3_hw_status *sblk = tnapi->hw_status;
4901 work_done = tg3_poll_work(tnapi, work_done, budget);
4903 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4906 if (unlikely(work_done >= budget))
4909 /* tp->last_tag is used in tg3_restart_ints() below
4910 * to tell the hw how much work has been processed,
4911 * so we must read it before checking for more work.
4913 tnapi->last_tag = sblk->status_tag;
4914 tnapi->last_irq_tag = tnapi->last_tag;
4917 /* check for RX/TX work to do */
4918 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4919 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4920 napi_complete(napi);
4921 /* Reenable interrupts. */
4922 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
4931 /* work_done is guaranteed to be less than budget. */
4932 napi_complete(napi);
4933 schedule_work(&tp->reset_task);
4937 static int tg3_poll(struct napi_struct *napi, int budget)
4939 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4940 struct tg3 *tp = tnapi->tp;
4942 struct tg3_hw_status *sblk = tnapi->hw_status;
4947 work_done = tg3_poll_work(tnapi, work_done, budget);
4949 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4952 if (unlikely(work_done >= budget))
4955 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4956 /* tp->last_tag is used in tg3_int_reenable() below
4957 * to tell the hw how much work has been processed,
4958 * so we must read it before checking for more work.
4960 tnapi->last_tag = sblk->status_tag;
4961 tnapi->last_irq_tag = tnapi->last_tag;
4964 sblk->status &= ~SD_STATUS_UPDATED;
4966 if (likely(!tg3_has_work(tnapi))) {
4967 napi_complete(napi);
4968 tg3_int_reenable(tnapi);
4976 /* work_done is guaranteed to be less than budget. */
4977 napi_complete(napi);
4978 schedule_work(&tp->reset_task);
4982 static void tg3_irq_quiesce(struct tg3 *tp)
4986 BUG_ON(tp->irq_sync);
4991 for (i = 0; i < tp->irq_cnt; i++)
4992 synchronize_irq(tp->napi[i].irq_vec);
4995 static inline int tg3_irq_sync(struct tg3 *tp)
4997 return tp->irq_sync;
5000 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5001 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5002 * with as well. Most of the time, this is not necessary except when
5003 * shutting down the device.
5005 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5007 spin_lock_bh(&tp->lock);
5009 tg3_irq_quiesce(tp);
5012 static inline void tg3_full_unlock(struct tg3 *tp)
5014 spin_unlock_bh(&tp->lock);
5017 /* One-shot MSI handler - Chip automatically disables interrupt
5018 * after sending MSI so driver doesn't have to do it.
5020 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5022 struct tg3_napi *tnapi = dev_id;
5023 struct tg3 *tp = tnapi->tp;
5025 prefetch(tnapi->hw_status);
5027 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5029 if (likely(!tg3_irq_sync(tp)))
5030 napi_schedule(&tnapi->napi);
5035 /* MSI ISR - No need to check for interrupt sharing and no need to
5036 * flush status block and interrupt mailbox. PCI ordering rules
5037 * guarantee that MSI will arrive after the status block.
5039 static irqreturn_t tg3_msi(int irq, void *dev_id)
5041 struct tg3_napi *tnapi = dev_id;
5042 struct tg3 *tp = tnapi->tp;
5044 prefetch(tnapi->hw_status);
5046 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5048 * Writing any value to intr-mbox-0 clears PCI INTA# and
5049 * chip-internal interrupt pending events.
5050 * Writing non-zero to intr-mbox-0 additionally tells the
5051 * NIC to stop sending us irqs, engaging "in-intr-handler"
5054 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5055 if (likely(!tg3_irq_sync(tp)))
5056 napi_schedule(&tnapi->napi);
5058 return IRQ_RETVAL(1);
5061 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5063 struct tg3_napi *tnapi = dev_id;
5064 struct tg3 *tp = tnapi->tp;
5065 struct tg3_hw_status *sblk = tnapi->hw_status;
5066 unsigned int handled = 1;
5068 /* In INTx mode, it is possible for the interrupt to arrive at
5069 * the CPU before the status block posted prior to the interrupt is visible.
5070 * Reading the PCI State register will confirm whether the
5071 * interrupt is ours and will flush the status block.
5073 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5074 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5075 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5082 * Writing any value to intr-mbox-0 clears PCI INTA# and
5083 * chip-internal interrupt pending events.
5084 * Writing non-zero to intr-mbox-0 additionally tells the
5085 * NIC to stop sending us irqs, engaging "in-intr-handler"
5088 * Flush the mailbox to de-assert the IRQ immediately to prevent
5089 * spurious interrupts. The flush impacts performance but
5090 * excessive spurious interrupts can be worse in some cases.
5092 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5093 if (tg3_irq_sync(tp))
5095 sblk->status &= ~SD_STATUS_UPDATED;
5096 if (likely(tg3_has_work(tnapi))) {
5097 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5098 napi_schedule(&tnapi->napi);
5100 /* No work, shared interrupt perhaps? re-enable
5101 * interrupts, and flush that PCI write
5103 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5107 return IRQ_RETVAL(handled);
5110 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5112 struct tg3_napi *tnapi = dev_id;
5113 struct tg3 *tp = tnapi->tp;
5114 struct tg3_hw_status *sblk = tnapi->hw_status;
5115 unsigned int handled = 1;
5117 /* In INTx mode, it is possible for the interrupt to arrive at
5118 * the CPU before the status block posted prior to the interrupt is visible.
5119 * Reading the PCI State register will confirm whether the
5120 * interrupt is ours and will flush the status block.
5122 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5123 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5124 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5131 * writing any value to intr-mbox-0 clears PCI INTA# and
5132 * chip-internal interrupt pending events.
5133 * writing non-zero to intr-mbox-0 additionally tells the
5134 * NIC to stop sending us irqs, engaging "in-intr-handler"
5137 * Flush the mailbox to de-assert the IRQ immediately to prevent
5138 * spurious interrupts. The flush impacts performance but
5139 * excessive spurious interrupts can be worse in some cases.
5141 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5144 * In a shared interrupt configuration, sometimes other devices'
5145 * interrupts will scream. We record the current status tag here
5146 * so that the above check can report that the screaming interrupts
5147 * are unhandled. Eventually they will be silenced.
5149 tnapi->last_irq_tag = sblk->status_tag;
5151 if (tg3_irq_sync(tp))
5154 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5156 napi_schedule(&tnapi->napi);
5159 return IRQ_RETVAL(handled);
5162 /* ISR for interrupt test */
5163 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5165 struct tg3_napi *tnapi = dev_id;
5166 struct tg3 *tp = tnapi->tp;
5167 struct tg3_hw_status *sblk = tnapi->hw_status;
5169 if ((sblk->status & SD_STATUS_UPDATED) ||
5170 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5171 tg3_disable_ints(tp);
5172 return IRQ_RETVAL(1);
5174 return IRQ_RETVAL(0);
5177 static int tg3_init_hw(struct tg3 *, int);
5178 static int tg3_halt(struct tg3 *, int, int);
5180 /* Restart hardware after configuration changes, self-test, etc.
5181 * Invoked with tp->lock held.
5183 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5184 __releases(tp->lock)
5185 __acquires(tp->lock)
5189 err = tg3_init_hw(tp, reset_phy);
5191 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
5192 "aborting.\n", tp->dev->name);
5193 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5194 tg3_full_unlock(tp);
5195 del_timer_sync(&tp->timer);
5197 tg3_napi_enable(tp);
5199 tg3_full_lock(tp, 0);
5204 #ifdef CONFIG_NET_POLL_CONTROLLER
5205 static void tg3_poll_controller(struct net_device *dev)
5208 struct tg3 *tp = netdev_priv(dev);
5210 for (i = 0; i < tp->irq_cnt; i++)
5211 tg3_interrupt(tp->napi[i].irq_vec, dev);
5215 static void tg3_reset_task(struct work_struct *work)
5217 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5219 unsigned int restart_timer;
5221 tg3_full_lock(tp, 0);
5223 if (!netif_running(tp->dev)) {
5224 tg3_full_unlock(tp);
5228 tg3_full_unlock(tp);
5234 tg3_full_lock(tp, 1);
5236 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5237 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5239 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5240 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5241 tp->write32_rx_mbox = tg3_write_flush_reg32;
5242 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5243 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5246 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5247 err = tg3_init_hw(tp, 1);
5251 tg3_netif_start(tp);
5254 mod_timer(&tp->timer, jiffies + 1);
5257 tg3_full_unlock(tp);
5263 static void tg3_dump_short_state(struct tg3 *tp)
5265 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5266 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5267 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5268 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5271 static void tg3_tx_timeout(struct net_device *dev)
5273 struct tg3 *tp = netdev_priv(dev);
5275 if (netif_msg_tx_err(tp)) {
5276 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5278 tg3_dump_short_state(tp);
5281 schedule_work(&tp->reset_task);
5284 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5285 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5287 u32 base = (u32) mapping & 0xffffffff;
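	/* The addition wraps in 32 bits exactly when the buffer (plus a
	 * little slop) straddles a 4GB boundary.  Example: base 0xffffff00,
	 * len 0x200 gives base + len + 8 == 0x108 after truncation, which is
	 * smaller than base, so the test fires.  The 0xffffdcc0 guard is a
	 * cheap filter: only mappings ending within ~9KB of the boundary can
	 * possibly wrap for a maximum-sized frame.
	 */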
5289 return ((base > 0xffffdcc0) &&
5290 (base + len + 8 < base));
5293 /* Test for DMA addresses > 40-bit */
5294 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5297 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5298 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5299 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5306 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5308 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5309 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5310 struct sk_buff *skb, u32 last_plus_one,
5311 u32 *start, u32 base_flags, u32 mss)
5313 struct tg3 *tp = tnapi->tp;
5314 struct sk_buff *new_skb;
5315 dma_addr_t new_addr = 0;
5319 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5320 new_skb = skb_copy(skb, GFP_ATOMIC);
5322 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5324 new_skb = skb_copy_expand(skb,
5325 skb_headroom(skb) + more_headroom,
5326 skb_tailroom(skb), GFP_ATOMIC);
5332 /* New SKB is guaranteed to be linear. */
5334 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5335 new_addr = skb_shinfo(new_skb)->dma_head;
5337 /* Make sure new skb does not cross any 4G boundaries.
5338 * Drop the packet if it does.
5340 if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5341 tg3_4g_overflow_test(new_addr, new_skb->len))) {
5343 skb_dma_unmap(&tp->pdev->dev, new_skb,
5346 dev_kfree_skb(new_skb);
5349 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5350 base_flags, 1 | (mss << 1));
5351 *start = NEXT_TX(entry);
5355 /* Now clean up the sw ring entries. */
5357 while (entry != last_plus_one) {
5359 tnapi->tx_buffers[entry].skb = new_skb;
5361 tnapi->tx_buffers[entry].skb = NULL;
5362 entry = NEXT_TX(entry);
5366 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5372 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5373 dma_addr_t mapping, int len, u32 flags,
5376 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5377 int is_end = (mss_and_is_end & 0x1);
5378 u32 mss = (mss_and_is_end >> 1);
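	/* Callers pack the "last fragment" flag into bit 0 and the TSO MSS
	 * into the remaining bits, e.g. (i == last) | (mss << 1), so both
	 * values can be passed in a single argument.
	 */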
5382 flags |= TXD_FLAG_END;
5383 if (flags & TXD_FLAG_VLAN) {
5384 vlan_tag = flags >> 16;
5387 vlan_tag |= (mss << TXD_MSS_SHIFT);
5389 txd->addr_hi = ((u64) mapping >> 32);
5390 txd->addr_lo = ((u64) mapping & 0xffffffff);
5391 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5392 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5395 /* hard_start_xmit for devices that don't have any bugs and
5396 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5398 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5399 struct net_device *dev)
5401 struct tg3 *tp = netdev_priv(dev);
5402 u32 len, entry, base_flags, mss;
5403 struct skb_shared_info *sp;
5405 struct tg3_napi *tnapi;
5406 struct netdev_queue *txq;
5408 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5409 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5410 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5413 /* We are running in BH disabled context with netif_tx_lock
5414 * and TX reclaim runs via tp->napi.poll inside of a software
5415 * interrupt. Furthermore, IRQ processing runs lockless so we have
5416 * no IRQ context deadlocks to worry about either. Rejoice!
5418 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5419 if (!netif_tx_queue_stopped(txq)) {
5420 netif_tx_stop_queue(txq);
5422 /* This is a hard error, log it. */
5423 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5424 "queue awake!\n", dev->name);
5426 return NETDEV_TX_BUSY;
5429 entry = tnapi->tx_prod;
5432 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5433 int tcp_opt_len, ip_tcp_len;
5436 if (skb_header_cloned(skb) &&
5437 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5442 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5443 hdrlen = skb_headlen(skb) - ETH_HLEN;
5445 struct iphdr *iph = ip_hdr(skb);
5447 tcp_opt_len = tcp_optlen(skb);
5448 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5451 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5452 hdrlen = ip_tcp_len + tcp_opt_len;
5455 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5456 mss |= (hdrlen & 0xc) << 12;
5458 base_flags |= 0x00000010;
5459 base_flags |= (hdrlen & 0x3e0) << 5;
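			/* For HW_TSO_3 parts the TSO header length is folded
			 * into both the mss value and base_flags as above; the
			 * exact bit positions are chip-defined (the same
			 * encoding appears in tg3_start_xmit_dma_bug()).
			 */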
5463 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5464 TXD_FLAG_CPU_POST_DMA);
5466 tcp_hdr(skb)->check = 0;
5469 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5470 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5471 #if TG3_VLAN_TAG_USED
5472 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5473 base_flags |= (TXD_FLAG_VLAN |
5474 (vlan_tx_tag_get(skb) << 16));
5477 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5482 sp = skb_shinfo(skb);
5484 mapping = sp->dma_head;
5486 tnapi->tx_buffers[entry].skb = skb;
5488 len = skb_headlen(skb);
5490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5491 !mss && skb->len > ETH_DATA_LEN)
5492 base_flags |= TXD_FLAG_JMB_PKT;
5494 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5495 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5497 entry = NEXT_TX(entry);
5499 /* Now loop through additional data fragments, and queue them. */
5500 if (skb_shinfo(skb)->nr_frags > 0) {
5501 unsigned int i, last;
5503 last = skb_shinfo(skb)->nr_frags - 1;
5504 for (i = 0; i <= last; i++) {
5505 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5508 mapping = sp->dma_maps[i];
5509 tnapi->tx_buffers[entry].skb = NULL;
5511 tg3_set_txd(tnapi, entry, mapping, len,
5512 base_flags, (i == last) | (mss << 1));
5514 entry = NEXT_TX(entry);
5518 /* Packets are ready, update Tx producer idx local and on card. */
5519 tw32_tx_mbox(tnapi->prodmbox, entry);
5521 tnapi->tx_prod = entry;
5522 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5523 netif_tx_stop_queue(txq);
5524 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5525 netif_tx_wake_queue(txq);
5531 return NETDEV_TX_OK;
5534 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5535 struct net_device *);
5537 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5538 * TSO header is greater than 80 bytes.
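 * The oversized-header skb is software-segmented with skb_gso_segment()
 * (TSO masked out of the feature set) and each resulting segment is then
 * queued through tg3_start_xmit_dma_bug() as an ordinary packet.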
5540 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5542 struct sk_buff *segs, *nskb;
5543 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5545 /* Estimate the number of fragments in the worst case */
5546 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5547 netif_stop_queue(tp->dev);
5548 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5549 return NETDEV_TX_BUSY;
5551 netif_wake_queue(tp->dev);
5554 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5556 goto tg3_tso_bug_end;
5562 tg3_start_xmit_dma_bug(nskb, tp->dev);
5568 return NETDEV_TX_OK;
5571 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5572 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5574 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5575 struct net_device *dev)
5577 struct tg3 *tp = netdev_priv(dev);
5578 u32 len, entry, base_flags, mss;
5579 struct skb_shared_info *sp;
5580 int would_hit_hwbug;
5582 struct tg3_napi *tnapi;
5583 struct netdev_queue *txq;
5585 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5586 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5587 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5590 /* We are running in BH disabled context with netif_tx_lock
5591 * and TX reclaim runs via tp->napi.poll inside of a software
5592 * interrupt. Furthermore, IRQ processing runs lockless so we have
5593 * no IRQ context deadlocks to worry about either. Rejoice!
5595 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5596 if (!netif_tx_queue_stopped(txq)) {
5597 netif_tx_stop_queue(txq);
5599 /* This is a hard error, log it. */
5600 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5601 "queue awake!\n", dev->name);
5603 return NETDEV_TX_BUSY;
5606 entry = tnapi->tx_prod;
5608 if (skb->ip_summed == CHECKSUM_PARTIAL)
5609 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5611 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5613 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5615 if (skb_header_cloned(skb) &&
5616 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5621 tcp_opt_len = tcp_optlen(skb);
5622 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5624 hdr_len = ip_tcp_len + tcp_opt_len;
5625 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5626 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5627 return (tg3_tso_bug(tp, skb));
5629 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5630 TXD_FLAG_CPU_POST_DMA);
5634 iph->tot_len = htons(mss + hdr_len);
5635 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5636 tcp_hdr(skb)->check = 0;
5637 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5639 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5644 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5645 mss |= (hdr_len & 0xc) << 12;
5647 base_flags |= 0x00000010;
5648 base_flags |= (hdr_len & 0x3e0) << 5;
5649 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5650 mss |= hdr_len << 9;
5651 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5652 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5653 if (tcp_opt_len || iph->ihl > 5) {
5656 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5657 mss |= (tsflags << 11);
5660 if (tcp_opt_len || iph->ihl > 5) {
5663 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5664 base_flags |= tsflags << 12;
5668 #if TG3_VLAN_TAG_USED
5669 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5670 base_flags |= (TXD_FLAG_VLAN |
5671 (vlan_tx_tag_get(skb) << 16));
5674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5675 !mss && skb->len > ETH_DATA_LEN)
5676 base_flags |= TXD_FLAG_JMB_PKT;
5678 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5683 sp = skb_shinfo(skb);
5685 mapping = sp->dma_head;
5687 tnapi->tx_buffers[entry].skb = skb;
5689 would_hit_hwbug = 0;
5691 len = skb_headlen(skb);
5693 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5694 would_hit_hwbug = 1;
5696 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5697 tg3_4g_overflow_test(mapping, len))
5698 would_hit_hwbug = 1;
5700 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5701 tg3_40bit_overflow_test(tp, mapping, len))
5702 would_hit_hwbug = 1;
5704 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5705 would_hit_hwbug = 1;
5707 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5708 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5710 entry = NEXT_TX(entry);
5712 /* Now loop through additional data fragments, and queue them. */
5713 if (skb_shinfo(skb)->nr_frags > 0) {
5714 unsigned int i, last;
5716 last = skb_shinfo(skb)->nr_frags - 1;
5717 for (i = 0; i <= last; i++) {
5718 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5721 mapping = sp->dma_maps[i];
5723 tnapi->tx_buffers[entry].skb = NULL;
5725 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5727 would_hit_hwbug = 1;
5729 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5730 tg3_4g_overflow_test(mapping, len))
5731 would_hit_hwbug = 1;
5733 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5734 tg3_40bit_overflow_test(tp, mapping, len))
5735 would_hit_hwbug = 1;
5737 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5738 tg3_set_txd(tnapi, entry, mapping, len,
5739 base_flags, (i == last)|(mss << 1));
5741 tg3_set_txd(tnapi, entry, mapping, len,
5742 base_flags, (i == last));
5744 entry = NEXT_TX(entry);
5748 if (would_hit_hwbug) {
5749 u32 last_plus_one = entry;
5752 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5753 start &= (TG3_TX_RING_SIZE - 1);
5755 /* If the workaround fails due to memory/mapping
5756 * failure, silently drop this packet.
5758 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5759 &start, base_flags, mss))
5765 /* Packets are ready, update Tx producer idx local and on card. */
5766 tw32_tx_mbox(tnapi->prodmbox, entry);
5768 tnapi->tx_prod = entry;
5769 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5770 netif_tx_stop_queue(txq);
5771 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5772 netif_tx_wake_queue(txq);
5778 return NETDEV_TX_OK;
5781 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5786 if (new_mtu > ETH_DATA_LEN) {
5787 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5788 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5789 ethtool_op_set_tso(dev, 0);
5792 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5794 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5795 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5796 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5800 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5802 struct tg3 *tp = netdev_priv(dev);
5805 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5808 if (!netif_running(dev)) {
5809 /* We'll just catch it later when the
5812 tg3_set_mtu(dev, tp, new_mtu);
5820 tg3_full_lock(tp, 1);
5822 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5824 tg3_set_mtu(dev, tp, new_mtu);
5826 err = tg3_restart_hw(tp, 0);
5829 tg3_netif_start(tp);
5831 tg3_full_unlock(tp);
5839 static void tg3_rx_prodring_free(struct tg3 *tp,
5840 struct tg3_rx_prodring_set *tpr)
5844 if (tpr != &tp->prodring[0]) {
5845 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
5846 i = (i + 1) % TG3_RX_RING_SIZE)
5847 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5850 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5851 for (i = tpr->rx_jmb_cons_idx;
5852 i != tpr->rx_jmb_prod_idx;
5853 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
5854 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5862 for (i = 0; i < TG3_RX_RING_SIZE; i++)
5863 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5866 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5867 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
5868 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5873 /* Initialize tx/rx rings for packet processing.
5875 * The chip has been shut down and the driver detached from
5876 * the networking, so no interrupts or new tx packets will
5877 * end up in the driver. tp->{tx,}lock are held and thus
5880 static int tg3_rx_prodring_alloc(struct tg3 *tp,
5881 struct tg3_rx_prodring_set *tpr)
5883 u32 i, rx_pkt_dma_sz;
5885 tpr->rx_std_cons_idx = 0;
5886 tpr->rx_std_prod_idx = 0;
5887 tpr->rx_jmb_cons_idx = 0;
5888 tpr->rx_jmb_prod_idx = 0;
5890 if (tpr != &tp->prodring[0]) {
5891 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
5892 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
5893 memset(&tpr->rx_jmb_buffers[0], 0,
5894 TG3_RX_JMB_BUFF_RING_SIZE);
5898 /* Zero out all descriptors. */
5899 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5901 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5902 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5903 tp->dev->mtu > ETH_DATA_LEN)
5904 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5905 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5907 /* Initialize invariants of the rings, we only set this
5908 * stuff once. This works because the card does not
5909 * write into the rx buffer posting rings.
5911 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5912 struct tg3_rx_buffer_desc *rxd;
5914 rxd = &tpr->rx_std[i];
5915 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5916 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5917 rxd->opaque = (RXD_OPAQUE_RING_STD |
5918 (i << RXD_OPAQUE_INDEX_SHIFT));
5921 /* Now allocate fresh SKBs for each rx ring. */
5922 for (i = 0; i < tp->rx_pending; i++) {
5923 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
5924 printk(KERN_WARNING PFX
5925 "%s: Using a smaller RX standard ring, "
5926 "only %d out of %d buffers were allocated "
5928 tp->dev->name, i, tp->rx_pending);
5936 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5939 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5941 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5942 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5943 struct tg3_rx_buffer_desc *rxd;
5945 rxd = &tpr->rx_jmb[i].std;
5946 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5947 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5949 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5950 (i << RXD_OPAQUE_INDEX_SHIFT));
5953 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5954 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
5956 printk(KERN_WARNING PFX
5957 "%s: Using a smaller RX jumbo ring, "
5958 "only %d out of %d buffers were "
5959 "allocated successfully.\n",
5960 tp->dev->name, i, tp->rx_jumbo_pending);
5963 tp->rx_jumbo_pending = i;
5973 tg3_rx_prodring_free(tp, tpr);
5977 static void tg3_rx_prodring_fini(struct tg3 *tp,
5978 struct tg3_rx_prodring_set *tpr)
5980 kfree(tpr->rx_std_buffers);
5981 tpr->rx_std_buffers = NULL;
5982 kfree(tpr->rx_jmb_buffers);
5983 tpr->rx_jmb_buffers = NULL;
5985 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5986 tpr->rx_std, tpr->rx_std_mapping);
5990 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5991 tpr->rx_jmb, tpr->rx_jmb_mapping);
5996 static int tg3_rx_prodring_init(struct tg3 *tp,
5997 struct tg3_rx_prodring_set *tpr)
5999 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6000 if (!tpr->rx_std_buffers)
6003 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6004 &tpr->rx_std_mapping);
6008 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6009 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6011 if (!tpr->rx_jmb_buffers)
6014 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6015 TG3_RX_JUMBO_RING_BYTES,
6016 &tpr->rx_jmb_mapping);
6024 tg3_rx_prodring_fini(tp, tpr);
6028 /* Free up pending packets in all rx/tx rings.
6030 * The chip has been shut down and the driver detached from
6031 * the networking, so no interrupts or new tx packets will
6032 * end up in the driver. tp->{tx,}lock is not held and we are not
6033 * in an interrupt context and thus may sleep.
6035 static void tg3_free_rings(struct tg3 *tp)
6039 for (j = 0; j < tp->irq_cnt; j++) {
6040 struct tg3_napi *tnapi = &tp->napi[j];
6042 if (!tnapi->tx_buffers)
6045 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6046 struct tx_ring_info *txp;
6047 struct sk_buff *skb;
6049 txp = &tnapi->tx_buffers[i];
6057 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
6061 i += skb_shinfo(skb)->nr_frags + 1;
6063 dev_kfree_skb_any(skb);
6066 if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
6067 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6071 /* Initialize tx/rx rings for packet processing.
6073 * The chip has been shut down and the driver detached from
6074 * the networking, so no interrupts or new tx packets will
6075 * end up in the driver. tp->{tx,}lock are held and thus
6078 static int tg3_init_rings(struct tg3 *tp)
6082 /* Free up all the SKBs. */
6085 for (i = 0; i < tp->irq_cnt; i++) {
6086 struct tg3_napi *tnapi = &tp->napi[i];
6088 tnapi->last_tag = 0;
6089 tnapi->last_irq_tag = 0;
6090 tnapi->hw_status->status = 0;
6091 tnapi->hw_status->status_tag = 0;
6092 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6097 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6099 tnapi->rx_rcb_ptr = 0;
6101 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6103 if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
6104 tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
6112 * Must not be invoked with interrupt sources disabled and
6113 * the hardware shut down.
6115 static void tg3_free_consistent(struct tg3 *tp)
6119 for (i = 0; i < tp->irq_cnt; i++) {
6120 struct tg3_napi *tnapi = &tp->napi[i];
6122 if (tnapi->tx_ring) {
6123 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6124 tnapi->tx_ring, tnapi->tx_desc_mapping);
6125 tnapi->tx_ring = NULL;
6128 kfree(tnapi->tx_buffers);
6129 tnapi->tx_buffers = NULL;
6131 if (tnapi->rx_rcb) {
6132 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6134 tnapi->rx_rcb_mapping);
6135 tnapi->rx_rcb = NULL;
6138 if (tnapi->hw_status) {
6139 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6141 tnapi->status_mapping);
6142 tnapi->hw_status = NULL;
6147 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6148 tp->hw_stats, tp->stats_mapping);
6149 tp->hw_stats = NULL;
6152 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
6153 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6157 * Must not be invoked with interrupt sources disabled and
6158 * the hardware shut down. Can sleep.
6160 static int tg3_alloc_consistent(struct tg3 *tp)
6164 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
6165 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6169 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6170 sizeof(struct tg3_hw_stats),
6171 &tp->stats_mapping);
6175 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6177 for (i = 0; i < tp->irq_cnt; i++) {
6178 struct tg3_napi *tnapi = &tp->napi[i];
6179 struct tg3_hw_status *sblk;
6181 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6183 &tnapi->status_mapping);
6184 if (!tnapi->hw_status)
6187 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6188 sblk = tnapi->hw_status;
6191 * When RSS is enabled, the status block format changes
6192 * slightly. The "rx_jumbo_consumer", "reserved",
6193 * and "rx_mini_consumer" members get mapped to the
6194 * other three rx return ring producer indexes.
6198 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6201 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6204 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6207 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
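		/* i.e. the first return ring keeps using idx[0].rx_producer,
		 * while the extra RSS return rings borrow the rx_jumbo_consumer,
		 * reserved and rx_mini_consumer words of the status block as
		 * their producer indexes, as described above.
		 */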
6211 if (tp->irq_cnt == 1)
6212 tnapi->prodring = &tp->prodring[0];
6214 tnapi->prodring = &tp->prodring[i - 1];
6217 * If multivector RSS is enabled, vector 0 does not handle
6218 * rx or tx interrupts. Don't allocate any resources for it.
6220 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6223 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6224 TG3_RX_RCB_RING_BYTES(tp),
6225 &tnapi->rx_rcb_mapping);
6229 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6231 tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
6232 TG3_TX_RING_SIZE, GFP_KERNEL);
6233 if (!tnapi->tx_buffers)
6236 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6238 &tnapi->tx_desc_mapping);
6239 if (!tnapi->tx_ring)
6246 tg3_free_consistent(tp);
6250 #define MAX_WAIT_CNT 1000
6252 /* To stop a block, clear the enable bit and poll till it
6253 * clears. tp->lock is held.
6255 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6260 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6267 /* We can't enable/disable these bits of the
6268 * 5705/5750, just say success.
6281 for (i = 0; i < MAX_WAIT_CNT; i++) {
6284 if ((val & enable_bit) == 0)
6288 if (i == MAX_WAIT_CNT && !silent) {
6289 printk(KERN_ERR PFX "tg3_stop_block timed out, "
6290 "ofs=%lx enable_bit=%x\n",
6298 /* tp->lock is held. */
6299 static int tg3_abort_hw(struct tg3 *tp, int silent)
6303 tg3_disable_ints(tp);
6305 tp->rx_mode &= ~RX_MODE_ENABLE;
6306 tw32_f(MAC_RX_MODE, tp->rx_mode);
6309 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6310 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6311 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6312 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6313 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6314 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6316 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6317 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6318 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6319 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6320 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6321 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6322 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6324 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6325 tw32_f(MAC_MODE, tp->mac_mode);
6328 tp->tx_mode &= ~TX_MODE_ENABLE;
6329 tw32_f(MAC_TX_MODE, tp->tx_mode);
6331 for (i = 0; i < MAX_WAIT_CNT; i++) {
6333 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6336 if (i >= MAX_WAIT_CNT) {
6337 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
6338 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
6339 tp->dev->name, tr32(MAC_TX_MODE));
6343 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6344 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6345 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6347 tw32(FTQ_RESET, 0xffffffff);
6348 tw32(FTQ_RESET, 0x00000000);
6350 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6351 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6353 for (i = 0; i < tp->irq_cnt; i++) {
6354 struct tg3_napi *tnapi = &tp->napi[i];
6355 if (tnapi->hw_status)
6356 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6359 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6364 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6369 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6370 if (apedata != APE_SEG_SIG_MAGIC)
6373 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6374 if (!(apedata & APE_FW_STATUS_READY))
6377 /* Wait for up to 1 millisecond for APE to service previous event. */
6378 for (i = 0; i < 10; i++) {
6379 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6382 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6384 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6385 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6386 event | APE_EVENT_STATUS_EVENT_PENDING);
6388 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6390 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6396 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6397 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6400 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6405 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6409 case RESET_KIND_INIT:
6410 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6411 APE_HOST_SEG_SIG_MAGIC);
6412 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6413 APE_HOST_SEG_LEN_MAGIC);
6414 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6415 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6416 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6417 APE_HOST_DRIVER_ID_MAGIC);
6418 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6419 APE_HOST_BEHAV_NO_PHYLOCK);
6421 event = APE_EVENT_STATUS_STATE_START;
6423 case RESET_KIND_SHUTDOWN:
6424 /* With the interface we are currently using,
6425 * APE does not track driver state. Wiping
6426 * out the HOST SEGMENT SIGNATURE forces
6427 * the APE to assume OS absent status.
6429 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6431 event = APE_EVENT_STATUS_STATE_UNLOAD;
6433 case RESET_KIND_SUSPEND:
6434 event = APE_EVENT_STATUS_STATE_SUSPEND;
6440 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6442 tg3_ape_send_event(tp, event);
6445 /* tp->lock is held. */
6446 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6448 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6449 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6451 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6453 case RESET_KIND_INIT:
6454 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6458 case RESET_KIND_SHUTDOWN:
6459 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6463 case RESET_KIND_SUSPEND:
6464 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6473 if (kind == RESET_KIND_INIT ||
6474 kind == RESET_KIND_SUSPEND)
6475 tg3_ape_driver_state_change(tp, kind);
6478 /* tp->lock is held. */
6479 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6481 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6483 case RESET_KIND_INIT:
6484 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6485 DRV_STATE_START_DONE);
6488 case RESET_KIND_SHUTDOWN:
6489 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6490 DRV_STATE_UNLOAD_DONE);
6498 if (kind == RESET_KIND_SHUTDOWN)
6499 tg3_ape_driver_state_change(tp, kind);
6502 /* tp->lock is held. */
6503 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6505 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6507 case RESET_KIND_INIT:
6508 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6512 case RESET_KIND_SHUTDOWN:
6513 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6517 case RESET_KIND_SUSPEND:
6518 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6528 static int tg3_poll_fw(struct tg3 *tp)
6533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6534 /* Wait up to 20ms for init done. */
6535 for (i = 0; i < 200; i++) {
6536 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6543 /* Wait for firmware initialization to complete. */
6544 for (i = 0; i < 100000; i++) {
6545 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6546 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6551 /* Chip might not be fitted with firmware. Some Sun onboard
6552 * parts are configured like that. So don't signal the timeout
6553 * of the above loop as an error, but do report the lack of
6554 * running firmware once.
6557 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6558 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6560 printk(KERN_INFO PFX "%s: No firmware running.\n",
6567 /* Save PCI command register before chip reset */
6568 static void tg3_save_pci_state(struct tg3 *tp)
6570 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6573 /* Restore PCI state after chip reset */
6574 static void tg3_restore_pci_state(struct tg3 *tp)
6578 /* Re-enable indirect register accesses. */
6579 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6580 tp->misc_host_ctrl);
6582 /* Set MAX PCI retry to zero. */
6583 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6584 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6585 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6586 val |= PCISTATE_RETRY_SAME_DMA;
6587 /* Allow reads and writes to the APE register and memory space. */
6588 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6589 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6590 PCISTATE_ALLOW_APE_SHMEM_WR;
6591 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6593 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6595 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6596 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6597 pcie_set_readrq(tp->pdev, 4096);
6599 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6600 tp->pci_cacheline_sz);
6601 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6606 /* Make sure PCI-X relaxed ordering bit is clear. */
6607 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6610 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6612 pcix_cmd &= ~PCI_X_CMD_ERO;
6613 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6617 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6619 /* Chip reset on 5780 will reset MSI enable bit,
6620 * so need to restore it.
6622 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6625 pci_read_config_word(tp->pdev,
6626 tp->msi_cap + PCI_MSI_FLAGS,
6628 pci_write_config_word(tp->pdev,
6629 tp->msi_cap + PCI_MSI_FLAGS,
6630 ctrl | PCI_MSI_FLAGS_ENABLE);
6631 val = tr32(MSGINT_MODE);
6632 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6637 static void tg3_stop_fw(struct tg3 *);
6639 /* tp->lock is held. */
6640 static int tg3_chip_reset(struct tg3 *tp)
6643 void (*write_op)(struct tg3 *, u32, u32);
6648 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6650 /* No matching tg3_nvram_unlock() after this because
6651 * chip reset below will undo the nvram lock.
6653 tp->nvram_lock_cnt = 0;
6655 /* GRC_MISC_CFG core clock reset will clear the memory
6656 * enable bit in PCI register 4 and the MSI enable bit
6657 * on some chips, so we save relevant registers here.
6659 tg3_save_pci_state(tp);
6661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6662 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6663 tw32(GRC_FASTBOOT_PC, 0);
6666 * We must avoid the readl() that normally takes place.
6667 * It locks machines, causes machine checks, and other
6668 * fun things. So, temporarily disable the 5701
6669 * hardware workaround while we do the reset.
6671 write_op = tp->write32;
6672 if (write_op == tg3_write_flush_reg32)
6673 tp->write32 = tg3_write32;
6675 /* Prevent the irq handler from reading or writing PCI registers
6676 * during chip reset when the memory enable bit in the PCI command
6677 * register may be cleared. The chip does not generate interrupts
6678 * at this time, but the irq handler may still be called due to irq
6679 * sharing or irqpoll.
6681 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6682 for (i = 0; i < tp->irq_cnt; i++) {
6683 struct tg3_napi *tnapi = &tp->napi[i];
6684 if (tnapi->hw_status) {
6685 tnapi->hw_status->status = 0;
6686 tnapi->hw_status->status_tag = 0;
6688 tnapi->last_tag = 0;
6689 tnapi->last_irq_tag = 0;
6693 for (i = 0; i < tp->irq_cnt; i++)
6694 synchronize_irq(tp->napi[i].irq_vec);
6696 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6697 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6698 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6702 val = GRC_MISC_CFG_CORECLK_RESET;
6704 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6705 if (tr32(0x7e2c) == 0x60) {
6708 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6709 tw32(GRC_MISC_CFG, (1 << 29));
6714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6715 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6716 tw32(GRC_VCPU_EXT_CTRL,
6717 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6720 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6721 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6722 tw32(GRC_MISC_CFG, val);
6724 /* restore 5701 hardware bug workaround write method */
6725 tp->write32 = write_op;
6727 /* Unfortunately, we have to delay before the PCI read back.
6728 * Some 575X chips will not even respond to a PCI cfg access
6729 * when the reset command is given to the chip.
6731 * How do these hardware designers expect things to work
6732 * properly if the PCI write is posted for a long period
6733 * of time? It is always necessary to have some method by
6734 * which a register read back can occur to push out the write
6735 * that performs the reset.
6737 * For most tg3 variants the trick below was working.
6742 /* Flush PCI posted writes. The normal MMIO registers
6743 * are inaccessible at this time so this is the only
6744 * way to do this reliably (actually, this is no longer
6745 * the case, see above). I tried to use indirect
6746 * register read/write but this upset some 5701 variants.
6748 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6752 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6755 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6759 /* Wait for link training to complete. */
6760 for (i = 0; i < 5000; i++)
6763 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6764 pci_write_config_dword(tp->pdev, 0xc4,
6765 cfg_val | (1 << 15));
6768 /* Clear the "no snoop" and "relaxed ordering" bits. */
6769 pci_read_config_word(tp->pdev,
6770 tp->pcie_cap + PCI_EXP_DEVCTL,
6772 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6773 PCI_EXP_DEVCTL_NOSNOOP_EN);
6775 * Older PCIe devices only support the 128 byte
6776 * MPS setting. Enforce the restriction.
6778 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6779 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6780 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6781 pci_write_config_word(tp->pdev,
6782 tp->pcie_cap + PCI_EXP_DEVCTL,
6785 pcie_set_readrq(tp->pdev, 4096);
6787 /* Clear error status */
6788 pci_write_config_word(tp->pdev,
6789 tp->pcie_cap + PCI_EXP_DEVSTA,
6790 PCI_EXP_DEVSTA_CED |
6791 PCI_EXP_DEVSTA_NFED |
6792 PCI_EXP_DEVSTA_FED |
6793 PCI_EXP_DEVSTA_URD);
6796 tg3_restore_pci_state(tp);
6798 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6801 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6802 val = tr32(MEMARB_MODE);
6803 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6805 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6807 tw32(0x5000, 0x400);
6810 tw32(GRC_MODE, tp->grc_mode);
6812 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6815 tw32(0xc4, val | (1 << 15));
6818 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6819 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6820 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6821 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6822 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6823 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6826 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6827 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6828 tw32_f(MAC_MODE, tp->mac_mode);
6829 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6830 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6831 tw32_f(MAC_MODE, tp->mac_mode);
6832 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6833 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6834 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6835 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6836 tw32_f(MAC_MODE, tp->mac_mode);
6838 tw32_f(MAC_MODE, 0);
6841 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6843 err = tg3_poll_fw(tp);
6849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6852 phy_addr = tp->phy_addr;
6853 tp->phy_addr = TG3_PHY_PCIE_ADDR;
6855 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6856 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
6857 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
6858 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
6859 TG3_PCIEPHY_TX0CTRL1_NB_EN;
6860 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
6863 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6864 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
6865 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
6866 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
6867 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
6870 tp->phy_addr = phy_addr;
6873 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6874 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6875 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6876 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
6879 tw32(0x7c00, val | (1 << 25));
6882 /* Reprobe ASF enable state. */
6883 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6884 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6885 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6886 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6889 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6890 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6891 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6892 tp->last_event_jiffies = jiffies;
6893 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6894 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6901 /* tp->lock is held. */
6902 static void tg3_stop_fw(struct tg3 *tp)
6904 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6905 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6906 /* Wait for RX cpu to ACK the previous event. */
6907 tg3_wait_for_event_ack(tp);
6909 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6911 tg3_generate_fw_event(tp);
6913 /* Wait for RX cpu to ACK this event. */
6914 tg3_wait_for_event_ack(tp);
6918 /* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
6941 #define RX_CPU_SCRATCH_BASE 0x30000
6942 #define RX_CPU_SCRATCH_SIZE 0x04000
6943 #define TX_CPU_SCRATCH_BASE 0x34000
6944 #define TX_CPU_SCRATCH_SIZE 0x04000
6946 /* tp->lock is held. */
6947 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6951 BUG_ON(offset == TX_CPU_BASE &&
6952 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6955 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6957 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6960 if (offset == RX_CPU_BASE) {
6961 for (i = 0; i < 10000; i++) {
6962 tw32(offset + CPU_STATE, 0xffffffff);
6963 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6964 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6968 tw32(offset + CPU_STATE, 0xffffffff);
6969 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE, CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}
6988 /* Clear firmware's nvram arbitration. */
6989 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6990 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
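/* Describes one firmware image parsed out of the request_firmware() blob:
 * fw_base is the load/start address expected by the on-chip CPU, fw_len is
 * the number of bytes to copy, and fw_data points at the big-endian words
 * of the text/data payload.
 */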
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};
7000 /* tp->lock is held. */
7001 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7002 int cpu_scratch_size, struct fw_info *info)
7004 int err, lock_err, i;
7005 void (*write_op)(struct tg3 *, u32, u32);
7007 if (cpu_base == TX_CPU_BASE &&
7008 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7009 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
7010 "TX cpu firmware on %s which is 5705.\n",
7015 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7016 write_op = tg3_write_mem;
7018 write_op = tg3_write_indirect_reg32;
7020 /* It is possible that bootcode is still loading at this point.
7021 * Get the nvram lock first before halting the cpu.
7023 lock_err = tg3_nvram_lock(tp);
7024 err = tg3_halt_cpu(tp, cpu_base);
7026 tg3_nvram_unlock(tp);
7030 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7031 write_op(tp, cpu_scratch_base + i, 0);
7032 tw32(cpu_base + CPU_STATE, 0xffffffff);
7033 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7034 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7035 write_op(tp, (cpu_scratch_base +
7036 (info->fw_base & 0xffff) +
7038 be32_to_cpu(info->fw_data[i]));
7046 /* tp->lock is held. */
7047 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7049 struct fw_info info;
7050 const __be32 *fw_data;
7053 fw_data = (void *)tp->fw->data;
7055 /* Firmware blob starts with version numbers, followed by
7056 start address and length. We are setting complete length.
7057 length = end_address_of_bss - start_address_of_text.
7058 Remainder is the blob to be loaded contiguously
7059 from start address. */
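/* Worked example with hypothetical header values: a blob whose words are
 * fw_data[0] = version, fw_data[1] = 0x08000000 (start address) and
 * fw_data[2] = total length including BSS would yield
 * info.fw_base = 0x08000000 and info.fw_len = tp->fw->size - 12, with the
 * loadable image starting at fw_data[3].
 */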
7061 info.fw_base = be32_to_cpu(fw_data[1]);
7062 info.fw_len = tp->fw->size - 12;
7063 info.fw_data = &fw_data[3];
7065 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7066 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7071 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7072 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7077 /* Now startup only the RX cpu. */
7078 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7079 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7081 for (i = 0; i < 5; i++) {
7082 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7084 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7085 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7086 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       info.fw_base);
		return -ENODEV;
	}
7096 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7097 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7102 /* 5705 needs a special version of the TSO firmware. */
7104 /* tp->lock is held. */
7105 static int tg3_load_tso_firmware(struct tg3 *tp)
7107 struct fw_info info;
7108 const __be32 *fw_data;
7109 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7112 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7115 fw_data = (void *)tp->fw->data;
7117 /* Firmware blob starts with version numbers, followed by
7118 start address and length. We are setting complete length.
7119 length = end_address_of_bss - start_address_of_text.
7120 Remainder is the blob to be loaded contiguously
7121 from start address. */
7123 info.fw_base = be32_to_cpu(fw_data[1]);
7124 cpu_scratch_size = tp->fw_len;
7125 info.fw_len = tp->fw->size - 12;
7126 info.fw_data = &fw_data[3];
7128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7129 cpu_base = RX_CPU_BASE;
7130 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7132 cpu_base = TX_CPU_BASE;
7133 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7134 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7137 err = tg3_load_firmware_cpu(tp, cpu_base,
7138 cpu_scratch_base, cpu_scratch_size,
7143 /* Now startup the cpu. */
7144 tw32(cpu_base + CPU_STATE, 0xffffffff);
7145 tw32_f(cpu_base + CPU_PC, info.fw_base);
7147 for (i = 0; i < 5; i++) {
7148 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7150 tw32(cpu_base + CPU_STATE, 0xffffffff);
7151 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7152 tw32_f(cpu_base + CPU_PC, info.fw_base);
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.fw_base);
		return -ENODEV;
	}
7162 tw32(cpu_base + CPU_STATE, 0xffffffff);
7163 tw32_f(cpu_base + CPU_MODE, 0x00000000);
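/* ndo_set_mac_address handler: validate the new address, copy it into
 * dev->dev_addr and, if the interface is running, program the MAC address
 * registers while preserving MAC address 1 when ASF firmware is using it.
 */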
7168 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7170 struct tg3 *tp = netdev_priv(dev);
7171 struct sockaddr *addr = p;
7172 int err = 0, skip_mac_1 = 0;
7174 if (!is_valid_ether_addr(addr->sa_data))
7177 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7179 if (!netif_running(dev))
7182 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7183 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7185 addr0_high = tr32(MAC_ADDR_0_HIGH);
7186 addr0_low = tr32(MAC_ADDR_0_LOW);
7187 addr1_high = tr32(MAC_ADDR_1_HIGH);
7188 addr1_low = tr32(MAC_ADDR_1_LOW);
7190 /* Skip MAC addr 1 if ASF is using it. */
7191 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7192 !(addr1_high == 0 && addr1_low == 0))
7195 spin_lock_bh(&tp->lock);
7196 __tg3_set_mac_addr(tp, skip_mac_1);
7197 spin_unlock_bh(&tp->lock);
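/* Program a single TG3_BDINFO ring control block in NIC SRAM: the 64-bit
 * host DMA address of the ring, the (max length << 16) | flags word and,
 * on chips that keep descriptors on-chip, the NIC-side ring address.
 */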
7202 /* tp->lock is held. */
7203 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7204 dma_addr_t mapping, u32 maxlen_flags,
7208 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7209 ((u64) mapping >> 32));
7211 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7212 ((u64) mapping & 0xffffffff));
7214 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7217 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7219 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7223 static void __tg3_set_rx_mode(struct net_device *);
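/* Push an ethtool_coalesce configuration into the host coalescing engine.
 * Without MSI-X all parameters go to the base (vector 0) registers; with
 * MSI-X those are zeroed and the per-vector HOSTCC_*_VEC1 register blocks
 * (spaced 0x18 bytes apart) are programmed for each active vector, with the
 * unused vectors cleared.
 */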
7224 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7228 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
7229 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7230 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7231 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7233 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7234 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7235 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7237 tw32(HOSTCC_TXCOL_TICKS, 0);
7238 tw32(HOSTCC_TXMAX_FRAMES, 0);
7239 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7241 tw32(HOSTCC_RXCOL_TICKS, 0);
7242 tw32(HOSTCC_RXMAX_FRAMES, 0);
7243 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7246 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7247 u32 val = ec->stats_block_coalesce_usecs;
7249 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7250 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7252 if (!netif_carrier_ok(tp->dev))
7255 tw32(HOSTCC_STAT_COAL_TICKS, val);
7258 for (i = 0; i < tp->irq_cnt - 1; i++) {
7261 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7262 tw32(reg, ec->rx_coalesce_usecs);
7263 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7264 tw32(reg, ec->tx_coalesce_usecs);
7265 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7266 tw32(reg, ec->rx_max_coalesced_frames);
7267 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7268 tw32(reg, ec->tx_max_coalesced_frames);
7269 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7270 tw32(reg, ec->rx_max_coalesced_frames_irq);
7271 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7272 tw32(reg, ec->tx_max_coalesced_frames_irq);
7275 for (; i < tp->irq_max - 1; i++) {
7276 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7277 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7278 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7279 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7280 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7281 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
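/* Disable every send and receive-return ring control block except the
 * first, quiesce the mailboxes, clear the host status block(s) and point
 * the status block DMA address(es) at each NAPI context so the rings can
 * be repopulated from a clean state.
 */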
7285 /* tp->lock is held. */
7286 static void tg3_rings_reset(struct tg3 *tp)
7289 u32 stblk, txrcb, rxrcb, limit;
7290 struct tg3_napi *tnapi = &tp->napi[0];
7292 /* Disable all transmit rings but the first. */
7293 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7294 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7296 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7298 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7299 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7300 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7301 BDINFO_FLAGS_DISABLED);
7304 /* Disable all receive return rings but the first. */
7305 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7306 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7307 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7308 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7309 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7310 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7312 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7314 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7315 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7316 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7317 BDINFO_FLAGS_DISABLED);
7319 /* Disable interrupts */
7320 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7322 /* Zero mailbox registers. */
7323 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7324 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7325 tp->napi[i].tx_prod = 0;
7326 tp->napi[i].tx_cons = 0;
7327 tw32_mailbox(tp->napi[i].prodmbox, 0);
7328 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7329 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7332 tp->napi[0].tx_prod = 0;
7333 tp->napi[0].tx_cons = 0;
7334 tw32_mailbox(tp->napi[0].prodmbox, 0);
7335 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7338 /* Make sure the NIC-based send BD rings are disabled. */
7339 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7340 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7341 for (i = 0; i < 16; i++)
7342 tw32_tx_mbox(mbox + i * 8, 0);
7345 txrcb = NIC_SRAM_SEND_RCB;
7346 rxrcb = NIC_SRAM_RCV_RET_RCB;
7348 /* Clear status block in ram. */
7349 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7351 /* Set status block DMA address */
7352 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7353 ((u64) tnapi->status_mapping >> 32));
7354 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7355 ((u64) tnapi->status_mapping & 0xffffffff));
7357 if (tnapi->tx_ring) {
7358 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7359 (TG3_TX_RING_SIZE <<
7360 BDINFO_FLAGS_MAXLEN_SHIFT),
7361 NIC_SRAM_TX_BUFFER_DESC);
7362 txrcb += TG3_BDINFO_SIZE;
7365 if (tnapi->rx_rcb) {
7366 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7367 (TG3_RX_RCB_RING_SIZE(tp) <<
7368 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7369 rxrcb += TG3_BDINFO_SIZE;
7372 stblk = HOSTCC_STATBLCK_RING1;
7374 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7375 u64 mapping = (u64)tnapi->status_mapping;
7376 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7377 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7379 /* Clear status block in ram. */
7380 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7382 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7383 (TG3_TX_RING_SIZE <<
7384 BDINFO_FLAGS_MAXLEN_SHIFT),
7385 NIC_SRAM_TX_BUFFER_DESC);
7387 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7388 (TG3_RX_RCB_RING_SIZE(tp) <<
7389 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7392 txrcb += TG3_BDINFO_SIZE;
7393 rxrcb += TG3_BDINFO_SIZE;
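/* Bring the chip from reset to a fully programmed state: chip reset, DMA
 * and buffer manager configuration, ring control blocks, host coalescing,
 * MAC/RX/TX mode registers, optional firmware loading (5701 A0 fix, TSO)
 * and finally PHY setup and receive rules.
 */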
7397 /* tp->lock is held. */
7398 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7400 u32 val, rdmac_mode;
7402 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7404 tg3_disable_ints(tp);
7408 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7410 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7411 tg3_abort_hw(tp, 1);
7415 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7418 err = tg3_chip_reset(tp);
7422 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7424 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7425 val = tr32(TG3_CPMU_CTRL);
7426 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7427 tw32(TG3_CPMU_CTRL, val);
7429 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7430 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7431 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7432 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7434 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7435 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7436 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7437 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7439 val = tr32(TG3_CPMU_HST_ACC);
7440 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7441 val |= CPMU_HST_ACC_MACCLK_6_25;
7442 tw32(TG3_CPMU_HST_ACC, val);
7445 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7446 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7447 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7448 PCIE_PWR_MGMT_L1_THRESH_4MS;
7449 tw32(PCIE_PWR_MGMT_THRESH, val);
7451 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7452 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7454 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7456 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7457 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7460 /* This works around an issue with Athlon chipsets on
7461 * B3 tigon3 silicon. This bit has no effect on any
7462 * other revision. But do not set this on PCI Express
7463 * chips and don't even touch the clocks if the CPMU is present.
7465 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7466 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7467 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7468 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7471 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7472 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7473 val = tr32(TG3PCI_PCISTATE);
7474 val |= PCISTATE_RETRY_SAME_DMA;
7475 tw32(TG3PCI_PCISTATE, val);
7478 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7479 /* Allow reads and writes to the
7480 * APE register and memory space.
7482 val = tr32(TG3PCI_PCISTATE);
7483 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7484 PCISTATE_ALLOW_APE_SHMEM_WR;
7485 tw32(TG3PCI_PCISTATE, val);
7488 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7489 /* Enable some hw fixes. */
7490 val = tr32(TG3PCI_MSI_DATA);
7491 val |= (1 << 26) | (1 << 28) | (1 << 29);
7492 tw32(TG3PCI_MSI_DATA, val);
7495 /* Descriptor ring init may make accesses to the
7496 * NIC SRAM area to setup the TX descriptors, so we
7497 * can only do this after the hardware has been
7498 * successfully reset.
7500 err = tg3_init_rings(tp);
7504 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7505 val = tr32(TG3PCI_DMA_RW_CTRL) &
7506 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7507 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7508 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7509 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7510 /* This value is determined during the probe time DMA
7511 * engine test, tg3_test_dma.
7513 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7516 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7517 GRC_MODE_4X_NIC_SEND_RINGS |
7518 GRC_MODE_NO_TX_PHDR_CSUM |
7519 GRC_MODE_NO_RX_PHDR_CSUM);
7520 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7522 /* Pseudo-header checksum is done by hardware logic and not
7523 * the offload processers, so make the chip do the pseudo-
7524 * header checksums on receive. For transmit it is more
7525 * convenient to do the pseudo-header checksum in software
7526 * as Linux does that on transmit for us in all cases.
7528 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7532 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7534 /* Setup the timer prescalar register. Clock is always 66Mhz. */
7535 val = tr32(GRC_MISC_CFG);
7537 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7538 tw32(GRC_MISC_CFG, val);
7540 /* Initialize MBUF/DESC pool. */
7541 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7543 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7544 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7545 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7546 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7548 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7549 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7550 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7552 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7555 fw_len = tp->fw_len;
7556 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7557 tw32(BUFMGR_MB_POOL_ADDR,
7558 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7559 tw32(BUFMGR_MB_POOL_SIZE,
7560 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7563 if (tp->dev->mtu <= ETH_DATA_LEN) {
7564 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7565 tp->bufmgr_config.mbuf_read_dma_low_water);
7566 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7567 tp->bufmgr_config.mbuf_mac_rx_low_water);
7568 tw32(BUFMGR_MB_HIGH_WATER,
7569 tp->bufmgr_config.mbuf_high_water);
7571 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7572 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7573 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7574 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7575 tw32(BUFMGR_MB_HIGH_WATER,
7576 tp->bufmgr_config.mbuf_high_water_jumbo);
7578 tw32(BUFMGR_DMA_LOW_WATER,
7579 tp->bufmgr_config.dma_low_water);
7580 tw32(BUFMGR_DMA_HIGH_WATER,
7581 tp->bufmgr_config.dma_high_water);
7583 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7584 for (i = 0; i < 2000; i++) {
7585 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7590 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7595 /* Setup replenish threshold. */
7596 val = tp->rx_pending / 8;
7599 else if (val > tp->rx_std_max_post)
7600 val = tp->rx_std_max_post;
7601 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7602 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7603 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7605 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7606 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7609 tw32(RCVBDI_STD_THRESH, val);
7611 /* Initialize TG3_BDINFO's at:
7612 * RCVDBDI_STD_BD: standard eth size rx ring
7613 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7614 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7617 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7618 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7619 * ring attribute flags
7620 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7622 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7623 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7625 * The size of each ring is fixed in the firmware, but the location is
7628 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7629 ((u64) tpr->rx_std_mapping >> 32));
7630 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7631 ((u64) tpr->rx_std_mapping & 0xffffffff));
7632 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7633 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7634 NIC_SRAM_RX_BUFFER_DESC);
7636 /* Disable the mini ring */
7637 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7638 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7639 BDINFO_FLAGS_DISABLED);
7641 /* Program the jumbo buffer descriptor ring control
7642 * blocks on those devices that have them.
7644 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7645 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7646 /* Setup replenish threshold. */
7647 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7649 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7650 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7651 ((u64) tpr->rx_jmb_mapping >> 32));
7652 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7653 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7654 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7655 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7656 BDINFO_FLAGS_USE_EXT_RECV);
7657 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7658 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7659 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7661 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7662 BDINFO_FLAGS_DISABLED);
7665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7666 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7667 (RX_STD_MAX_SIZE << 2);
7669 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7671 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7673 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7675 tpr->rx_std_prod_idx = tp->rx_pending;
7676 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7678 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7679 tp->rx_jumbo_pending : 0;
7680 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7683 tw32(STD_REPLENISH_LWM, 32);
7684 tw32(JMB_REPLENISH_LWM, 16);
7687 tg3_rings_reset(tp);
7689 /* Initialize MAC address and backoff seed. */
7690 __tg3_set_mac_addr(tp, 0);
7692 /* MTU + ethernet header + FCS + optional VLAN tag */
7693 tw32(MAC_RX_MTU_SIZE,
7694 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7696 /* The slot time is changed by tg3_setup_phy if we
7697 * run at gigabit with half duplex.
7699 tw32(MAC_TX_LENGTHS,
7700 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7701 (6 << TX_LENGTHS_IPG_SHIFT) |
7702 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7704 /* Receive rules. */
7705 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7706 tw32(RCVLPC_CONFIG, 0x0181);
7708 /* Calculate RDMAC_MODE setting early, we need it to determine
7709 * the RCVLPC_STATE_ENABLE mask.
7711 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7712 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7713 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7714 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7715 RDMAC_MODE_LNGREAD_ENAB);
7717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7718 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7719 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7720 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7721 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7722 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7724 /* If statement applies to 5705 and 5750 PCI devices only */
7725 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7726 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7727 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7728 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7729 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7730 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7731 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7732 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7733 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7737 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7738 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7740 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7741 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7743 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7746 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7748 /* Receive/send statistics. */
7749 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7750 val = tr32(RCVLPC_STATS_ENABLE);
7751 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7752 tw32(RCVLPC_STATS_ENABLE, val);
7753 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7754 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7755 val = tr32(RCVLPC_STATS_ENABLE);
7756 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7757 tw32(RCVLPC_STATS_ENABLE, val);
7759 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7761 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7762 tw32(SNDDATAI_STATSENAB, 0xffffff);
7763 tw32(SNDDATAI_STATSCTRL,
7764 (SNDDATAI_SCTRL_ENABLE |
7765 SNDDATAI_SCTRL_FASTUPD));
7767 /* Setup host coalescing engine. */
7768 tw32(HOSTCC_MODE, 0);
7769 for (i = 0; i < 2000; i++) {
7770 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7775 __tg3_set_coalesce(tp, &tp->coal);
7777 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7778 /* Status/statistics block address. See tg3_timer,
7779 * the tg3_periodic_fetch_stats call there, and
7780 * tg3_get_stats to see how this works for 5705/5750 chips.
7782 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7783 ((u64) tp->stats_mapping >> 32));
7784 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7785 ((u64) tp->stats_mapping & 0xffffffff));
7786 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7788 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7790 /* Clear statistics and status block memory areas */
7791 for (i = NIC_SRAM_STATS_BLK;
7792 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7794 tg3_write_mem(tp, i, 0);
7799 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7801 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7802 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7803 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7804 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7806 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7807 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7808 /* reset to prevent losing 1st rx packet intermittently */
7809 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7813 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7814 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7817 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7818 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7819 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7820 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7821 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7822 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7823 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7826 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7827 * If TG3_FLG2_IS_NIC is zero, we should read the
7828 * register to preserve the GPIO settings for LOMs. The GPIOs,
7829 * whether used as inputs or outputs, are set by boot code after
7832 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7835 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7836 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7837 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7840 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7841 GRC_LCLCTRL_GPIO_OUTPUT3;
7843 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7844 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7846 tp->grc_local_ctrl &= ~gpio_mask;
7847 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7849 /* GPIO1 must be driven high for eeprom write protect */
7850 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7851 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7852 GRC_LCLCTRL_GPIO_OUTPUT1);
7854 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7857 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
7858 val = tr32(MSGINT_MODE);
7859 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
7860 tw32(MSGINT_MODE, val);
7863 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7864 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7868 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7869 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7870 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7871 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7872 WDMAC_MODE_LNGREAD_ENAB);
7874 /* If statement applies to 5705 and 5750 PCI devices only */
7875 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7876 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7878 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7879 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7880 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7882 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7883 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7884 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7885 val |= WDMAC_MODE_RX_ACCEL;
7889 /* Enable host coalescing bug fix */
7890 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7891 val |= WDMAC_MODE_STATUS_TAG_FIX;
7893 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7894 val |= WDMAC_MODE_BURST_ALL_DATA;
7896 tw32_f(WDMAC_MODE, val);
7899 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7902 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7904 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7905 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7906 pcix_cmd |= PCI_X_CMD_READ_2K;
7907 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7908 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7909 pcix_cmd |= PCI_X_CMD_READ_2K;
7911 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7915 tw32_f(RDMAC_MODE, rdmac_mode);
7918 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7919 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7920 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7924 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7926 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7928 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7929 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7930 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7931 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7932 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7933 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7934 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
7935 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
7936 val |= SNDBDI_MODE_MULTI_TXQ_EN;
7937 tw32(SNDBDI_MODE, val);
7938 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7940 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7941 err = tg3_load_5701_a0_firmware_fix(tp);
7946 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7947 err = tg3_load_tso_firmware(tp);
7952 tp->tx_mode = TX_MODE_ENABLE;
7953 tw32_f(MAC_TX_MODE, tp->tx_mode);
7956 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
7957 u32 reg = MAC_RSS_INDIR_TBL_0;
7958 u8 *ent = (u8 *)&val;
7960 /* Setup the indirection table */
		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
			int idx = i % sizeof(val);

			ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}
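		/* The table simply round-robins the 128 hash buckets across
		 * the tp->irq_cnt - 1 RSS return rings: with 4 RX rings, for
		 * example, bucket i is served by ring i % 4.
		 */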
7971 /* Setup the "secret" hash key. */
7972 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
7973 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
7974 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
7975 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
7976 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
7977 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
7978 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
7979 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
7980 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
7981 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
7984 tp->rx_mode = RX_MODE_ENABLE;
7985 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7986 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7988 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
7989 tp->rx_mode |= RX_MODE_RSS_ENABLE |
7990 RX_MODE_RSS_ITBL_HASH_BITS_7 |
7991 RX_MODE_RSS_IPV6_HASH_EN |
7992 RX_MODE_RSS_TCP_IPV6_HASH_EN |
7993 RX_MODE_RSS_IPV4_HASH_EN |
7994 RX_MODE_RSS_TCP_IPV4_HASH_EN;
7996 tw32_f(MAC_RX_MODE, tp->rx_mode);
7999 tw32(MAC_LED_CTRL, tp->led_ctrl);
8001 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8002 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8003 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8006 tw32_f(MAC_RX_MODE, tp->rx_mode);
8009 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8010 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8011 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
8012 /* Set drive transmission level to 1.2V */
8013 /* only if the signal pre-emphasis bit is not set */
8014 val = tr32(MAC_SERDES_CFG);
8017 tw32(MAC_SERDES_CFG, val);
8019 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8020 tw32(MAC_SERDES_CFG, 0x616000);
8023 /* Prevent chip from dropping frames when flow control
8026 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
8028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8029 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8030 /* Use hardware link auto-negotiation */
8031 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8034 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
8035 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8038 tmp = tr32(SERDES_RX_CTRL);
8039 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8040 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8041 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8042 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8045 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8046 if (tp->link_config.phy_is_low_power) {
8047 tp->link_config.phy_is_low_power = 0;
8048 tp->link_config.speed = tp->link_config.orig_speed;
8049 tp->link_config.duplex = tp->link_config.orig_duplex;
8050 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8053 err = tg3_setup_phy(tp, 0);
8057 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8058 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
8061 /* Clear CRC stats. */
8062 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8063 tg3_writephy(tp, MII_TG3_TEST1,
8064 tmp | MII_TG3_TEST1_CRC_EN);
8065 tg3_readphy(tp, 0x14, &tmp);
8070 __tg3_set_rx_mode(tp->dev);
8072 /* Initialize receive rules. */
8073 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8074 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8075 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8076 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8078 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8079 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8083 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8087 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8089 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8091 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8093 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8095 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8097 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8099 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8101 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8103 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8105 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8107 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8109 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8111 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8113 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8121 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8122 /* Write our heartbeat update interval to APE. */
8123 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8124 APE_HOST_HEARTBEAT_INT_DISABLE);
8126 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8131 /* Called at device open time to get the chip ready for
8132 * packet processing. Invoked with tp->lock held.
8134 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8136 tg3_switch_clocks(tp);
8138 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8140 return tg3_reset_hw(tp, reset_phy);
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
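/* Fold a 32-bit hardware statistics register into a 64-bit software
 * counter: if the low word ends up smaller than the value just added it
 * has wrapped, so a carry is propagated into the high word.
 */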
8150 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8152 struct tg3_hw_stats *sp = tp->hw_stats;
8154 if (!netif_carrier_ok(tp->dev))
8157 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8158 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8159 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8160 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8161 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8162 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8163 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8164 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8165 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8166 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8167 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8168 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8169 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8171 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8172 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8173 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8174 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8175 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8176 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8177 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8178 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8179 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8180 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8181 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8182 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8183 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8184 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8186 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8187 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8188 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8191 static void tg3_timer(unsigned long __opaque)
8193 struct tg3 *tp = (struct tg3 *) __opaque;
8198 spin_lock(&tp->lock);
8200 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8201 /* All of this garbage is because when using non-tagged
8202 * IRQ status the mailbox/status_block protocol the chip
8203 * uses with the cpu is race prone.
8205 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8206 tw32(GRC_LOCAL_CTRL,
8207 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8209 tw32(HOSTCC_MODE, tp->coalesce_mode |
8210 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8213 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8214 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8215 spin_unlock(&tp->lock);
8216 schedule_work(&tp->reset_task);
8221 /* This part only runs once per second. */
8222 if (!--tp->timer_counter) {
8223 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8224 tg3_periodic_fetch_stats(tp);
8226 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8230 mac_stat = tr32(MAC_STATUS);
8233 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
8234 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8236 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8240 tg3_setup_phy(tp, 0);
8241 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8242 u32 mac_stat = tr32(MAC_STATUS);
8245 if (netif_carrier_ok(tp->dev) &&
8246 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8249 if (! netif_carrier_ok(tp->dev) &&
8250 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8251 MAC_STATUS_SIGNAL_DET))) {
8255 if (!tp->serdes_counter) {
8258 ~MAC_MODE_PORT_MODE_MASK));
8260 tw32_f(MAC_MODE, tp->mac_mode);
8263 tg3_setup_phy(tp, 0);
8265 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8266 tg3_serdes_parallel_detect(tp);
8268 tp->timer_counter = tp->timer_multiplier;
8271 /* Heartbeat is only sent once every 2 seconds.
8273 * The heartbeat is to tell the ASF firmware that the host
8274 * driver is still alive. In the event that the OS crashes,
8275 * ASF needs to reset the hardware to free up the FIFO space
8276 * that may be filled with rx packets destined for the host.
8277 * If the FIFO is full, ASF will no longer function properly.
8279 * Unintended resets have been reported on real time kernels
8280 * where the timer doesn't run on time. Netpoll will also have
8283 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8284 * to check the ring condition when the heartbeat is expiring
8285 * before doing the reset. This will prevent most unintended
8288 if (!--tp->asf_counter) {
8289 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8290 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8291 tg3_wait_for_event_ack(tp);
8293 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8294 FWCMD_NICDRV_ALIVE3);
8295 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8296 /* 5 seconds timeout */
8297 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
8299 tg3_generate_fw_event(tp);
8301 tp->asf_counter = tp->asf_multiplier;
8304 spin_unlock(&tp->lock);
8307 tp->timer.expires = jiffies + tp->timer_offset;
8308 add_timer(&tp->timer);
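/* Select the top-half handler and IRQ flags for vector irq_num (MSI,
 * one-shot MSI, tagged-status or plain INTx), label MSI-X vectors as
 * "<ifname>-<n>" and request the interrupt for the matching NAPI context.
 */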
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		fn = tg3_msi;
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			fn = tg3_msi_1shot;
		flags = IRQF_SAMPLE_RANDOM;
	} else {
		fn = tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
	}
8338 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8341 static int tg3_test_interrupt(struct tg3 *tp)
8343 struct tg3_napi *tnapi = &tp->napi[0];
8344 struct net_device *dev = tp->dev;
8345 int err, i, intr_ok = 0;
8348 if (!netif_running(dev))
8351 tg3_disable_ints(tp);
8353 free_irq(tnapi->irq_vec, tnapi);
8356 * Turn off MSI one shot mode. Otherwise this test has no
8357 * observable way to know whether the interrupt was delivered.
8359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
8360 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8361 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8362 tw32(MSGINT_MODE, val);
8365 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8366 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8370 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8371 tg3_enable_ints(tp);
8373 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8376 for (i = 0; i < 5; i++) {
8377 u32 int_mbox, misc_host_ctrl;
8379 int_mbox = tr32_mailbox(tnapi->int_mbox);
8380 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8382 if ((int_mbox != 0) ||
8383 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8391 tg3_disable_ints(tp);
8393 free_irq(tnapi->irq_vec, tnapi);
8395 err = tg3_request_irq(tp, 0);
8401 /* Reenable MSI one shot mode. */
8402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
8403 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8404 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8405 tw32(MSGINT_MODE, val);
8413 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
8414 * successfully restored
8416 static int tg3_test_msi(struct tg3 *tp)
8421 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8424 /* Turn off SERR reporting in case MSI terminates with Master
8427 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8428 pci_write_config_word(tp->pdev, PCI_COMMAND,
8429 pci_cmd & ~PCI_COMMAND_SERR);
8431 err = tg3_test_interrupt(tp);
8433 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8438 /* other failures */
8442 /* MSI test failed, go back to INTx mode */
8443 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8444 "switching to INTx mode. Please report this failure to "
8445 "the PCI maintainer and include system chipset information.\n",
8448 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8450 pci_disable_msi(tp->pdev);
8452 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8454 err = tg3_request_irq(tp, 0);
8458 /* Need to reset the chip because the MSI cycle may have terminated
8459 * with Master Abort.
8461 tg3_full_lock(tp, 1);
8463 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8464 err = tg3_init_hw(tp, 1);
8466 tg3_full_unlock(tp);
8469 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8474 static int tg3_request_firmware(struct tg3 *tp)
8476 const __be32 *fw_data;
8478 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8479 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
8480 tp->dev->name, tp->fw_needed);
8484 fw_data = (void *)tp->fw->data;
8486 /* Firmware blob starts with version numbers, followed by
8487 * start address and _full_ length including BSS sections
8488 * (which must be longer than the actual data, of course
8491 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8492 if (tp->fw_len < (tp->fw->size - 12)) {
8493 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
8494 tp->dev->name, tp->fw_len, tp->fw_needed);
8495 release_firmware(tp->fw);
8500 /* We no longer need firmware; we have it. */
8501 tp->fw_needed = NULL;
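/* Try to enable MSI-X with one RX ring per online CPU plus one vector for
 * link and other slow-path events.  If the PCI core grants fewer vectors
 * the request is retried with the smaller count; on failure the caller
 * falls back to plain MSI or INTx.
 */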
8505 static bool tg3_enable_msix(struct tg3 *tp)
8507 int i, rc, cpus = num_online_cpus();
8508 struct msix_entry msix_ent[tp->irq_max];
8511 /* Just fallback to the simpler MSI mode. */
8515 * We want as many rx rings enabled as there are cpus.
8516 * The first MSIX vector only deals with link interrupts, etc,
8517 * so we add one to the number of vectors we are requesting.
8519 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8521 for (i = 0; i < tp->irq_max; i++) {
8522 msix_ent[i].entry = i;
8523 msix_ent[i].vector = 0;
8526 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8528 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
8530 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8533 "%s: Requested %d MSI-X vectors, received %d\n",
8534 tp->dev->name, tp->irq_cnt, rc);
8538 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8540 for (i = 0; i < tp->irq_max; i++)
8541 tp->napi[i].irq_vec = msix_ent[i].vector;
8543 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8548 static void tg3_ints_init(struct tg3 *tp)
8550 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8551 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8552 /* All MSI supporting chips should support tagged
8553 * status. Assert that this is the case.
8555 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8556 "Not using MSI.\n", tp->dev->name);
8560 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8561 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8562 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8563 pci_enable_msi(tp->pdev) == 0)
8564 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8566 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8567 u32 msi_mode = tr32(MSGINT_MODE);
8568 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8569 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8570 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8573 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8575 tp->napi[0].irq_vec = tp->pdev->irq;
8576 tp->dev->real_num_tx_queues = 1;
8580 static void tg3_ints_fini(struct tg3 *tp)
8582 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8583 pci_disable_msix(tp->pdev);
8584 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8585 pci_disable_msi(tp->pdev);
8586 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8587 tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
8590 static int tg3_open(struct net_device *dev)
8592 struct tg3 *tp = netdev_priv(dev);
	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			printk(KERN_WARNING "%s: TSO capability disabled.\n",
			       tp->dev->name);
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
			printk(KERN_NOTICE "%s: TSO capability restored.\n",
			       tp->dev->name);
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}
8611 netif_carrier_off(tp->dev);
8613 err = tg3_set_power_state(tp, PCI_D0);
8617 tg3_full_lock(tp, 0);
8619 tg3_disable_ints(tp);
8620 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8622 tg3_full_unlock(tp);
8625 * Setup interrupts first so we know how
8626 * many NAPI resources to allocate
8630 /* The placement of this call is tied
8631 * to the setup and use of Host TX descriptors.
8633 err = tg3_alloc_consistent(tp);
8637 tg3_napi_enable(tp);
8639 for (i = 0; i < tp->irq_cnt; i++) {
8640 struct tg3_napi *tnapi = &tp->napi[i];
8641 err = tg3_request_irq(tp, i);
8643 for (i--; i >= 0; i--)
8644 free_irq(tnapi->irq_vec, tnapi);
8652 tg3_full_lock(tp, 0);
8654 err = tg3_init_hw(tp, 1);
8656 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8659 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8660 tp->timer_offset = HZ;
8662 tp->timer_offset = HZ / 10;
8664 BUG_ON(tp->timer_offset > HZ);
8665 tp->timer_counter = tp->timer_multiplier =
8666 (HZ / tp->timer_offset);
8667 tp->asf_counter = tp->asf_multiplier =
8668 ((HZ / tp->timer_offset) * 2);
8670 init_timer(&tp->timer);
8671 tp->timer.expires = jiffies + tp->timer_offset;
8672 tp->timer.data = (unsigned long) tp;
8673 tp->timer.function = tg3_timer;
8676 tg3_full_unlock(tp);
8681 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8682 err = tg3_test_msi(tp);
8685 tg3_full_lock(tp, 0);
8686 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8688 tg3_full_unlock(tp);
8693 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8694 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8695 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
8696 u32 val = tr32(PCIE_TRANSACTION_CFG);
8698 tw32(PCIE_TRANSACTION_CFG,
8699 val | PCIE_TRANS_CFG_1SHOT_MSI);
8705 tg3_full_lock(tp, 0);
8707 add_timer(&tp->timer);
8708 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8709 tg3_enable_ints(tp);
8711 tg3_full_unlock(tp);
8713 netif_tx_start_all_queues(dev);
8718 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8719 struct tg3_napi *tnapi = &tp->napi[i];
8720 free_irq(tnapi->irq_vec, tnapi);
8724 tg3_napi_disable(tp);
8725 tg3_free_consistent(tp);
8733 /*static*/ void tg3_dump_state(struct tg3 *tp)
8735 u32 val32, val32_2, val32_3, val32_4, val32_5;
	struct tg3_hw_status *sblk = tp->napi[0].hw_status;
8740 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8741 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8742 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8746 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8747 tr32(MAC_MODE), tr32(MAC_STATUS));
8748 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8749 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8750 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8751 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8752 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8753 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8755 /* Send data initiator control block */
8756 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8757 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8758 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8759 tr32(SNDDATAI_STATSCTRL));
8761 /* Send data completion control block */
8762 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8764 /* Send BD ring selector block */
8765 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8766 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8768 /* Send BD initiator control block */
8769 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8770 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8772 /* Send BD completion control block */
8773 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8775 /* Receive list placement control block */
8776 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8777 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8778 printk(" RCVLPC_STATSCTRL[%08x]\n",
8779 tr32(RCVLPC_STATSCTRL));
8781 /* Receive data and receive BD initiator control block */
8782 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8783 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8785 /* Receive data completion control block */
8786 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8789 /* Receive BD initiator control block */
8790 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8791 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8793 /* Receive BD completion control block */
8794 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8795 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8797 /* Receive list selector control block */
8798 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8799 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8801 /* Mbuf cluster free block */
8802 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8803 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8805 /* Host coalescing control block */
8806 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8807 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8808 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8809 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8810 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8811 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8812 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8813 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8814 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8815 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8816 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8817 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8819 /* Memory arbiter control block */
8820 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8821 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8823 /* Buffer manager control block */
8824 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8825 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8826 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8827 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8828 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8829 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8830 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8831 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8833 /* Read DMA control block */
8834 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8835 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8837 /* Write DMA control block */
8838 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8839 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8841 /* DMA completion block */
8842 printk("DEBUG: DMAC_MODE[%08x]\n",
8846 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8847 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8848 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8849 tr32(GRC_LOCAL_CTRL));
8852 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8853 tr32(RCVDBDI_JUMBO_BD + 0x0),
8854 tr32(RCVDBDI_JUMBO_BD + 0x4),
8855 tr32(RCVDBDI_JUMBO_BD + 0x8),
8856 tr32(RCVDBDI_JUMBO_BD + 0xc));
8857 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8858 tr32(RCVDBDI_STD_BD + 0x0),
8859 tr32(RCVDBDI_STD_BD + 0x4),
8860 tr32(RCVDBDI_STD_BD + 0x8),
8861 tr32(RCVDBDI_STD_BD + 0xc));
8862 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8863 tr32(RCVDBDI_MINI_BD + 0x0),
8864 tr32(RCVDBDI_MINI_BD + 0x4),
8865 tr32(RCVDBDI_MINI_BD + 0x8),
8866 tr32(RCVDBDI_MINI_BD + 0xc));
8868 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8869 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8870 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8871 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8872 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8873 val32, val32_2, val32_3, val32_4);
8875 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8876 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8877 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8878 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8879 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8880 val32, val32_2, val32_3, val32_4);
8882 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8883 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8884 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8885 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8886 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8887 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8888 val32, val32_2, val32_3, val32_4, val32_5);
	/* SW status block */
	printk("DEBUG: "
	       "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       sblk->status,
	       sblk->status_tag,
	       sblk->rx_jumbo_consumer,
	       sblk->rx_consumer,
	       sblk->rx_mini_consumer,
	       sblk->idx[0].rx_producer,
	       sblk->idx[0].tx_consumer);
8901 /* SW statistics block */
8902 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8903 ((u32 *)tp->hw_stats)[0],
8904 ((u32 *)tp->hw_stats)[1],
8905 ((u32 *)tp->hw_stats)[2],
8906 ((u32 *)tp->hw_stats)[3]);
8909 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8910 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8911 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8912 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8913 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8915 /* NIC side send descriptors. */
8916 for (i = 0; i < 6; i++) {
8919 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8920 + (i * sizeof(struct tg3_tx_buffer_desc));
8921 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8923 readl(txd + 0x0), readl(txd + 0x4),
8924 readl(txd + 0x8), readl(txd + 0xc));
8927 /* NIC side RX descriptors. */
8928 for (i = 0; i < 6; i++) {
8931 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8932 + (i * sizeof(struct tg3_rx_buffer_desc));
8933 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8935 readl(rxd + 0x0), readl(rxd + 0x4),
8936 readl(rxd + 0x8), readl(rxd + 0xc));
8937 rxd += (4 * sizeof(u32));
8938 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8940 readl(rxd + 0x0), readl(rxd + 0x4),
8941 readl(rxd + 0x8), readl(rxd + 0xc));
8944 for (i = 0; i < 6; i++) {
8947 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8948 + (i * sizeof(struct tg3_rx_buffer_desc));
8949 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8951 readl(rxd + 0x0), readl(rxd + 0x4),
8952 readl(rxd + 0x8), readl(rxd + 0xc));
8953 rxd += (4 * sizeof(u32));
8954 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8956 readl(rxd + 0x0), readl(rxd + 0x4),
8957 readl(rxd + 0x8), readl(rxd + 0xc));
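/* Note on the NIC TXD/RXD dumps above: the descriptors are read straight
 * out of the adapter's on-chip SRAM through the memory-mapped window at
 * tp->regs + NIC_SRAM_WIN_BASE, so they show what the hardware itself is
 * working with rather than the host-side copies of the rings.
 */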
8962 static struct net_device_stats *tg3_get_stats(struct net_device *);
8963 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8965 static int tg3_close(struct net_device *dev)
8968 struct tg3 *tp = netdev_priv(dev);
8970 tg3_napi_disable(tp);
8971 cancel_work_sync(&tp->reset_task);
8973 netif_tx_stop_all_queues(dev);
8975 del_timer_sync(&tp->timer);
8979 tg3_full_lock(tp, 1);
8984 tg3_disable_ints(tp);
8986 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8988 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8990 tg3_full_unlock(tp);
8992 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8993 struct tg3_napi *tnapi = &tp->napi[i];
8994 free_irq(tnapi->irq_vec, tnapi);
8999 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
9000 sizeof(tp->net_stats_prev));
9001 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9002 sizeof(tp->estats_prev));
9004 tg3_free_consistent(tp);
9006 tg3_set_power_state(tp, PCI_D3hot);
9008 netif_carrier_off(tp->dev);
9013 static inline unsigned long get_stat64(tg3_stat64_t *val)
9017 #if (BITS_PER_LONG == 32)
9020 ret = ((u64)val->high << 32) | ((u64)val->low);
9025 static inline u64 get_estat64(tg3_stat64_t *val)
9027 return ((u64)val->high << 32) | ((u64)val->low);
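/* The hardware statistics block keeps each counter as a high/low pair of
 * 32-bit words; the helpers above fold the pair back into one 64-bit
 * value.  get_stat64() returns an unsigned long, so on a 32-bit host it
 * can only carry the low 32 bits of the counter anyway.
 */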
9030 static unsigned long calc_crc_errors(struct tg3 *tp)
9032 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9034 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9035 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9039 spin_lock_bh(&tp->lock);
9040 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9041 tg3_writephy(tp, MII_TG3_TEST1,
9042 val | MII_TG3_TEST1_CRC_EN);
9043 tg3_readphy(tp, 0x14, &val);
9046 spin_unlock_bh(&tp->lock);
9048 tp->phy_crc_errors += val;
9050 return tp->phy_crc_errors;
9053 return get_stat64(&hw_stats->rx_fcs_errors);
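/* On 5700/5701 copper parts the CRC error count is taken from the PHY:
 * the counter is enabled through MII_TG3_TEST1, read back from PHY
 * register 0x14 and accumulated in tp->phy_crc_errors, presumably
 * because the MAC statistics block is not reliable for this counter on
 * those early chips.  Every other chip simply reports the MAC's
 * rx_fcs_errors counter.
 */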
9056 #define ESTAT_ADD(member) \
9057 estats->member = old_estats->member + \
9058 get_estat64(&hw_stats->member)
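/* ESTAT_ADD() adds the live hardware counter to the snapshot saved in
 * tg3_close() (tp->estats_prev), so the totals reported through ethtool
 * keep accumulating across interface down/up cycles and chip resets.
 */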
9060 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9062 struct tg3_ethtool_stats *estats = &tp->estats;
9063 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9064 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9069 ESTAT_ADD(rx_octets);
9070 ESTAT_ADD(rx_fragments);
9071 ESTAT_ADD(rx_ucast_packets);
9072 ESTAT_ADD(rx_mcast_packets);
9073 ESTAT_ADD(rx_bcast_packets);
9074 ESTAT_ADD(rx_fcs_errors);
9075 ESTAT_ADD(rx_align_errors);
9076 ESTAT_ADD(rx_xon_pause_rcvd);
9077 ESTAT_ADD(rx_xoff_pause_rcvd);
9078 ESTAT_ADD(rx_mac_ctrl_rcvd);
9079 ESTAT_ADD(rx_xoff_entered);
9080 ESTAT_ADD(rx_frame_too_long_errors);
9081 ESTAT_ADD(rx_jabbers);
9082 ESTAT_ADD(rx_undersize_packets);
9083 ESTAT_ADD(rx_in_length_errors);
9084 ESTAT_ADD(rx_out_length_errors);
9085 ESTAT_ADD(rx_64_or_less_octet_packets);
9086 ESTAT_ADD(rx_65_to_127_octet_packets);
9087 ESTAT_ADD(rx_128_to_255_octet_packets);
9088 ESTAT_ADD(rx_256_to_511_octet_packets);
9089 ESTAT_ADD(rx_512_to_1023_octet_packets);
9090 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9091 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9092 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9093 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9094 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9096 ESTAT_ADD(tx_octets);
9097 ESTAT_ADD(tx_collisions);
9098 ESTAT_ADD(tx_xon_sent);
9099 ESTAT_ADD(tx_xoff_sent);
9100 ESTAT_ADD(tx_flow_control);
9101 ESTAT_ADD(tx_mac_errors);
9102 ESTAT_ADD(tx_single_collisions);
9103 ESTAT_ADD(tx_mult_collisions);
9104 ESTAT_ADD(tx_deferred);
9105 ESTAT_ADD(tx_excessive_collisions);
9106 ESTAT_ADD(tx_late_collisions);
9107 ESTAT_ADD(tx_collide_2times);
9108 ESTAT_ADD(tx_collide_3times);
9109 ESTAT_ADD(tx_collide_4times);
9110 ESTAT_ADD(tx_collide_5times);
9111 ESTAT_ADD(tx_collide_6times);
9112 ESTAT_ADD(tx_collide_7times);
9113 ESTAT_ADD(tx_collide_8times);
9114 ESTAT_ADD(tx_collide_9times);
9115 ESTAT_ADD(tx_collide_10times);
9116 ESTAT_ADD(tx_collide_11times);
9117 ESTAT_ADD(tx_collide_12times);
9118 ESTAT_ADD(tx_collide_13times);
9119 ESTAT_ADD(tx_collide_14times);
9120 ESTAT_ADD(tx_collide_15times);
9121 ESTAT_ADD(tx_ucast_packets);
9122 ESTAT_ADD(tx_mcast_packets);
9123 ESTAT_ADD(tx_bcast_packets);
9124 ESTAT_ADD(tx_carrier_sense_errors);
9125 ESTAT_ADD(tx_discards);
9126 ESTAT_ADD(tx_errors);
9128 ESTAT_ADD(dma_writeq_full);
9129 ESTAT_ADD(dma_write_prioq_full);
9130 ESTAT_ADD(rxbds_empty);
9131 ESTAT_ADD(rx_discards);
9132 ESTAT_ADD(rx_errors);
9133 ESTAT_ADD(rx_threshold_hit);
9135 ESTAT_ADD(dma_readq_full);
9136 ESTAT_ADD(dma_read_prioq_full);
9137 ESTAT_ADD(tx_comp_queue_full);
9139 ESTAT_ADD(ring_set_send_prod_index);
9140 ESTAT_ADD(ring_status_update);
9141 ESTAT_ADD(nic_irqs);
9142 ESTAT_ADD(nic_avoided_irqs);
9143 ESTAT_ADD(nic_tx_threshold_hit);
9148 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
9150 struct tg3 *tp = netdev_priv(dev);
9151 struct net_device_stats *stats = &tp->net_stats;
9152 struct net_device_stats *old_stats = &tp->net_stats_prev;
9153 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9158 stats->rx_packets = old_stats->rx_packets +
9159 get_stat64(&hw_stats->rx_ucast_packets) +
9160 get_stat64(&hw_stats->rx_mcast_packets) +
9161 get_stat64(&hw_stats->rx_bcast_packets);
9163 stats->tx_packets = old_stats->tx_packets +
9164 get_stat64(&hw_stats->tx_ucast_packets) +
9165 get_stat64(&hw_stats->tx_mcast_packets) +
9166 get_stat64(&hw_stats->tx_bcast_packets);
9168 stats->rx_bytes = old_stats->rx_bytes +
9169 get_stat64(&hw_stats->rx_octets);
9170 stats->tx_bytes = old_stats->tx_bytes +
9171 get_stat64(&hw_stats->tx_octets);
9173 stats->rx_errors = old_stats->rx_errors +
9174 get_stat64(&hw_stats->rx_errors);
9175 stats->tx_errors = old_stats->tx_errors +
9176 get_stat64(&hw_stats->tx_errors) +
9177 get_stat64(&hw_stats->tx_mac_errors) +
9178 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9179 get_stat64(&hw_stats->tx_discards);
9181 stats->multicast = old_stats->multicast +
9182 get_stat64(&hw_stats->rx_mcast_packets);
9183 stats->collisions = old_stats->collisions +
9184 get_stat64(&hw_stats->tx_collisions);
9186 stats->rx_length_errors = old_stats->rx_length_errors +
9187 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9188 get_stat64(&hw_stats->rx_undersize_packets);
9190 stats->rx_over_errors = old_stats->rx_over_errors +
9191 get_stat64(&hw_stats->rxbds_empty);
9192 stats->rx_frame_errors = old_stats->rx_frame_errors +
9193 get_stat64(&hw_stats->rx_align_errors);
9194 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9195 get_stat64(&hw_stats->tx_discards);
9196 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9197 get_stat64(&hw_stats->tx_carrier_sense_errors);
9199 stats->rx_crc_errors = old_stats->rx_crc_errors +
9200 calc_crc_errors(tp);
9202 stats->rx_missed_errors = old_stats->rx_missed_errors +
9203 get_stat64(&hw_stats->rx_discards);
9208 static inline u32 calc_crc(unsigned char *buf, int len)
9216 for (j = 0; j < len; j++) {
9219 for (k = 0; k < 8; k++) {
9233 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9235 /* accept or reject all multicast frames */
9236 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9237 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9238 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9239 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9242 static void __tg3_set_rx_mode(struct net_device *dev)
9244 struct tg3 *tp = netdev_priv(dev);
9247 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9248 RX_MODE_KEEP_VLAN_TAG);
9250 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9251 * flag clear.
9252 */
9253 #if TG3_VLAN_TAG_USED
9255 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9256 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9258 /* By definition, VLAN is disabled always in this
9259 * case.
9260 */
9261 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9262 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9265 if (dev->flags & IFF_PROMISC) {
9266 /* Promiscuous mode. */
9267 rx_mode |= RX_MODE_PROMISC;
9268 } else if (dev->flags & IFF_ALLMULTI) {
9269 /* Accept all multicast. */
9270 tg3_set_multi (tp, 1);
9271 } else if (dev->mc_count < 1) {
9272 /* Reject all multicast. */
9273 tg3_set_multi (tp, 0);
9275 /* Accept one or more multicast(s). */
9276 struct dev_mc_list *mclist;
9278 u32 mc_filter[4] = { 0, };
9283 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
9284 i++, mclist = mclist->next) {
9286 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
9288 regidx = (bit & 0x60) >> 5;
9290 mc_filter[regidx] |= (1 << bit);
9293 tw32(MAC_HASH_REG_0, mc_filter[0]);
9294 tw32(MAC_HASH_REG_1, mc_filter[1]);
9295 tw32(MAC_HASH_REG_2, mc_filter[2]);
9296 tw32(MAC_HASH_REG_3, mc_filter[3]);
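/* The multicast filter above is a 128-bit hash: calc_crc() computes an
 * Ethernet-style CRC-32 of each multicast address, a 7-bit hash derived
 * from that CRC selects one of the 128 filter bits spread across the
 * four 32-bit MAC_HASH registers (bits 6:5 pick the register, the low
 * bits pick the position within it), and frames whose hash bit is clear
 * are dropped by the MAC.
 */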
9299 if (rx_mode != tp->rx_mode) {
9300 tp->rx_mode = rx_mode;
9301 tw32_f(MAC_RX_MODE, rx_mode);
9306 static void tg3_set_rx_mode(struct net_device *dev)
9308 struct tg3 *tp = netdev_priv(dev);
9310 if (!netif_running(dev))
9313 tg3_full_lock(tp, 0);
9314 __tg3_set_rx_mode(dev);
9315 tg3_full_unlock(tp);
9318 #define TG3_REGDUMP_LEN (32 * 1024)
9320 static int tg3_get_regs_len(struct net_device *dev)
9322 return TG3_REGDUMP_LEN;
9325 static void tg3_get_regs(struct net_device *dev,
9326 struct ethtool_regs *regs, void *_p)
9329 struct tg3 *tp = netdev_priv(dev);
9335 memset(p, 0, TG3_REGDUMP_LEN);
9337 if (tp->link_config.phy_is_low_power)
9340 tg3_full_lock(tp, 0);
9342 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9343 #define GET_REG32_LOOP(base,len) \
9344 do { p = (u32 *)(orig_p + (base)); \
9345 for (i = 0; i < len; i += 4) \
9346 __GET_REG32((base) + i); \
9348 #define GET_REG32_1(reg) \
9349 do { p = (u32 *)(orig_p + (reg)); \
9350 __GET_REG32((reg)); \
9353 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9354 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9355 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9356 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9357 GET_REG32_1(SNDDATAC_MODE);
9358 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9359 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9360 GET_REG32_1(SNDBDC_MODE);
9361 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9362 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9363 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9364 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9365 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9366 GET_REG32_1(RCVDCC_MODE);
9367 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9368 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9369 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9370 GET_REG32_1(MBFREE_MODE);
9371 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9372 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9373 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9374 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9375 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9376 GET_REG32_1(RX_CPU_MODE);
9377 GET_REG32_1(RX_CPU_STATE);
9378 GET_REG32_1(RX_CPU_PGMCTR);
9379 GET_REG32_1(RX_CPU_HWBKPT);
9380 GET_REG32_1(TX_CPU_MODE);
9381 GET_REG32_1(TX_CPU_STATE);
9382 GET_REG32_1(TX_CPU_PGMCTR);
9383 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9384 GET_REG32_LOOP(FTQ_RESET, 0x120);
9385 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9386 GET_REG32_1(DMAC_MODE);
9387 GET_REG32_LOOP(GRC_MODE, 0x4c);
9388 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9389 GET_REG32_LOOP(NVRAM_CMD, 0x24);
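/* Each GET_REG32_LOOP()/GET_REG32_1() above repositions p to
 * orig_p + base before copying, so every register value lands at its
 * own offset inside the 32 KB dump buffer.  The holes between the
 * dumped blocks stay zero from the initial memset(), which lets the
 * dump be indexed directly by register offset.
 */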
9392 #undef GET_REG32_LOOP
9395 tg3_full_unlock(tp);
9398 static int tg3_get_eeprom_len(struct net_device *dev)
9400 struct tg3 *tp = netdev_priv(dev);
9402 return tp->nvram_size;
9405 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9407 struct tg3 *tp = netdev_priv(dev);
9410 u32 i, offset, len, b_offset, b_count;
9413 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9416 if (tp->link_config.phy_is_low_power)
9419 offset = eeprom->offset;
9423 eeprom->magic = TG3_EEPROM_MAGIC;
9426 /* adjustments to start on required 4 byte boundary */
9427 b_offset = offset & 3;
9428 b_count = 4 - b_offset;
9429 if (b_count > len) {
9430 /* i.e. offset=1 len=2 */
9433 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9436 memcpy(data, ((char*)&val) + b_offset, b_count);
9439 eeprom->len += b_count;
9442 /* read bytes up to the last 4 byte boundary */
9443 pd = &data[eeprom->len];
9444 for (i = 0; i < (len - (len & 3)); i += 4) {
9445 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9450 memcpy(pd + i, &val, 4);
9455 /* read last bytes not ending on 4 byte boundary */
9456 pd = &data[eeprom->len];
9458 b_offset = offset + len - b_count;
9459 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9462 memcpy(pd, &val, b_count);
9463 eeprom->len += b_count;
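/* Example of the splitting above: a request with offset=1 and len=10 is
 * served as a 3-byte head taken from the word at offset 0, one aligned
 * 4-byte read at offset 4, and a 3-byte tail taken from the word at
 * offset 8, so every NVRAM access stays 32-bit aligned.
 */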
9468 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9470 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9472 struct tg3 *tp = netdev_priv(dev);
9474 u32 offset, len, b_offset, odd_len;
9478 if (tp->link_config.phy_is_low_power)
9481 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9482 eeprom->magic != TG3_EEPROM_MAGIC)
9485 offset = eeprom->offset;
9488 if ((b_offset = (offset & 3))) {
9489 /* adjustments to start on required 4 byte boundary */
9490 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9501 /* adjustments to end on required 4 byte boundary */
9503 len = (len + 3) & ~3;
9504 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9510 if (b_offset || odd_len) {
9511 buf = kmalloc(len, GFP_KERNEL);
9515 memcpy(buf, &start, 4);
9517 memcpy(buf+len-4, &end, 4);
9518 memcpy(buf + b_offset, data, eeprom->len);
9521 ret = tg3_nvram_write_block(tp, offset, len, buf);
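/* Unaligned or odd-length writes are handled read-modify-write style:
 * the partial words at either end are first read back from NVRAM
 * (start/end above), merged with the caller's data in a temporary
 * buffer, and the whole 4-byte-aligned span is then written in a single
 * tg3_nvram_write_block() call.
 */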
9529 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9531 struct tg3 *tp = netdev_priv(dev);
9533 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9534 struct phy_device *phydev;
9535 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9537 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9538 return phy_ethtool_gset(phydev, cmd);
9541 cmd->supported = (SUPPORTED_Autoneg);
9543 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9544 cmd->supported |= (SUPPORTED_1000baseT_Half |
9545 SUPPORTED_1000baseT_Full);
9547 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9548 cmd->supported |= (SUPPORTED_100baseT_Half |
9549 SUPPORTED_100baseT_Full |
9550 SUPPORTED_10baseT_Half |
9551 SUPPORTED_10baseT_Full |
9553 cmd->port = PORT_TP;
9555 cmd->supported |= SUPPORTED_FIBRE;
9556 cmd->port = PORT_FIBRE;
9559 cmd->advertising = tp->link_config.advertising;
9560 if (netif_running(dev)) {
9561 cmd->speed = tp->link_config.active_speed;
9562 cmd->duplex = tp->link_config.active_duplex;
9564 cmd->phy_address = tp->phy_addr;
9565 cmd->transceiver = XCVR_INTERNAL;
9566 cmd->autoneg = tp->link_config.autoneg;
9572 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9574 struct tg3 *tp = netdev_priv(dev);
9576 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9577 struct phy_device *phydev;
9578 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9580 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9581 return phy_ethtool_sset(phydev, cmd);
9584 if (cmd->autoneg != AUTONEG_ENABLE &&
9585 cmd->autoneg != AUTONEG_DISABLE)
9588 if (cmd->autoneg == AUTONEG_DISABLE &&
9589 cmd->duplex != DUPLEX_FULL &&
9590 cmd->duplex != DUPLEX_HALF)
9593 if (cmd->autoneg == AUTONEG_ENABLE) {
9594 u32 mask = ADVERTISED_Autoneg |
9596 ADVERTISED_Asym_Pause;
9598 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9599 mask |= ADVERTISED_1000baseT_Half |
9600 ADVERTISED_1000baseT_Full;
9602 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9603 mask |= ADVERTISED_100baseT_Half |
9604 ADVERTISED_100baseT_Full |
9605 ADVERTISED_10baseT_Half |
9606 ADVERTISED_10baseT_Full |
9609 mask |= ADVERTISED_FIBRE;
9611 if (cmd->advertising & ~mask)
9614 mask &= (ADVERTISED_1000baseT_Half |
9615 ADVERTISED_1000baseT_Full |
9616 ADVERTISED_100baseT_Half |
9617 ADVERTISED_100baseT_Full |
9618 ADVERTISED_10baseT_Half |
9619 ADVERTISED_10baseT_Full);
9621 cmd->advertising &= mask;
9623 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9624 if (cmd->speed != SPEED_1000)
9627 if (cmd->duplex != DUPLEX_FULL)
9630 if (cmd->speed != SPEED_100 &&
9631 cmd->speed != SPEED_10)
9636 tg3_full_lock(tp, 0);
9638 tp->link_config.autoneg = cmd->autoneg;
9639 if (cmd->autoneg == AUTONEG_ENABLE) {
9640 tp->link_config.advertising = (cmd->advertising |
9641 ADVERTISED_Autoneg);
9642 tp->link_config.speed = SPEED_INVALID;
9643 tp->link_config.duplex = DUPLEX_INVALID;
9645 tp->link_config.advertising = 0;
9646 tp->link_config.speed = cmd->speed;
9647 tp->link_config.duplex = cmd->duplex;
9650 tp->link_config.orig_speed = tp->link_config.speed;
9651 tp->link_config.orig_duplex = tp->link_config.duplex;
9652 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9654 if (netif_running(dev))
9655 tg3_setup_phy(tp, 1);
9657 tg3_full_unlock(tp);
9662 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9664 struct tg3 *tp = netdev_priv(dev);
9666 strcpy(info->driver, DRV_MODULE_NAME);
9667 strcpy(info->version, DRV_MODULE_VERSION);
9668 strcpy(info->fw_version, tp->fw_ver);
9669 strcpy(info->bus_info, pci_name(tp->pdev));
9672 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9674 struct tg3 *tp = netdev_priv(dev);
9676 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9677 device_can_wakeup(&tp->pdev->dev))
9678 wol->supported = WAKE_MAGIC;
9682 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9683 device_can_wakeup(&tp->pdev->dev))
9684 wol->wolopts = WAKE_MAGIC;
9685 memset(&wol->sopass, 0, sizeof(wol->sopass));
9688 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9690 struct tg3 *tp = netdev_priv(dev);
9691 struct device *dp = &tp->pdev->dev;
9693 if (wol->wolopts & ~WAKE_MAGIC)
9695 if ((wol->wolopts & WAKE_MAGIC) &&
9696 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9699 spin_lock_bh(&tp->lock);
9700 if (wol->wolopts & WAKE_MAGIC) {
9701 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9702 device_set_wakeup_enable(dp, true);
9704 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9705 device_set_wakeup_enable(dp, false);
9707 spin_unlock_bh(&tp->lock);
9712 static u32 tg3_get_msglevel(struct net_device *dev)
9714 struct tg3 *tp = netdev_priv(dev);
9715 return tp->msg_enable;
9718 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9720 struct tg3 *tp = netdev_priv(dev);
9721 tp->msg_enable = value;
9724 static int tg3_set_tso(struct net_device *dev, u32 value)
9726 struct tg3 *tp = netdev_priv(dev);
9728 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9733 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9734 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9735 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9737 dev->features |= NETIF_F_TSO6;
9738 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9739 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9740 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9741 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9742 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9743 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9744 dev->features |= NETIF_F_TSO_ECN;
9746 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9748 return ethtool_op_set_tso(dev, value);
9751 static int tg3_nway_reset(struct net_device *dev)
9753 struct tg3 *tp = netdev_priv(dev);
9756 if (!netif_running(dev))
9759 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9762 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9763 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9765 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9769 spin_lock_bh(&tp->lock);
9771 tg3_readphy(tp, MII_BMCR, &bmcr);
9772 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9773 ((bmcr & BMCR_ANENABLE) ||
9774 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9775 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9779 spin_unlock_bh(&tp->lock);
9785 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9787 struct tg3 *tp = netdev_priv(dev);
9789 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9790 ering->rx_mini_max_pending = 0;
9791 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9792 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9794 ering->rx_jumbo_max_pending = 0;
9796 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9798 ering->rx_pending = tp->rx_pending;
9799 ering->rx_mini_pending = 0;
9800 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9801 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9803 ering->rx_jumbo_pending = 0;
9805 ering->tx_pending = tp->napi[0].tx_pending;
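/* tg3_set_ringparam() below rejects tx_pending values of MAX_SKB_FRAGS
 * or less (three times that when the TSO_BUG workaround is set),
 * presumably so that a maximally fragmented skb, or one the driver has
 * to segment itself, can always fit in the TX ring.
 */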
9808 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9810 struct tg3 *tp = netdev_priv(dev);
9811 int i, irq_sync = 0, err = 0;
9813 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9814 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9815 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9816 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9817 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9818 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9821 if (netif_running(dev)) {
9827 tg3_full_lock(tp, irq_sync);
9829 tp->rx_pending = ering->rx_pending;
9831 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9832 tp->rx_pending > 63)
9833 tp->rx_pending = 63;
9834 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9836 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9837 tp->napi[i].tx_pending = ering->tx_pending;
9839 if (netif_running(dev)) {
9840 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9841 err = tg3_restart_hw(tp, 1);
9843 tg3_netif_start(tp);
9846 tg3_full_unlock(tp);
9848 if (irq_sync && !err)
9854 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9856 struct tg3 *tp = netdev_priv(dev);
9858 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9860 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9861 epause->rx_pause = 1;
9863 epause->rx_pause = 0;
9865 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9866 epause->tx_pause = 1;
9868 epause->tx_pause = 0;
9871 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9873 struct tg3 *tp = netdev_priv(dev);
9876 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9877 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9880 if (epause->autoneg) {
9882 struct phy_device *phydev;
9884 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9886 if (epause->rx_pause) {
9887 if (epause->tx_pause)
9888 newadv = ADVERTISED_Pause;
9890 newadv = ADVERTISED_Pause |
9891 ADVERTISED_Asym_Pause;
9892 } else if (epause->tx_pause) {
9893 newadv = ADVERTISED_Asym_Pause;
9897 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9898 u32 oldadv = phydev->advertising &
9900 ADVERTISED_Asym_Pause);
9901 if (oldadv != newadv) {
9902 phydev->advertising &=
9903 ~(ADVERTISED_Pause |
9904 ADVERTISED_Asym_Pause);
9905 phydev->advertising |= newadv;
9906 err = phy_start_aneg(phydev);
9909 tp->link_config.advertising &=
9910 ~(ADVERTISED_Pause |
9911 ADVERTISED_Asym_Pause);
9912 tp->link_config.advertising |= newadv;
9915 if (epause->rx_pause)
9916 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9918 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9920 if (epause->tx_pause)
9921 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9923 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9925 if (netif_running(dev))
9926 tg3_setup_flow_control(tp, 0, 0);
9931 if (netif_running(dev)) {
9936 tg3_full_lock(tp, irq_sync);
9938 if (epause->autoneg)
9939 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9941 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9942 if (epause->rx_pause)
9943 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9945 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9946 if (epause->tx_pause)
9947 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9949 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9951 if (netif_running(dev)) {
9952 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9953 err = tg3_restart_hw(tp, 1);
9955 tg3_netif_start(tp);
9958 tg3_full_unlock(tp);
9964 static u32 tg3_get_rx_csum(struct net_device *dev)
9966 struct tg3 *tp = netdev_priv(dev);
9967 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9970 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9972 struct tg3 *tp = netdev_priv(dev);
9974 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9980 spin_lock_bh(&tp->lock);
9982 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9984 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9985 spin_unlock_bh(&tp->lock);
9990 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9992 struct tg3 *tp = netdev_priv(dev);
9994 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10000 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10001 ethtool_op_set_tx_ipv6_csum(dev, data);
10003 ethtool_op_set_tx_csum(dev, data);
10008 static int tg3_get_sset_count (struct net_device *dev, int sset)
10012 return TG3_NUM_TEST;
10014 return TG3_NUM_STATS;
10016 return -EOPNOTSUPP;
10020 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
10022 switch (stringset) {
10024 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10027 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10030 WARN_ON(1); /* we need a WARN() */
10035 static int tg3_phys_id(struct net_device *dev, u32 data)
10037 struct tg3 *tp = netdev_priv(dev);
10040 if (!netif_running(tp->dev))
10044 data = UINT_MAX / 2;
10046 for (i = 0; i < (data * 2); i++) {
10048 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10049 LED_CTRL_1000MBPS_ON |
10050 LED_CTRL_100MBPS_ON |
10051 LED_CTRL_10MBPS_ON |
10052 LED_CTRL_TRAFFIC_OVERRIDE |
10053 LED_CTRL_TRAFFIC_BLINK |
10054 LED_CTRL_TRAFFIC_LED);
10057 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10058 LED_CTRL_TRAFFIC_OVERRIDE);
10060 if (msleep_interruptible(500))
10063 tw32(MAC_LED_CTRL, tp->led_ctrl);
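/* The blink loop above alternates between forcing all speed/traffic
 * LEDs on and forcing them off at 500 ms intervals, i.e. one blink per
 * second for roughly 'data' seconds (capped at UINT_MAX / 2 when no
 * duration is given), and finally restores the saved tp->led_ctrl
 * value.
 */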
10067 static void tg3_get_ethtool_stats (struct net_device *dev,
10068 struct ethtool_stats *estats, u64 *tmp_stats)
10070 struct tg3 *tp = netdev_priv(dev);
10071 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10074 #define NVRAM_TEST_SIZE 0x100
10075 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10076 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10077 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10078 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10079 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10081 static int tg3_test_nvram(struct tg3 *tp)
10085 int i, j, k, err = 0, size;
10087 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10090 if (tg3_nvram_read(tp, 0, &magic) != 0)
10093 if (magic == TG3_EEPROM_MAGIC)
10094 size = NVRAM_TEST_SIZE;
10095 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10096 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10097 TG3_EEPROM_SB_FORMAT_1) {
10098 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10099 case TG3_EEPROM_SB_REVISION_0:
10100 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10102 case TG3_EEPROM_SB_REVISION_2:
10103 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10105 case TG3_EEPROM_SB_REVISION_3:
10106 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10113 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10114 size = NVRAM_SELFBOOT_HW_SIZE;
10118 buf = kmalloc(size, GFP_KERNEL);
10123 for (i = 0, j = 0; i < size; i += 4, j++) {
10124 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10131 /* Selfboot format */
10132 magic = be32_to_cpu(buf[0]);
10133 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10134 TG3_EEPROM_MAGIC_FW) {
10135 u8 *buf8 = (u8 *) buf, csum8 = 0;
10137 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10138 TG3_EEPROM_SB_REVISION_2) {
10139 /* For rev 2, the csum doesn't include the MBA. */
10140 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10142 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10145 for (i = 0; i < size; i++)
10158 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10159 TG3_EEPROM_MAGIC_HW) {
10160 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10161 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10162 u8 *buf8 = (u8 *) buf;
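/* The self-boot HW image stores per-byte parity bits alongside the
 * data; the special-cased offsets below are where those parity bits
 * live.  The check that follows enforces odd parity overall: a data
 * byte with an odd popcount must have its stored parity bit clear, one
 * with an even popcount must have it set.
 */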
10164 /* Separate the parity bits and the data bytes. */
10165 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10166 if ((i == 0) || (i == 8)) {
10170 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10171 parity[k++] = buf8[i] & msk;
10174 else if (i == 16) {
10178 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10179 parity[k++] = buf8[i] & msk;
10182 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10183 parity[k++] = buf8[i] & msk;
10186 data[j++] = buf8[i];
10190 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10191 u8 hw8 = hweight8(data[i]);
10193 if ((hw8 & 0x1) && parity[i])
10195 else if (!(hw8 & 0x1) && !parity[i])
10202 /* Bootstrap checksum at offset 0x10 */
10203 csum = calc_crc((unsigned char *) buf, 0x10);
10204 if (csum != be32_to_cpu(buf[0x10/4]))
10207 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10208 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10209 if (csum != be32_to_cpu(buf[0xfc/4]))
10219 #define TG3_SERDES_TIMEOUT_SEC 2
10220 #define TG3_COPPER_TIMEOUT_SEC 6
10222 static int tg3_test_link(struct tg3 *tp)
10226 if (!netif_running(tp->dev))
10229 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10230 max = TG3_SERDES_TIMEOUT_SEC;
10232 max = TG3_COPPER_TIMEOUT_SEC;
10234 for (i = 0; i < max; i++) {
10235 if (netif_carrier_ok(tp->dev))
10238 if (msleep_interruptible(1000))
10245 /* Only test the commonly used registers */
10246 static int tg3_test_registers(struct tg3 *tp)
10248 int i, is_5705, is_5750;
10249 u32 offset, read_mask, write_mask, val, save_val, read_val;
10253 #define TG3_FL_5705 0x1
10254 #define TG3_FL_NOT_5705 0x2
10255 #define TG3_FL_NOT_5788 0x4
10256 #define TG3_FL_NOT_5750 0x8
10260 /* MAC Control Registers */
10261 { MAC_MODE, TG3_FL_NOT_5705,
10262 0x00000000, 0x00ef6f8c },
10263 { MAC_MODE, TG3_FL_5705,
10264 0x00000000, 0x01ef6b8c },
10265 { MAC_STATUS, TG3_FL_NOT_5705,
10266 0x03800107, 0x00000000 },
10267 { MAC_STATUS, TG3_FL_5705,
10268 0x03800100, 0x00000000 },
10269 { MAC_ADDR_0_HIGH, 0x0000,
10270 0x00000000, 0x0000ffff },
10271 { MAC_ADDR_0_LOW, 0x0000,
10272 0x00000000, 0xffffffff },
10273 { MAC_RX_MTU_SIZE, 0x0000,
10274 0x00000000, 0x0000ffff },
10275 { MAC_TX_MODE, 0x0000,
10276 0x00000000, 0x00000070 },
10277 { MAC_TX_LENGTHS, 0x0000,
10278 0x00000000, 0x00003fff },
10279 { MAC_RX_MODE, TG3_FL_NOT_5705,
10280 0x00000000, 0x000007fc },
10281 { MAC_RX_MODE, TG3_FL_5705,
10282 0x00000000, 0x000007dc },
10283 { MAC_HASH_REG_0, 0x0000,
10284 0x00000000, 0xffffffff },
10285 { MAC_HASH_REG_1, 0x0000,
10286 0x00000000, 0xffffffff },
10287 { MAC_HASH_REG_2, 0x0000,
10288 0x00000000, 0xffffffff },
10289 { MAC_HASH_REG_3, 0x0000,
10290 0x00000000, 0xffffffff },
10292 /* Receive Data and Receive BD Initiator Control Registers. */
10293 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10294 0x00000000, 0xffffffff },
10295 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10296 0x00000000, 0xffffffff },
10297 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10298 0x00000000, 0x00000003 },
10299 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10300 0x00000000, 0xffffffff },
10301 { RCVDBDI_STD_BD+0, 0x0000,
10302 0x00000000, 0xffffffff },
10303 { RCVDBDI_STD_BD+4, 0x0000,
10304 0x00000000, 0xffffffff },
10305 { RCVDBDI_STD_BD+8, 0x0000,
10306 0x00000000, 0xffff0002 },
10307 { RCVDBDI_STD_BD+0xc, 0x0000,
10308 0x00000000, 0xffffffff },
10310 /* Receive BD Initiator Control Registers. */
10311 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10312 0x00000000, 0xffffffff },
10313 { RCVBDI_STD_THRESH, TG3_FL_5705,
10314 0x00000000, 0x000003ff },
10315 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10316 0x00000000, 0xffffffff },
10318 /* Host Coalescing Control Registers. */
10319 { HOSTCC_MODE, TG3_FL_NOT_5705,
10320 0x00000000, 0x00000004 },
10321 { HOSTCC_MODE, TG3_FL_5705,
10322 0x00000000, 0x000000f6 },
10323 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10324 0x00000000, 0xffffffff },
10325 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10326 0x00000000, 0x000003ff },
10327 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10328 0x00000000, 0xffffffff },
10329 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10330 0x00000000, 0x000003ff },
10331 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10332 0x00000000, 0xffffffff },
10333 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10334 0x00000000, 0x000000ff },
10335 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10336 0x00000000, 0xffffffff },
10337 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10338 0x00000000, 0x000000ff },
10339 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10340 0x00000000, 0xffffffff },
10341 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10342 0x00000000, 0xffffffff },
10343 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10344 0x00000000, 0xffffffff },
10345 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10346 0x00000000, 0x000000ff },
10347 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10348 0x00000000, 0xffffffff },
10349 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10350 0x00000000, 0x000000ff },
10351 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10352 0x00000000, 0xffffffff },
10353 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10354 0x00000000, 0xffffffff },
10355 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10356 0x00000000, 0xffffffff },
10357 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10358 0x00000000, 0xffffffff },
10359 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10360 0x00000000, 0xffffffff },
10361 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10362 0xffffffff, 0x00000000 },
10363 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10364 0xffffffff, 0x00000000 },
10366 /* Buffer Manager Control Registers. */
10367 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10368 0x00000000, 0x007fff80 },
10369 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10370 0x00000000, 0x007fffff },
10371 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10372 0x00000000, 0x0000003f },
10373 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10374 0x00000000, 0x000001ff },
10375 { BUFMGR_MB_HIGH_WATER, 0x0000,
10376 0x00000000, 0x000001ff },
10377 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10378 0xffffffff, 0x00000000 },
10379 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10380 0xffffffff, 0x00000000 },
10382 /* Mailbox Registers */
10383 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10384 0x00000000, 0x000001ff },
10385 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10386 0x00000000, 0x000001ff },
10387 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10388 0x00000000, 0x000007ff },
10389 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10390 0x00000000, 0x000001ff },
10392 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10395 is_5705 = is_5750 = 0;
10396 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10398 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10402 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10403 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10406 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10409 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10410 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10413 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10416 offset = (u32) reg_tbl[i].offset;
10417 read_mask = reg_tbl[i].read_mask;
10418 write_mask = reg_tbl[i].write_mask;
10420 /* Save the original register content */
10421 save_val = tr32(offset);
10423 /* Determine the read-only value. */
10424 read_val = save_val & read_mask;
10426 /* Write zero to the register, then make sure the read-only bits
10427 * are not changed and the read/write bits are all zeros.
10431 val = tr32(offset);
10433 /* Test the read-only and read/write bits. */
10434 if (((val & read_mask) != read_val) || (val & write_mask))
10437 /* Write ones to all the bits defined by RdMask and WrMask, then
10438 * make sure the read-only bits are not changed and the
10439 * read/write bits are all ones.
10441 tw32(offset, read_mask | write_mask);
10443 val = tr32(offset);
10445 /* Test the read-only bits. */
10446 if ((val & read_mask) != read_val)
10449 /* Test the read/write bits. */
10450 if ((val & write_mask) != write_mask)
10453 tw32(offset, save_val);
10459 if (netif_msg_hw(tp))
10460 printk(KERN_ERR PFX "Register test failed at offset %x\n",
10462 tw32(offset, save_val);
10466 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10468 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10472 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10473 for (j = 0; j < len; j += 4) {
10476 tg3_write_mem(tp, offset + j, test_pattern[i]);
10477 tg3_read_mem(tp, offset + j, &val);
10478 if (val != test_pattern[i])
10485 static int tg3_test_memory(struct tg3 *tp)
10487 static struct mem_entry {
10490 } mem_tbl_570x[] = {
10491 { 0x00000000, 0x00b50},
10492 { 0x00002000, 0x1c000},
10493 { 0xffffffff, 0x00000}
10494 }, mem_tbl_5705[] = {
10495 { 0x00000100, 0x0000c},
10496 { 0x00000200, 0x00008},
10497 { 0x00004000, 0x00800},
10498 { 0x00006000, 0x01000},
10499 { 0x00008000, 0x02000},
10500 { 0x00010000, 0x0e000},
10501 { 0xffffffff, 0x00000}
10502 }, mem_tbl_5755[] = {
10503 { 0x00000200, 0x00008},
10504 { 0x00004000, 0x00800},
10505 { 0x00006000, 0x00800},
10506 { 0x00008000, 0x02000},
10507 { 0x00010000, 0x0c000},
10508 { 0xffffffff, 0x00000}
10509 }, mem_tbl_5906[] = {
10510 { 0x00000200, 0x00008},
10511 { 0x00004000, 0x00400},
10512 { 0x00006000, 0x00400},
10513 { 0x00008000, 0x01000},
10514 { 0x00010000, 0x01000},
10515 { 0xffffffff, 0x00000}
10517 struct mem_entry *mem_tbl;
10521 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10522 mem_tbl = mem_tbl_5755;
10523 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10524 mem_tbl = mem_tbl_5906;
10525 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10526 mem_tbl = mem_tbl_5705;
10528 mem_tbl = mem_tbl_570x;
10530 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10531 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10532 mem_tbl[i].len)) != 0)
10539 #define TG3_MAC_LOOPBACK 0
10540 #define TG3_PHY_LOOPBACK 1
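/* Two loopback flavours are exercised: TG3_MAC_LOOPBACK wraps the frame
 * inside the MAC itself (MAC_MODE_PORT_INT_LPBACK), while
 * TG3_PHY_LOOPBACK puts the PHY into BMCR_LOOPBACK so the frame also
 * crosses the MAC/PHY interface before being reflected back.
 */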
10542 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10544 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10545 u32 desc_idx, coal_now;
10546 struct sk_buff *skb, *rx_skb;
10549 int num_pkts, tx_len, rx_len, i, err;
10550 struct tg3_rx_buffer_desc *desc;
10551 struct tg3_napi *tnapi, *rnapi;
10552 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10554 if (tp->irq_cnt > 1) {
10555 tnapi = &tp->napi[1];
10556 rnapi = &tp->napi[1];
10558 tnapi = &tp->napi[0];
10559 rnapi = &tp->napi[0];
10561 coal_now = tnapi->coal_now | rnapi->coal_now;
10563 if (loopback_mode == TG3_MAC_LOOPBACK) {
10564 /* HW errata - mac loopback fails in some cases on 5780.
10565 * Normal traffic and PHY loopback are not affected by
10566 * errata.
10567 */
10568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10571 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10572 MAC_MODE_PORT_INT_LPBACK;
10573 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10574 mac_mode |= MAC_MODE_LINK_POLARITY;
10575 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10576 mac_mode |= MAC_MODE_PORT_MODE_MII;
10578 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10579 tw32(MAC_MODE, mac_mode);
10580 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10583 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10584 tg3_phy_fet_toggle_apd(tp, false);
10585 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10587 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10589 tg3_phy_toggle_automdix(tp, 0);
10591 tg3_writephy(tp, MII_BMCR, val);
10594 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10595 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10597 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
10598 mac_mode |= MAC_MODE_PORT_MODE_MII;
10600 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10602 /* reset to prevent losing 1st rx packet intermittently */
10603 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10604 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10606 tw32_f(MAC_RX_MODE, tp->rx_mode);
10608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10609 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10610 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10611 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10612 mac_mode |= MAC_MODE_LINK_POLARITY;
10613 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10614 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10616 tw32(MAC_MODE, mac_mode);
10624 skb = netdev_alloc_skb(tp->dev, tx_len);
10628 tx_data = skb_put(skb, tx_len);
10629 memcpy(tx_data, tp->dev->dev_addr, 6);
10630 memset(tx_data + 6, 0x0, 8);
10632 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10634 for (i = 14; i < tx_len; i++)
10635 tx_data[i] = (u8) (i & 0xff);
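/* The test frame built above is addressed to the NIC's own MAC address,
 * has the source address and EtherType bytes zeroed, and carries a
 * simple incrementing (i & 0xff) byte pattern as payload; the receive
 * path below verifies that same pattern byte-for-byte.
 */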
10637 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
10638 dev_kfree_skb(skb);
10642 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10647 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10651 tg3_set_txd(tnapi, tnapi->tx_prod,
10652 skb_shinfo(skb)->dma_head, tx_len, 0, 1);
10657 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10658 tr32_mailbox(tnapi->prodmbox);
10662 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10663 for (i = 0; i < 35; i++) {
10664 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10669 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10670 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10671 if ((tx_idx == tnapi->tx_prod) &&
10672 (rx_idx == (rx_start_idx + num_pkts)))
10676 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
10677 dev_kfree_skb(skb);
10679 if (tx_idx != tnapi->tx_prod)
10682 if (rx_idx != rx_start_idx + num_pkts)
10685 desc = &rnapi->rx_rcb[rx_start_idx];
10686 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10687 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10688 if (opaque_key != RXD_OPAQUE_RING_STD)
10691 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10692 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10695 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10696 if (rx_len != tx_len)
10699 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10701 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10702 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10704 for (i = 14; i < tx_len; i++) {
10705 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10710 /* tg3_free_rings will unmap and free the rx_skb */
10715 #define TG3_MAC_LOOPBACK_FAILED 1
10716 #define TG3_PHY_LOOPBACK_FAILED 2
10717 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10718 TG3_PHY_LOOPBACK_FAILED)
10720 static int tg3_test_loopback(struct tg3 *tp)
10725 if (!netif_running(tp->dev))
10726 return TG3_LOOPBACK_FAILED;
10728 err = tg3_reset_hw(tp, 1);
10730 return TG3_LOOPBACK_FAILED;
10732 /* Turn off gphy autopowerdown. */
10733 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10734 tg3_phy_toggle_apd(tp, false);
10736 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10740 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10742 /* Wait for up to 40 microseconds to acquire lock. */
10743 for (i = 0; i < 4; i++) {
10744 status = tr32(TG3_CPMU_MUTEX_GNT);
10745 if (status == CPMU_MUTEX_GNT_DRIVER)
10750 if (status != CPMU_MUTEX_GNT_DRIVER)
10751 return TG3_LOOPBACK_FAILED;
10753 /* Turn off link-based power management. */
10754 cpmuctrl = tr32(TG3_CPMU_CTRL);
10755 tw32(TG3_CPMU_CTRL,
10756 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10757 CPMU_CTRL_LINK_AWARE_MODE));
10760 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10761 err |= TG3_MAC_LOOPBACK_FAILED;
10763 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10764 tw32(TG3_CPMU_CTRL, cpmuctrl);
10766 /* Release the mutex */
10767 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10770 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10771 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10772 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10773 err |= TG3_PHY_LOOPBACK_FAILED;
10776 /* Re-enable gphy autopowerdown. */
10777 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10778 tg3_phy_toggle_apd(tp, true);
10783 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10786 struct tg3 *tp = netdev_priv(dev);
10788 if (tp->link_config.phy_is_low_power)
10789 tg3_set_power_state(tp, PCI_D0);
10791 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10793 if (tg3_test_nvram(tp) != 0) {
10794 etest->flags |= ETH_TEST_FL_FAILED;
10797 if (tg3_test_link(tp) != 0) {
10798 etest->flags |= ETH_TEST_FL_FAILED;
10801 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10802 int err, err2 = 0, irq_sync = 0;
10804 if (netif_running(dev)) {
10806 tg3_netif_stop(tp);
10810 tg3_full_lock(tp, irq_sync);
10812 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10813 err = tg3_nvram_lock(tp);
10814 tg3_halt_cpu(tp, RX_CPU_BASE);
10815 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10816 tg3_halt_cpu(tp, TX_CPU_BASE);
10818 tg3_nvram_unlock(tp);
10820 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10823 if (tg3_test_registers(tp) != 0) {
10824 etest->flags |= ETH_TEST_FL_FAILED;
10827 if (tg3_test_memory(tp) != 0) {
10828 etest->flags |= ETH_TEST_FL_FAILED;
10831 if ((data[4] = tg3_test_loopback(tp)) != 0)
10832 etest->flags |= ETH_TEST_FL_FAILED;
10834 tg3_full_unlock(tp);
10836 if (tg3_test_interrupt(tp) != 0) {
10837 etest->flags |= ETH_TEST_FL_FAILED;
10841 tg3_full_lock(tp, 0);
10843 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10844 if (netif_running(dev)) {
10845 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10846 err2 = tg3_restart_hw(tp, 1);
10848 tg3_netif_start(tp);
10851 tg3_full_unlock(tp);
10853 if (irq_sync && !err2)
10856 if (tp->link_config.phy_is_low_power)
10857 tg3_set_power_state(tp, PCI_D3hot);
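/* data[4], set explicitly above, carries the TG3_*_LOOPBACK_FAILED bits
 * from tg3_test_loopback(); the other slots of the result array appear
 * to follow the ethtool_test_keys order (nvram, link, registers,
 * memory, loopback, interrupt).
 */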
10861 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10863 struct mii_ioctl_data *data = if_mii(ifr);
10864 struct tg3 *tp = netdev_priv(dev);
10867 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10868 struct phy_device *phydev;
10869 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10871 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10872 return phy_mii_ioctl(phydev, data, cmd);
10877 data->phy_id = tp->phy_addr;
10880 case SIOCGMIIREG: {
10883 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10884 break; /* We have no PHY */
10886 if (tp->link_config.phy_is_low_power)
10889 spin_lock_bh(&tp->lock);
10890 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10891 spin_unlock_bh(&tp->lock);
10893 data->val_out = mii_regval;
10899 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10900 break; /* We have no PHY */
10902 if (tp->link_config.phy_is_low_power)
10905 spin_lock_bh(&tp->lock);
10906 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10907 spin_unlock_bh(&tp->lock);
10915 return -EOPNOTSUPP;
10918 #if TG3_VLAN_TAG_USED
10919 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10921 struct tg3 *tp = netdev_priv(dev);
10923 if (!netif_running(dev)) {
10928 tg3_netif_stop(tp);
10930 tg3_full_lock(tp, 0);
10934 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10935 __tg3_set_rx_mode(dev);
10937 tg3_netif_start(tp);
10939 tg3_full_unlock(tp);
10943 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10945 struct tg3 *tp = netdev_priv(dev);
10947 memcpy(ec, &tp->coal, sizeof(*ec));
10951 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10953 struct tg3 *tp = netdev_priv(dev);
10954 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10955 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10957 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10958 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10959 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10960 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10961 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10964 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10965 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10966 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10967 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10968 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10969 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10970 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10971 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10972 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10973 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10976 /* No rx interrupts will be generated if both are zero */
10977 if ((ec->rx_coalesce_usecs == 0) &&
10978 (ec->rx_max_coalesced_frames == 0))
10981 /* No tx interrupts will be generated if both are zero */
10982 if ((ec->tx_coalesce_usecs == 0) &&
10983 (ec->tx_max_coalesced_frames == 0))
10986 /* Only copy relevant parameters, ignore all others. */
10987 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10988 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10989 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10990 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10991 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10992 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10993 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10994 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10995 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10997 if (netif_running(dev)) {
10998 tg3_full_lock(tp, 0);
10999 __tg3_set_coalesce(tp, &tp->coal);
11000 tg3_full_unlock(tp);
11005 static const struct ethtool_ops tg3_ethtool_ops = {
11006 .get_settings = tg3_get_settings,
11007 .set_settings = tg3_set_settings,
11008 .get_drvinfo = tg3_get_drvinfo,
11009 .get_regs_len = tg3_get_regs_len,
11010 .get_regs = tg3_get_regs,
11011 .get_wol = tg3_get_wol,
11012 .set_wol = tg3_set_wol,
11013 .get_msglevel = tg3_get_msglevel,
11014 .set_msglevel = tg3_set_msglevel,
11015 .nway_reset = tg3_nway_reset,
11016 .get_link = ethtool_op_get_link,
11017 .get_eeprom_len = tg3_get_eeprom_len,
11018 .get_eeprom = tg3_get_eeprom,
11019 .set_eeprom = tg3_set_eeprom,
11020 .get_ringparam = tg3_get_ringparam,
11021 .set_ringparam = tg3_set_ringparam,
11022 .get_pauseparam = tg3_get_pauseparam,
11023 .set_pauseparam = tg3_set_pauseparam,
11024 .get_rx_csum = tg3_get_rx_csum,
11025 .set_rx_csum = tg3_set_rx_csum,
11026 .set_tx_csum = tg3_set_tx_csum,
11027 .set_sg = ethtool_op_set_sg,
11028 .set_tso = tg3_set_tso,
11029 .self_test = tg3_self_test,
11030 .get_strings = tg3_get_strings,
11031 .phys_id = tg3_phys_id,
11032 .get_ethtool_stats = tg3_get_ethtool_stats,
11033 .get_coalesce = tg3_get_coalesce,
11034 .set_coalesce = tg3_set_coalesce,
11035 .get_sset_count = tg3_get_sset_count,
11038 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11040 u32 cursize, val, magic;
11042 tp->nvram_size = EEPROM_CHIP_SIZE;
11044 if (tg3_nvram_read(tp, 0, &magic) != 0)
11047 if ((magic != TG3_EEPROM_MAGIC) &&
11048 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11049 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11053 * Size the chip by reading offsets at increasing powers of two.
11054 * When we encounter our validation signature, we know the addressing
11055 * has wrapped around, and thus have our chip size.
11059 while (cursize < tp->nvram_size) {
11060 if (tg3_nvram_read(tp, cursize, &val) != 0)
11069 tp->nvram_size = cursize;
11072 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11076 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11077 tg3_nvram_read(tp, 0, &val) != 0)
11080 /* Selfboot format */
11081 if (val != TG3_EEPROM_MAGIC) {
11082 tg3_get_eeprom_size(tp);
11086 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11088 /* This is confusing. We want to operate on the
11089 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11090 * call will read from NVRAM and byteswap the data
11091 * according to the byteswapping settings for all
11092 * other register accesses. This ensures the data we
11093 * want will always reside in the lower 16-bits.
11094 * However, the data in NVRAM is in LE format, which
11095 * means the data from the NVRAM read will always be
11096 * opposite the endianness of the CPU. The 16-bit
11097 * byteswap then brings the data to CPU endianness.
11099 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11103 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11106 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11110 nvcfg1 = tr32(NVRAM_CFG1);
11111 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11112 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11114 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11115 tw32(NVRAM_CFG1, nvcfg1);
11118 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11119 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11120 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11121 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11122 tp->nvram_jedecnum = JEDEC_ATMEL;
11123 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11124 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11126 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11127 tp->nvram_jedecnum = JEDEC_ATMEL;
11128 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11130 case FLASH_VENDOR_ATMEL_EEPROM:
11131 tp->nvram_jedecnum = JEDEC_ATMEL;
11132 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11133 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11135 case FLASH_VENDOR_ST:
11136 tp->nvram_jedecnum = JEDEC_ST;
11137 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11138 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11140 case FLASH_VENDOR_SAIFUN:
11141 tp->nvram_jedecnum = JEDEC_SAIFUN;
11142 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11144 case FLASH_VENDOR_SST_SMALL:
11145 case FLASH_VENDOR_SST_LARGE:
11146 tp->nvram_jedecnum = JEDEC_SST;
11147 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11151 tp->nvram_jedecnum = JEDEC_ATMEL;
11152 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11153 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11157 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11159 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11160 case FLASH_5752PAGE_SIZE_256:
11161 tp->nvram_pagesize = 256;
11163 case FLASH_5752PAGE_SIZE_512:
11164 tp->nvram_pagesize = 512;
11166 case FLASH_5752PAGE_SIZE_1K:
11167 tp->nvram_pagesize = 1024;
11169 case FLASH_5752PAGE_SIZE_2K:
11170 tp->nvram_pagesize = 2048;
11172 case FLASH_5752PAGE_SIZE_4K:
11173 tp->nvram_pagesize = 4096;
11175 case FLASH_5752PAGE_SIZE_264:
11176 tp->nvram_pagesize = 264;
11178 case FLASH_5752PAGE_SIZE_528:
11179 tp->nvram_pagesize = 528;
11184 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11188 nvcfg1 = tr32(NVRAM_CFG1);
11190 /* NVRAM protection for TPM */
11191 if (nvcfg1 & (1 << 27))
11192 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11194 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11195 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11196 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11197 tp->nvram_jedecnum = JEDEC_ATMEL;
11198 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11200 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11201 tp->nvram_jedecnum = JEDEC_ATMEL;
11202 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11203 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11205 case FLASH_5752VENDOR_ST_M45PE10:
11206 case FLASH_5752VENDOR_ST_M45PE20:
11207 case FLASH_5752VENDOR_ST_M45PE40:
11208 tp->nvram_jedecnum = JEDEC_ST;
11209 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11210 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11214 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11215 tg3_nvram_get_pagesize(tp, nvcfg1);
11217 /* For eeprom, set pagesize to maximum eeprom size */
11218 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11220 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11221 tw32(NVRAM_CFG1, nvcfg1);
11225 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11227 u32 nvcfg1, protect = 0;
11229 nvcfg1 = tr32(NVRAM_CFG1);
11231 /* NVRAM protection for TPM */
11232 if (nvcfg1 & (1 << 27)) {
11233 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11237 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11239 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11240 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11241 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11242 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11243 tp->nvram_jedecnum = JEDEC_ATMEL;
11244 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11245 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11246 tp->nvram_pagesize = 264;
11247 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11248 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11249 tp->nvram_size = (protect ? 0x3e200 :
11250 TG3_NVRAM_SIZE_512KB);
11251 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11252 tp->nvram_size = (protect ? 0x1f200 :
11253 TG3_NVRAM_SIZE_256KB);
11255 tp->nvram_size = (protect ? 0x1f200 :
11256 TG3_NVRAM_SIZE_128KB);
11258 case FLASH_5752VENDOR_ST_M45PE10:
11259 case FLASH_5752VENDOR_ST_M45PE20:
11260 case FLASH_5752VENDOR_ST_M45PE40:
11261 tp->nvram_jedecnum = JEDEC_ST;
11262 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11263 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11264 tp->nvram_pagesize = 256;
11265 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11266 tp->nvram_size = (protect ?
11267 TG3_NVRAM_SIZE_64KB :
11268 TG3_NVRAM_SIZE_128KB);
11269 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11270 tp->nvram_size = (protect ?
11271 TG3_NVRAM_SIZE_64KB :
11272 TG3_NVRAM_SIZE_256KB);
11274 tp->nvram_size = (protect ?
11275 TG3_NVRAM_SIZE_128KB :
11276 TG3_NVRAM_SIZE_512KB);
11281 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11285 nvcfg1 = tr32(NVRAM_CFG1);
11287 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11288 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11289 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11290 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11291 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11292 tp->nvram_jedecnum = JEDEC_ATMEL;
11293 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11294 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11296 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11297 tw32(NVRAM_CFG1, nvcfg1);
11299 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11300 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11301 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11302 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11303 tp->nvram_jedecnum = JEDEC_ATMEL;
11304 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11305 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11306 tp->nvram_pagesize = 264;
11308 case FLASH_5752VENDOR_ST_M45PE10:
11309 case FLASH_5752VENDOR_ST_M45PE20:
11310 case FLASH_5752VENDOR_ST_M45PE40:
11311 tp->nvram_jedecnum = JEDEC_ST;
11312 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11313 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11314 tp->nvram_pagesize = 256;
11319 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11321 u32 nvcfg1, protect = 0;
11323 nvcfg1 = tr32(NVRAM_CFG1);
11325 /* NVRAM protection for TPM */
11326 if (nvcfg1 & (1 << 27)) {
11327 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11331 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11333 case FLASH_5761VENDOR_ATMEL_ADB021D:
11334 case FLASH_5761VENDOR_ATMEL_ADB041D:
11335 case FLASH_5761VENDOR_ATMEL_ADB081D:
11336 case FLASH_5761VENDOR_ATMEL_ADB161D:
11337 case FLASH_5761VENDOR_ATMEL_MDB021D:
11338 case FLASH_5761VENDOR_ATMEL_MDB041D:
11339 case FLASH_5761VENDOR_ATMEL_MDB081D:
11340 case FLASH_5761VENDOR_ATMEL_MDB161D:
11341 tp->nvram_jedecnum = JEDEC_ATMEL;
11342 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11343 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11344 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11345 tp->nvram_pagesize = 256;
11347 case FLASH_5761VENDOR_ST_A_M45PE20:
11348 case FLASH_5761VENDOR_ST_A_M45PE40:
11349 case FLASH_5761VENDOR_ST_A_M45PE80:
11350 case FLASH_5761VENDOR_ST_A_M45PE16:
11351 case FLASH_5761VENDOR_ST_M_M45PE20:
11352 case FLASH_5761VENDOR_ST_M_M45PE40:
11353 case FLASH_5761VENDOR_ST_M_M45PE80:
11354 case FLASH_5761VENDOR_ST_M_M45PE16:
11355 tp->nvram_jedecnum = JEDEC_ST;
11356 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11357 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11358 tp->nvram_pagesize = 256;
11363 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11366 case FLASH_5761VENDOR_ATMEL_ADB161D:
11367 case FLASH_5761VENDOR_ATMEL_MDB161D:
11368 case FLASH_5761VENDOR_ST_A_M45PE16:
11369 case FLASH_5761VENDOR_ST_M_M45PE16:
11370 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11372 case FLASH_5761VENDOR_ATMEL_ADB081D:
11373 case FLASH_5761VENDOR_ATMEL_MDB081D:
11374 case FLASH_5761VENDOR_ST_A_M45PE80:
11375 case FLASH_5761VENDOR_ST_M_M45PE80:
11376 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11378 case FLASH_5761VENDOR_ATMEL_ADB041D:
11379 case FLASH_5761VENDOR_ATMEL_MDB041D:
11380 case FLASH_5761VENDOR_ST_A_M45PE40:
11381 case FLASH_5761VENDOR_ST_M_M45PE40:
11382 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11384 case FLASH_5761VENDOR_ATMEL_ADB021D:
11385 case FLASH_5761VENDOR_ATMEL_MDB021D:
11386 case FLASH_5761VENDOR_ST_A_M45PE20:
11387 case FLASH_5761VENDOR_ST_M_M45PE20:
11388 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11394 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11396 tp->nvram_jedecnum = JEDEC_ATMEL;
11397 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11398 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11401 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11405 nvcfg1 = tr32(NVRAM_CFG1);
11407 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11408 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11409 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11410 tp->nvram_jedecnum = JEDEC_ATMEL;
11411 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11412 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11414 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11415 tw32(NVRAM_CFG1, nvcfg1);
11417 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11418 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11419 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11420 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11421 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11422 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11423 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11424 tp->nvram_jedecnum = JEDEC_ATMEL;
11425 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11426 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11428 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11429 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11430 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11431 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11432 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11434 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11435 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11436 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11438 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11439 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11440 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11444 case FLASH_5752VENDOR_ST_M45PE10:
11445 case FLASH_5752VENDOR_ST_M45PE20:
11446 case FLASH_5752VENDOR_ST_M45PE40:
11447 tp->nvram_jedecnum = JEDEC_ST;
11448 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11449 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11451 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11452 case FLASH_5752VENDOR_ST_M45PE10:
11453 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11455 case FLASH_5752VENDOR_ST_M45PE20:
11456 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11458 case FLASH_5752VENDOR_ST_M45PE40:
11459 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11464 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11468 tg3_nvram_get_pagesize(tp, nvcfg1);
11469 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11470 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11474 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11478 nvcfg1 = tr32(NVRAM_CFG1);
11480 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11481 case FLASH_5717VENDOR_ATMEL_EEPROM:
11482 case FLASH_5717VENDOR_MICRO_EEPROM:
11483 tp->nvram_jedecnum = JEDEC_ATMEL;
11484 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11485 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11487 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11488 tw32(NVRAM_CFG1, nvcfg1);
11490 case FLASH_5717VENDOR_ATMEL_MDB011D:
11491 case FLASH_5717VENDOR_ATMEL_ADB011B:
11492 case FLASH_5717VENDOR_ATMEL_ADB011D:
11493 case FLASH_5717VENDOR_ATMEL_MDB021D:
11494 case FLASH_5717VENDOR_ATMEL_ADB021B:
11495 case FLASH_5717VENDOR_ATMEL_ADB021D:
11496 case FLASH_5717VENDOR_ATMEL_45USPT:
11497 tp->nvram_jedecnum = JEDEC_ATMEL;
11498 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11499 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11501 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11502 case FLASH_5717VENDOR_ATMEL_MDB021D:
11503 case FLASH_5717VENDOR_ATMEL_ADB021B:
11504 case FLASH_5717VENDOR_ATMEL_ADB021D:
11505 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11508 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11512 case FLASH_5717VENDOR_ST_M_M25PE10:
11513 case FLASH_5717VENDOR_ST_A_M25PE10:
11514 case FLASH_5717VENDOR_ST_M_M45PE10:
11515 case FLASH_5717VENDOR_ST_A_M45PE10:
11516 case FLASH_5717VENDOR_ST_M_M25PE20:
11517 case FLASH_5717VENDOR_ST_A_M25PE20:
11518 case FLASH_5717VENDOR_ST_M_M45PE20:
11519 case FLASH_5717VENDOR_ST_A_M45PE20:
11520 case FLASH_5717VENDOR_ST_25USPT:
11521 case FLASH_5717VENDOR_ST_45USPT:
11522 tp->nvram_jedecnum = JEDEC_ST;
11523 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11524 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11526 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11527 case FLASH_5717VENDOR_ST_M_M25PE20:
11528 case FLASH_5717VENDOR_ST_A_M25PE20:
11529 case FLASH_5717VENDOR_ST_M_M45PE20:
11530 case FLASH_5717VENDOR_ST_A_M45PE20:
11531 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11534 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11539 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11543 tg3_nvram_get_pagesize(tp, nvcfg1);
11544 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11545 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11548 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11549 static void __devinit tg3_nvram_init(struct tg3 *tp)
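/* Reset the EEPROM address state machine and program the default
 * clock period before probing for NVRAM.
 */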
11551 tw32_f(GRC_EEPROM_ADDR,
11552 (EEPROM_ADDR_FSM_RESET |
11553 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11554 EEPROM_ADDR_CLKPERD_SHIFT)));
11558 /* Enable seeprom accesses. */
11559 tw32_f(GRC_LOCAL_CTRL,
11560 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11563 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11564 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11565 tp->tg3_flags |= TG3_FLAG_NVRAM;
11567 if (tg3_nvram_lock(tp)) {
11568 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
11569 "tg3_nvram_init failed.\n", tp->dev->name);
11572 tg3_enable_nvram_access(tp);
11574 tp->nvram_size = 0;
11576 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11577 tg3_get_5752_nvram_info(tp);
11578 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11579 tg3_get_5755_nvram_info(tp);
11580 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11581 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11582 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11583 tg3_get_5787_nvram_info(tp);
11584 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11585 tg3_get_5761_nvram_info(tp);
11586 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11587 tg3_get_5906_nvram_info(tp);
11588 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11589 tg3_get_57780_nvram_info(tp);
11590 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
11591 tg3_get_5717_nvram_info(tp);
11593 tg3_get_nvram_info(tp);
11595 if (tp->nvram_size == 0)
11596 tg3_get_nvram_size(tp);
11598 tg3_disable_nvram_access(tp);
11599 tg3_nvram_unlock(tp);
11602 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11604 tg3_get_eeprom_size(tp);
11608 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11609 u32 offset, u32 len, u8 *buf)
11614 for (i = 0; i < len; i += 4) {
11620 memcpy(&data, buf + i, 4);
11623 * The SEEPROM interface expects the data to always be opposite
11624 * the native endian format. We accomplish this by reversing
11625 * all the operations that would have been performed on the
11626 * data from a call to tg3_nvram_read_be32().
11628 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11630 val = tr32(GRC_EEPROM_ADDR);
11631 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11633 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11635 tw32(GRC_EEPROM_ADDR, val |
11636 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11637 (addr & EEPROM_ADDR_ADDR_MASK) |
11638 EEPROM_ADDR_START |
11639 EEPROM_ADDR_WRITE);
11641 for (j = 0; j < 1000; j++) {
11642 val = tr32(GRC_EEPROM_ADDR);
11644 if (val & EEPROM_ADDR_COMPLETE)
11648 if (!(val & EEPROM_ADDR_COMPLETE)) {
11657 /* offset and length are dword aligned */
11658 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11662 u32 pagesize = tp->nvram_pagesize;
11663 u32 pagemask = pagesize - 1;
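/* Unbuffered flash can only be programmed a full page at a time: read
 * each affected page into a scratch buffer, merge in the new data,
 * erase the page, then rewrite it word by word.
 */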
11667 tmp = kmalloc(pagesize, GFP_KERNEL);
11673 u32 phy_addr, page_off, size;
11675 phy_addr = offset & ~pagemask;
11677 for (j = 0; j < pagesize; j += 4) {
11678 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11679 (__be32 *) (tmp + j));
11686 page_off = offset & pagemask;
11693 memcpy(tmp + page_off, buf, size);
11695 offset = offset + (pagesize - page_off);
11697 tg3_enable_nvram_access(tp);
11700 * Before we can erase the flash page, we need
11701 * to issue a special "write enable" command.
11703 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11705 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11708 /* Erase the target page */
11709 tw32(NVRAM_ADDR, phy_addr);
11711 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11712 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11714 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11717 /* Issue another write enable to start the write. */
11718 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11720 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11723 for (j = 0; j < pagesize; j += 4) {
11726 data = *((__be32 *) (tmp + j));
11728 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11730 tw32(NVRAM_ADDR, phy_addr + j);
11732 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11736 nvram_cmd |= NVRAM_CMD_FIRST;
11737 else if (j == (pagesize - 4))
11738 nvram_cmd |= NVRAM_CMD_LAST;
11740 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11747 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11748 tg3_nvram_exec_cmd(tp, nvram_cmd);
11755 /* offset and length are dword aligned */
11756 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11761 for (i = 0; i < len; i += 4, offset += 4) {
11762 u32 page_off, phy_addr, nvram_cmd;
11765 memcpy(&data, buf + i, 4);
11766 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11768 page_off = offset % tp->nvram_pagesize;
11770 phy_addr = tg3_nvram_phys_addr(tp, offset);
11772 tw32(NVRAM_ADDR, phy_addr);
11774 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
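/* Mark the first and last word of each flash page (and of the whole
 * transfer) so the controller knows when a programming cycle starts
 * and ends.
 */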
11776 if ((page_off == 0) || (i == 0))
11777 nvram_cmd |= NVRAM_CMD_FIRST;
11778 if (page_off == (tp->nvram_pagesize - 4))
11779 nvram_cmd |= NVRAM_CMD_LAST;
11781 if (i == (len - 4))
11782 nvram_cmd |= NVRAM_CMD_LAST;
11784 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11785 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11786 (tp->nvram_jedecnum == JEDEC_ST) &&
11787 (nvram_cmd & NVRAM_CMD_FIRST)) {
11789 if ((ret = tg3_nvram_exec_cmd(tp,
11790 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11795 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11796 /* We always do complete word writes to eeprom. */
11797 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11800 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11806 /* offset and length are dword aligned */
11807 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
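/* If the board uses GPIO1 as eeprom write protect, drive it low for the
 * duration of the write and restore tp->grc_local_ctrl when done.
 */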
11811 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11812 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11813 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11817 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11818 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11823 ret = tg3_nvram_lock(tp);
11827 tg3_enable_nvram_access(tp);
11828 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11829 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11830 tw32(NVRAM_WRITE1, 0x406);
11832 grc_mode = tr32(GRC_MODE);
11833 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11835 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11836 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11838 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11842 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11846 grc_mode = tr32(GRC_MODE);
11847 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11849 tg3_disable_nvram_access(tp);
11850 tg3_nvram_unlock(tp);
11853 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11854 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11861 struct subsys_tbl_ent {
11862 u16 subsys_vendor, subsys_devid;
11866 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11867 /* Broadcom boards. */
11868 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11869 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11870 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11871 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11872 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11873 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11874 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11875 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11876 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11877 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11878 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11881 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11882 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11883 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11884 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11885 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11888 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11889 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11890 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11891 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11893 /* Compaq boards. */
11894 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11895 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11896 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11897 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11898 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11901 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11904 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11908 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11909 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11910 tp->pdev->subsystem_vendor) &&
11911 (subsys_id_to_phy_id[i].subsys_devid ==
11912 tp->pdev->subsystem_device))
11913 return &subsys_id_to_phy_id[i];
11918 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11923 /* On some early chips the SRAM cannot be accessed in D3hot state,
11924 * so we need to make sure we're in D0.
11926 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11927 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11928 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11931 /* Make sure register accesses (indirect or otherwise)
11932 * will function correctly.
11934 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11935 tp->misc_host_ctrl);
11937 /* The memory arbiter has to be enabled in order for SRAM accesses
11938 * to succeed. Normally on powerup the tg3 chip firmware will make
11939 * sure it is enabled, but other entities such as system netboot
11940 * code might disable it.
11942 val = tr32(MEMARB_MODE);
11943 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11945 tp->phy_id = PHY_ID_INVALID;
11946 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11948 /* Assume an onboard device and WOL capable by default. */
11949 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11952 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11953 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11954 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
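/* On the 5906 the ASPM and WOL settings come from the VCPU configuration
 * shadow register rather than the NIC SRAM config words read below.
 */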
11956 val = tr32(VCPU_CFGSHDW);
11957 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11958 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11959 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11960 (val & VCPU_CFGSHDW_WOL_MAGPKT))
11961 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11965 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11966 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11967 u32 nic_cfg, led_cfg;
11968 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11969 int eeprom_phy_serdes = 0;
11971 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11972 tp->nic_sram_data_cfg = nic_cfg;
11974 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11975 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11976 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11977 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11978 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11979 (ver > 0) && (ver < 0x100))
11980 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11983 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11985 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11986 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11987 eeprom_phy_serdes = 1;
11989 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11990 if (nic_phy_id != 0) {
11991 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11992 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
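/* NIC_SRAM_DATA_PHY_ID holds PHYSID1 in its upper half and PHYSID2 in
 * its lower half; repack them into the driver's internal PHY ID layout
 * (the same packing tg3_phy_probe() builds from MII_PHYSID1/2).
 */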
11994 eeprom_phy_id = (id1 >> 16) << 10;
11995 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11996 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12000 tp->phy_id = eeprom_phy_id;
12001 if (eeprom_phy_serdes) {
12002 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
12003 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12005 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12008 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12009 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12010 SHASTA_EXT_LED_MODE_MASK);
12012 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12016 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12017 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12020 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12021 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12024 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12025 tp->led_ctrl = LED_CTRL_MODE_MAC;
12027 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12028 * read from some older 5700/5701 bootcode.
12030 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12032 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12034 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12038 case SHASTA_EXT_LED_SHARED:
12039 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12040 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12041 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12042 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12043 LED_CTRL_MODE_PHY_2);
12046 case SHASTA_EXT_LED_MAC:
12047 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12050 case SHASTA_EXT_LED_COMBO:
12051 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12052 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12053 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12054 LED_CTRL_MODE_PHY_2);
12059 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12061 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12062 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12064 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12065 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12067 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12068 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12069 if ((tp->pdev->subsystem_vendor ==
12070 PCI_VENDOR_ID_ARIMA) &&
12071 (tp->pdev->subsystem_device == 0x205a ||
12072 tp->pdev->subsystem_device == 0x2063))
12073 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12075 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12076 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12079 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12080 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12081 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12082 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12085 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12086 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12087 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12089 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
12090 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12091 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12093 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12094 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12095 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12097 if (cfg2 & (1 << 17))
12098 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
12100 /* serdes signal pre-emphasis in register 0x590 is set by
12101 * the bootcode if bit 18 is set */
12102 if (cfg2 & (1 << 18))
12103 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
12105 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12106 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12107 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12108 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
12110 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12113 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12114 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12115 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12118 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
12119 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
12120 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12121 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12122 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12123 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12126 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12127 device_set_wakeup_enable(&tp->pdev->dev,
12128 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12131 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12136 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12137 tw32(OTP_CTRL, cmd);
12139 /* Wait for up to 1 ms for command to execute. */
12140 for (i = 0; i < 100; i++) {
12141 val = tr32(OTP_STATUS);
12142 if (val & OTP_STATUS_CMD_DONE)
12147 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12150 /* Read the gphy configuration from the OTP region of the chip. The gphy
12151 * configuration is a 32-bit value that straddles the alignment boundary.
12152 * We do two 32-bit reads and then shift and merge the results.
12154 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12156 u32 bhalf_otp, thalf_otp;
12158 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12160 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12163 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12165 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12168 thalf_otp = tr32(OTP_READ_DATA);
12170 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12172 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12175 bhalf_otp = tr32(OTP_READ_DATA);
12177 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12180 static int __devinit tg3_phy_probe(struct tg3 *tp)
12182 u32 hw_phy_id_1, hw_phy_id_2;
12183 u32 hw_phy_id, hw_phy_id_masked;
12186 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12187 return tg3_phy_init(tp);
12189 /* Reading the PHY ID register can conflict with ASF
12190 * firmware access to the PHY hardware.
12193 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12194 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12195 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
12197 /* Now read the physical PHY_ID from the chip and verify
12198 * that it is sane. If it doesn't look good, we fall back
12199 * to the hard-coded table-based PHY_ID and, failing that,
12200 * the value found in the eeprom area.
12202 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12203 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12205 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12206 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12207 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12209 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
12212 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
12213 tp->phy_id = hw_phy_id;
12214 if (hw_phy_id_masked == PHY_ID_BCM8002)
12215 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12217 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
12219 if (tp->phy_id != PHY_ID_INVALID) {
12220 /* Do nothing, phy ID already set up in
12221 * tg3_get_eeprom_hw_cfg().
12224 struct subsys_tbl_ent *p;
12226 /* No eeprom signature? Try the hardcoded
12227 * subsys device table.
12229 p = lookup_by_subsys(tp);
12233 tp->phy_id = p->phy_id;
12235 tp->phy_id == PHY_ID_BCM8002)
12236 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12240 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
12241 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12242 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12243 u32 bmsr, adv_reg, tg3_ctrl, mask;
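/* MII_BMSR latches link-down events, so read it twice: the first read
 * clears any stale latch and the second reflects the current link state.
 * Skip the PHY reset if the link is already up.
 */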
12245 tg3_readphy(tp, MII_BMSR, &bmsr);
12246 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12247 (bmsr & BMSR_LSTATUS))
12248 goto skip_phy_reset;
12250 err = tg3_phy_reset(tp);
12254 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12255 ADVERTISE_100HALF | ADVERTISE_100FULL |
12256 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12258 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
12259 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12260 MII_TG3_CTRL_ADV_1000_FULL);
12261 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12262 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12263 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12264 MII_TG3_CTRL_ENABLE_AS_MASTER);
12267 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12268 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12269 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12270 if (!tg3_copper_is_advertising_all(tp, mask)) {
12271 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12273 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12274 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12276 tg3_writephy(tp, MII_BMCR,
12277 BMCR_ANENABLE | BMCR_ANRESTART);
12279 tg3_phy_set_wirespeed(tp);
12281 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12282 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12283 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12287 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
12288 err = tg3_init_5401phy_dsp(tp);
12293 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
12294 err = tg3_init_5401phy_dsp(tp);
12297 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
12298 tp->link_config.advertising =
12299 (ADVERTISED_1000baseT_Half |
12300 ADVERTISED_1000baseT_Full |
12301 ADVERTISED_Autoneg |
12303 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
12304 tp->link_config.advertising &=
12305 ~(ADVERTISED_1000baseT_Half |
12306 ADVERTISED_1000baseT_Full);
12311 static void __devinit tg3_read_partno(struct tg3 *tp)
12313 unsigned char vpd_data[256]; /* in little-endian format */
12317 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12318 tg3_nvram_read(tp, 0x0, &magic))
12319 goto out_not_found;
12321 if (magic == TG3_EEPROM_MAGIC) {
12322 for (i = 0; i < 256; i += 4) {
12325 /* The data is in little-endian format in NVRAM.
12326 * Use the big-endian read routines to preserve
12327 * the byte order as it exists in NVRAM.
12329 if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
12330 goto out_not_found;
12332 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
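/* Without a valid EEPROM image in NVRAM, fall back to reading the VPD
 * area through the PCI VPD capability, one dword at a time.
 */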
12337 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
12338 for (i = 0; i < 256; i += 4) {
12343 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
12345 while (j++ < 100) {
12346 pci_read_config_word(tp->pdev, vpd_cap +
12347 PCI_VPD_ADDR, &tmp16);
12348 if (tmp16 & 0x8000)
12352 if (!(tmp16 & 0x8000))
12353 goto out_not_found;
12355 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
12357 v = cpu_to_le32(tmp);
12358 memcpy(&vpd_data[i], &v, sizeof(v));
12362 /* Now parse and find the part number. */
12363 for (i = 0; i < 254; ) {
12364 unsigned char val = vpd_data[i];
12365 unsigned int block_end;
12367 if (val == 0x82 || val == 0x91) {
12370 (vpd_data[i + 2] << 8)));
12375 goto out_not_found;
12377 block_end = (i + 3 +
12379 (vpd_data[i + 2] << 8)));
12382 if (block_end > 256)
12383 goto out_not_found;
12385 while (i < (block_end - 2)) {
12386 if (vpd_data[i + 0] == 'P' &&
12387 vpd_data[i + 1] == 'N') {
12388 int partno_len = vpd_data[i + 2];
12391 if (partno_len > 24 || (partno_len + i) > 256)
12392 goto out_not_found;
12394 memcpy(tp->board_part_number,
12395 &vpd_data[i], partno_len);
12400 i += 3 + vpd_data[i + 2];
12403 /* Part number not found. */
12404 goto out_not_found;
12408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12409 strcpy(tp->board_part_number, "BCM95906");
12410 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12411 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12412 strcpy(tp->board_part_number, "BCM57780");
12413 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12414 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12415 strcpy(tp->board_part_number, "BCM57760");
12416 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12417 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12418 strcpy(tp->board_part_number, "BCM57790");
12419 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12420 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12421 strcpy(tp->board_part_number, "BCM57788");
12423 strcpy(tp->board_part_number, "none");
12426 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12430 if (tg3_nvram_read(tp, offset, &val) ||
12431 (val & 0xfc000000) != 0x0c000000 ||
12432 tg3_nvram_read(tp, offset + 4, &val) ||
12439 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12441 u32 val, offset, start, ver_offset;
12443 bool newver = false;
12445 if (tg3_nvram_read(tp, 0xc, &offset) ||
12446 tg3_nvram_read(tp, 0x4, &start))
12449 offset = tg3_nvram_logical_addr(tp, offset);
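/* Newer bootcode images embed a full version string; older images only
 * expose a major/minor revision word, formatted as "vX.YY" below.
 */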
12451 if (tg3_nvram_read(tp, offset, &val))
12454 if ((val & 0xfc000000) == 0x0c000000) {
12455 if (tg3_nvram_read(tp, offset + 4, &val))
12463 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
12466 offset = offset + ver_offset - start;
12467 for (i = 0; i < 16; i += 4) {
12469 if (tg3_nvram_read_be32(tp, offset + i, &v))
12472 memcpy(tp->fw_ver + i, &v, sizeof(v));
12477 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12480 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12481 TG3_NVM_BCVER_MAJSFT;
12482 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12483 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
12487 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12489 u32 val, major, minor;
12491 /* Use native endian representation */
12492 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12495 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12496 TG3_NVM_HWSB_CFG1_MAJSFT;
12497 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12498 TG3_NVM_HWSB_CFG1_MINSFT;
12500 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12503 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12505 u32 offset, major, minor, build;
12507 tp->fw_ver[0] = 's';
12508 tp->fw_ver[1] = 'b';
12509 tp->fw_ver[2] = '\0';
12511 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12514 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12515 case TG3_EEPROM_SB_REVISION_0:
12516 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12518 case TG3_EEPROM_SB_REVISION_2:
12519 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12521 case TG3_EEPROM_SB_REVISION_3:
12522 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12528 if (tg3_nvram_read(tp, offset, &val))
12531 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12532 TG3_EEPROM_SB_EDH_BLD_SHFT;
12533 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12534 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12535 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12537 if (minor > 99 || build > 26)
12540 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
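/* A non-zero build number is appended as a single letter ('a' for build 1,
 * 'b' for build 2, ...); the check above caps build at 26.
 */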
12543 tp->fw_ver[8] = 'a' + build - 1;
12544 tp->fw_ver[9] = '\0';
12548 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12550 u32 val, offset, start;
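/* Walk the NVRAM directory looking for the ASF/management firmware entry;
 * bail out if no such entry exists.
 */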
12553 for (offset = TG3_NVM_DIR_START;
12554 offset < TG3_NVM_DIR_END;
12555 offset += TG3_NVM_DIRENT_SIZE) {
12556 if (tg3_nvram_read(tp, offset, &val))
12559 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12563 if (offset == TG3_NVM_DIR_END)
12566 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12567 start = 0x08000000;
12568 else if (tg3_nvram_read(tp, offset - 4, &start))
12571 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12572 !tg3_fw_img_is_valid(tp, offset) ||
12573 tg3_nvram_read(tp, offset + 8, &val))
12576 offset += val - start;
12578 vlen = strlen(tp->fw_ver);
12580 tp->fw_ver[vlen++] = ',';
12581 tp->fw_ver[vlen++] = ' ';
12583 for (i = 0; i < 4; i++) {
12585 if (tg3_nvram_read_be32(tp, offset, &v))
12588 offset += sizeof(v);
12590 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12591 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12595 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12600 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12605 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12606 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12609 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12610 if (apedata != APE_SEG_SIG_MAGIC)
12613 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12614 if (!(apedata & APE_FW_STATUS_READY))
12617 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12619 vlen = strlen(tp->fw_ver);
12621 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12622 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12623 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12624 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12625 (apedata & APE_FW_VERSION_BLDMSK));
12628 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12632 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12633 tp->fw_ver[0] = 's';
12634 tp->fw_ver[1] = 'b';
12635 tp->fw_ver[2] = '\0';
12640 if (tg3_nvram_read(tp, 0, &val))
12643 if (val == TG3_EEPROM_MAGIC)
12644 tg3_read_bc_ver(tp);
12645 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12646 tg3_read_sb_ver(tp, val);
12647 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12648 tg3_read_hwsb_ver(tp);
12652 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12653 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
12656 tg3_read_mgmtfw_ver(tp);
12658 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12661 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12663 static int __devinit tg3_get_invariants(struct tg3 *tp)
12665 static struct pci_device_id write_reorder_chipsets[] = {
12666 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12667 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12668 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12669 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12670 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12671 PCI_DEVICE_ID_VIA_8385_0) },
12675 u32 pci_state_reg, grc_misc_cfg;
12680 /* Force memory write invalidate off. If we leave it on,
12681 * then on 5700_BX chips we have to enable a workaround.
12682 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12683 * to match the cacheline size. The Broadcom driver has this
12684 * workaround but turns MWI off all the time, so it never uses
12685 * it. This seems to suggest that the workaround is insufficient.
12687 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12688 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12689 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12691 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12692 * has the register indirect write enable bit set before
12693 * we try to access any of the MMIO registers. It is also
12694 * critical that the PCI-X hw workaround situation is decided
12695 * before that as well.
12697 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12700 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12701 MISC_HOST_CTRL_CHIPREV_SHIFT);
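/* Chips that report ASIC_REV_USE_PROD_ID_REG expose their real chip
 * revision through a separate product ID register instead.
 */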
12702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12703 u32 prod_id_asic_rev;
12705 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C ||
12706 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S ||
12707 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C ||
12708 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
12709 pci_read_config_dword(tp->pdev,
12710 TG3PCI_GEN2_PRODID_ASICREV,
12711 &prod_id_asic_rev);
12713 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12714 &prod_id_asic_rev);
12716 tp->pci_chip_rev_id = prod_id_asic_rev;
12719 /* Wrong chip ID in 5752 A0. This code can be removed later
12720 * as A0 is not in production.
12722 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12723 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12725 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12726 * we need to disable memory and use config. cycles
12727 * only to access all registers. The 5702/03 chips
12728 * can mistakenly decode the special cycles from the
12729 * ICH chipsets as memory write cycles, causing corruption
12730 * of register and memory space. Only certain ICH bridges
12731 * will drive special cycles with non-zero data during the
12732 * address phase which can fall within the 5703's address
12733 * range. This is not an ICH bug as the PCI spec allows
12734 * non-zero address during special cycles. However, only
12735 * these ICH bridges are known to drive non-zero addresses
12736 * during special cycles.
12738 * Since special cycles do not cross PCI bridges, we only
12739 * enable this workaround if the 5703 is on the secondary
12740 * bus of these ICH bridges.
12742 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12743 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12744 static struct tg3_dev_id {
12748 } ich_chipsets[] = {
12749 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12751 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12753 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12755 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12759 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12760 struct pci_dev *bridge = NULL;
12762 while (pci_id->vendor != 0) {
12763 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12769 if (pci_id->rev != PCI_ANY_ID) {
12770 if (bridge->revision > pci_id->rev)
12773 if (bridge->subordinate &&
12774 (bridge->subordinate->number ==
12775 tp->pdev->bus->number)) {
12777 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12778 pci_dev_put(bridge);
12784 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12785 static struct tg3_dev_id {
12788 } bridge_chipsets[] = {
12789 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12790 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12793 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12794 struct pci_dev *bridge = NULL;
12796 while (pci_id->vendor != 0) {
12797 bridge = pci_get_device(pci_id->vendor,
12804 if (bridge->subordinate &&
12805 (bridge->subordinate->number <=
12806 tp->pdev->bus->number) &&
12807 (bridge->subordinate->subordinate >=
12808 tp->pdev->bus->number)) {
12809 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12810 pci_dev_put(bridge);
12816 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12817 * DMA addresses > 40-bit. This bridge may have other additional
12818 * 57xx devices behind it in some 4-port NIC designs for example.
12819 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround.
12822 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12823 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12824 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12825 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12826 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12829 struct pci_dev *bridge = NULL;
12832 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12833 PCI_DEVICE_ID_SERVERWORKS_EPB,
12835 if (bridge && bridge->subordinate &&
12836 (bridge->subordinate->number <=
12837 tp->pdev->bus->number) &&
12838 (bridge->subordinate->subordinate >=
12839 tp->pdev->bus->number)) {
12840 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12841 pci_dev_put(bridge);
12847 /* Initialize misc host control in PCI block. */
12848 tp->misc_host_ctrl |= (misc_ctrl_reg &
12849 MISC_HOST_CTRL_CHIPREV);
12850 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12851 tp->misc_host_ctrl);
12853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
12854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
12855 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12856 tp->pdev_peer = tg3_find_peer(tp);
12858 /* Intentionally exclude ASIC_REV_5906 */
12859 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12860 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12861 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12863 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12864 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12865 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12866 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12868 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12870 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12871 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12872 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12873 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12875 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12876 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12877 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12879 /* 5700 B0 chips do not support checksumming correctly due
12880 * to hardware bugs.
12882 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12883 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12885 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12886 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12887 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12888 tp->dev->features |= NETIF_F_IPV6_CSUM;
12891 /* Determine TSO capabilities */
12892 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12893 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
12894 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12896 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12897 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12898 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
12900 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12901 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12902 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12903 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12904 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
12905 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
12906 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
12907 tp->fw_needed = FIRMWARE_TG3TSO5;
12909 tp->fw_needed = FIRMWARE_TG3TSO;
12914 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12915 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12916 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12917 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12918 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12919 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12920 tp->pdev_peer == tp->pdev))
12921 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12923 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12925 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12928 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12929 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12930 tp->irq_max = TG3_IRQ_MAX_VECS;
12934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12936 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
12937 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
12938 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
12939 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
12942 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12943 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
12944 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12945 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
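/* Work out which bus interface the chip sits on (PCI Express, PCI-X, or
 * conventional PCI) and note the capability offsets used by later
 * workarounds.
 */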
12947 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12950 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12951 if (tp->pcie_cap != 0) {
12954 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12956 pcie_set_readrq(tp->pdev, 4096);
12958 pci_read_config_word(tp->pdev,
12959 tp->pcie_cap + PCI_EXP_LNKCTL,
12961 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12963 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12966 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
12967 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
12968 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12970 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12971 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12972 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12973 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12974 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12975 if (!tp->pcix_cap) {
12976 printk(KERN_ERR PFX "Cannot find PCI-X "
12977 "capability, aborting.\n");
12981 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12982 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12985 /* If we have an AMD 762 or VIA K8T800 chipset, write
12986 * reordering to the mailbox registers done by the host
12987 * controller can cause major troubles. We read back from
12988 * every mailbox register write to force the writes to be
12989 * posted to the chip in order.
12991 if (pci_dev_present(write_reorder_chipsets) &&
12992 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12993 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12995 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12996 &tp->pci_cacheline_sz);
12997 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12998 &tp->pci_lat_timer);
12999 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13000 tp->pci_lat_timer < 64) {
13001 tp->pci_lat_timer = 64;
13002 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13003 tp->pci_lat_timer);
13006 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13007 /* 5700 BX chips need to have their TX producer index
13008 * mailboxes written twice to work around a bug.
13010 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13012 /* If we are in PCI-X mode, enable register write workaround.
13014 * The workaround is to use indirect register accesses
13015 * for all chip writes not to mailbox registers.
13017 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13020 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13022 /* The chip can have its power management PCI config
13023 * space registers clobbered due to this bug.
13024 * So explicitly force the chip into D0 here.
13026 pci_read_config_dword(tp->pdev,
13027 tp->pm_cap + PCI_PM_CTRL,
13029 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13030 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13031 pci_write_config_dword(tp->pdev,
13032 tp->pm_cap + PCI_PM_CTRL,
13035 /* Also, force SERR#/PERR# in PCI command. */
13036 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13037 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13038 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13042 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13043 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13044 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13045 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13047 /* Chip-specific fixup from Broadcom driver */
13048 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13049 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13050 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13051 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13054 /* Default fast path register access methods */
13055 tp->read32 = tg3_read32;
13056 tp->write32 = tg3_write32;
13057 tp->read32_mbox = tg3_read32;
13058 tp->write32_mbox = tg3_write32;
13059 tp->write32_tx_mbox = tg3_write32;
13060 tp->write32_rx_mbox = tg3_write32;
13062 /* Various workaround register access methods */
13063 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13064 tp->write32 = tg3_write_indirect_reg32;
13065 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13066 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13067 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13069 * Back to back register writes can cause problems on these
13070 * chips, the workaround is to read back all reg writes
13071 * except those to mailbox regs.
13073 * See tg3_write_indirect_reg32().
13075 tp->write32 = tg3_write_flush_reg32;
13078 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13079 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13080 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13081 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13082 tp->write32_rx_mbox = tg3_write_flush_reg32;
13085 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13086 tp->read32 = tg3_read_indirect_reg32;
13087 tp->write32 = tg3_write_indirect_reg32;
13088 tp->read32_mbox = tg3_read_indirect_mbox;
13089 tp->write32_mbox = tg3_write_indirect_mbox;
13090 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13091 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13096 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13097 pci_cmd &= ~PCI_COMMAND_MEMORY;
13098 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13101 tp->read32_mbox = tg3_read32_mbox_5906;
13102 tp->write32_mbox = tg3_write32_mbox_5906;
13103 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13104 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13107 if (tp->write32 == tg3_write_indirect_reg32 ||
13108 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13109 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13110 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13111 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13113 /* Get eeprom hw config before calling tg3_set_power_state().
13114 * In particular, the TG3_FLG2_IS_NIC flag must be
13115 * determined before calling tg3_set_power_state() so that
13116 * we know whether or not to switch out of Vaux power.
13117 * When the flag is set, it means that GPIO1 is used for eeprom
13118 * write protect and also implies that it is a LOM where GPIOs
13119 * are not used to switch power.
13121 tg3_get_eeprom_hw_cfg(tp);
13123 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13124 /* Allow reads and writes to the
13125 * APE register and memory space.
13127 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13128 PCISTATE_ALLOW_APE_SHMEM_WR;
13129 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13137 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13138 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13140 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13141 * GPIO1 driven high will bring 5700's external PHY out of reset.
13142 * It is also used as eeprom write protect on LOMs.
13144 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13145 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13146 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13147 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13148 GRC_LCLCTRL_GPIO_OUTPUT1);
13149 /* Unused GPIO3 must be driven as output on 5752 because there
13150 * are no pull-up resistors on unused GPIO pins.
13152 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13153 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13157 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13159 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13160 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13161 /* Turn off the debug UART. */
13162 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13163 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13164 /* Keep VMain power. */
13165 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13166 GRC_LCLCTRL_GPIO_OUTPUT0;
13169 /* Force the chip into D0. */
13170 err = tg3_set_power_state(tp, PCI_D0);
13172 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
13173 pci_name(tp->pdev));
13177 /* Derive initial jumbo mode from MTU assigned in
13178 * ether_setup() via the alloc_etherdev() call.
13180 if (tp->dev->mtu > ETH_DATA_LEN &&
13181 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13182 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13184 /* Determine WakeOnLan speed to use. */
13185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13186 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13187 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13188 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13189 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13191 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13194 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13195 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
13197 /* A few boards don't want the Ethernet@WireSpeed phy feature */
13198 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13199 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13200 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13201 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13202 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
13203 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
13204 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
13206 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13207 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13208 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
13209 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13210 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
13212 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13213 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
13214 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13215 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13216 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
13217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13221 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13222 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13223 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
13224 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13225 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
13227 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
13230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13231 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13232 tp->phy_otp = tg3_read_otp_phycfg(tp);
13233 if (tp->phy_otp == 0)
13234 tp->phy_otp = TG3_OTP_DEFAULT;
13237 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13238 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13240 tp->mi_mode = MAC_MI_MODE_BASE;
13242 tp->coalesce_mode = 0;
13243 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13244 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13245 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13247 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13248 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13249 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13251 err = tg3_mdio_init(tp);
13255 /* Initialize data/descriptor byte/word swapping. */
13256 val = tr32(GRC_MODE);
13257 val &= GRC_MODE_HOST_STACKUP;
13258 tw32(GRC_MODE, val | tp->grc_mode);
13260 tg3_switch_clocks(tp);
13262 /* Clear this out for sanity. */
13263 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13265 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13267 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13268 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13269 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13271 if (chiprevid == CHIPREV_ID_5701_A0 ||
13272 chiprevid == CHIPREV_ID_5701_B0 ||
13273 chiprevid == CHIPREV_ID_5701_B2 ||
13274 chiprevid == CHIPREV_ID_5701_B5) {
13275 void __iomem *sram_base;
13277 /* Write some dummy words into the SRAM status block
13278 * area, see if it reads back correctly. If the return
13279 * value is bad, force enable the PCIX workaround.
13281 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
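/* The probe below relies on the write of 0xffffffff to the second word
 * leaving the first word untouched; if the first word no longer reads
 * back as zero, the access corrupted its neighbour and the PCI-X target
 * workaround has to be forced on.
 */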
13283 writel(0x00000000, sram_base);
13284 writel(0x00000000, sram_base + 4);
13285 writel(0xffffffff, sram_base + 4);
13286 if (readl(sram_base) != 0x00000000)
13287 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13292 tg3_nvram_init(tp);
13294 grc_misc_cfg = tr32(GRC_MISC_CFG);
13295 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13297 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13298 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13299 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13300 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13302 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13303 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13304 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13305 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13306 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13307 HOSTCC_MODE_CLRTICK_TXBD);
13309 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13310 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13311 tp->misc_host_ctrl);
13314 /* Preserve the APE MAC_MODE bits */
13315 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13316 tp->mac_mode = tr32(MAC_MODE) |
13317 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13319 tp->mac_mode = TG3_DEF_MAC_MODE;
13321 /* these are limited to 10/100 only */
13322 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13323 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13324 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13325 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13326 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13327 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13328 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13329 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13330 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13331 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13332 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13333 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13334 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13335 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13337 err = tg3_phy_probe(tp);
13339 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
13340 pci_name(tp->pdev), err);
13341 /* ... but do not return immediately ... */
13345 tg3_read_partno(tp);
13346 tg3_read_fw_ver(tp);
13348 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13349 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13351 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13352 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13354 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13357 /* 5700 {AX,BX} chips have a broken status block link
13358 * change bit implementation, so we must use the
13359 * status register in those cases.
13361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13362 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13364 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13366 /* The led_ctrl is set during tg3_phy_probe; here we might
13367 * have to force the link status polling mechanism based
13368 * upon subsystem IDs.
13370 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13372 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13373 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13374 TG3_FLAG_USE_LINKCHG_REG);
13377 /* For all SERDES we poll the MAC status register. */
13378 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13379 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13381 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13383 tp->rx_offset = NET_IP_ALIGN;
13384 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13385 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
13388 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13390 /* Increment the rx prod index on the rx std ring by at most
13391 * 8 for these chips to work around hw errata.
13393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13394 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13396 tp->rx_std_max_post = 8;
13398 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13399 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13400 PCIE_PWR_MGMT_L1_THRESH_MSK;
13405 #ifdef CONFIG_SPARC
13406 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13408 struct net_device *dev = tp->dev;
13409 struct pci_dev *pdev = tp->pdev;
13410 struct device_node *dp = pci_device_to_OF_node(pdev);
13411 const unsigned char *addr;
13414 addr = of_get_property(dp, "local-mac-address", &len);
13415 if (addr && len == 6) {
13416 memcpy(dev->dev_addr, addr, 6);
13417 memcpy(dev->perm_addr, dev->dev_addr, 6);
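/* "local-mac-address" is the standard OpenFirmware property carrying a
 * NIC's assigned MAC address; the len == 6 check above simply insists on
 * a full six-byte value before trusting it.
 */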
13423 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13425 struct net_device *dev = tp->dev;
13427 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13428 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13433 static int __devinit tg3_get_device_address(struct tg3 *tp)
13435 struct net_device *dev = tp->dev;
13436 u32 hi, lo, mac_offset;
13439 #ifdef CONFIG_SPARC
13440 if (!tg3_get_macaddr_sparc(tp))
13445 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13446 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13447 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13449 if (tg3_nvram_lock(tp))
13450 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13452 tg3_nvram_unlock(tp);
13453 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13454 if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
13456 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13459 /* First try to get it from MAC address mailbox. */
13460 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
13461 if ((hi >> 16) == 0x484b) {
13462 dev->dev_addr[0] = (hi >> 8) & 0xff;
13463 dev->dev_addr[1] = (hi >> 0) & 0xff;
13465 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13466 dev->dev_addr[2] = (lo >> 24) & 0xff;
13467 dev->dev_addr[3] = (lo >> 16) & 0xff;
13468 dev->dev_addr[4] = (lo >> 8) & 0xff;
13469 dev->dev_addr[5] = (lo >> 0) & 0xff;
13471 /* Some old bootcode may report a 0 MAC address in SRAM */
13472 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13475 /* Next, try NVRAM. */
13476 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13477 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13478 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13479 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13480 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13482 /* Finally just fetch it out of the MAC control regs. */
13484 hi = tr32(MAC_ADDR_0_HIGH);
13485 lo = tr32(MAC_ADDR_0_LOW);
13487 dev->dev_addr[5] = lo & 0xff;
13488 dev->dev_addr[4] = (lo >> 8) & 0xff;
13489 dev->dev_addr[3] = (lo >> 16) & 0xff;
13490 dev->dev_addr[2] = (lo >> 24) & 0xff;
13491 dev->dev_addr[1] = hi & 0xff;
13492 dev->dev_addr[0] = (hi >> 8) & 0xff;
13496 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13497 #ifdef CONFIG_SPARC
13498 if (!tg3_get_default_macaddr_sparc(tp))
13503 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
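/* Minimal illustrative helper, not used anywhere in the driver: the
 * SRAM-mailbox and MAC-register paths in tg3_get_device_address() above
 * both unpack the station address from a "high" word holding bytes 0-1
 * and a "low" word holding bytes 2-5, in exactly this byte order.
 */
static void __maybe_unused tg3_example_unpack_macaddr(u32 hi, u32 lo, u8 *addr)
{
	addr[0] = (hi >>  8) & 0xff;
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
}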
13507 #define BOUNDARY_SINGLE_CACHELINE 1
13508 #define BOUNDARY_MULTI_CACHELINE 2
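/* A goal of 0 leaves the DMA burst boundary bits at their default (no
 * extra restriction).  BOUNDARY_SINGLE_CACHELINE aims to break bursts at
 * roughly every cache line, while BOUNDARY_MULTI_CACHELINE settles for a
 * larger boundary; which policy applies is chosen per host architecture
 * inside tg3_calc_dma_bndry() below.
 */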
13510 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13512 int cacheline_size;
13516 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13518 cacheline_size = 1024;
13520 cacheline_size = (int) byte * 4;
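	/* PCI_CACHE_LINE_SIZE is specified in 32-bit dwords, hence the
	 * multiply by four; a raw value of zero (register never programmed)
	 * falls back to the 1024-byte guess above.
	 */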
13522 /* On 5703 and later chips, the boundary bits have no
13525 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13526 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13527 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13530 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13531 goal = BOUNDARY_MULTI_CACHELINE;
13533 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13534 goal = BOUNDARY_SINGLE_CACHELINE;
13540 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13541 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13548 /* PCI controllers on most RISC systems tend to disconnect
13549 * when a device tries to burst across a cache-line boundary.
13550 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13552 * Unfortunately, for PCI-E there are only limited
13553 * write-side controls for this, and thus for reads
13554 * we will still get the disconnects. We'll also waste
13555 * these PCI cycles for both read and write for chips
13556 * other than 5700 and 5701 which do not implement the
13559 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13560 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13561 switch (cacheline_size) {
13566 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13567 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13568 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13570 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13571 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13576 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13577 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13581 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13582 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13585 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13586 switch (cacheline_size) {
13590 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13591 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13592 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13598 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13599 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13603 switch (cacheline_size) {
13605 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13606 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13607 DMA_RWCTRL_WRITE_BNDRY_16);
13612 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13613 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13614 DMA_RWCTRL_WRITE_BNDRY_32);
13619 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13620 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13621 DMA_RWCTRL_WRITE_BNDRY_64);
13626 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13627 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13628 DMA_RWCTRL_WRITE_BNDRY_128);
13633 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13634 DMA_RWCTRL_WRITE_BNDRY_256);
13637 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13638 DMA_RWCTRL_WRITE_BNDRY_512);
13642 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13643 DMA_RWCTRL_WRITE_BNDRY_1024);
13652 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13654 struct tg3_internal_buffer_desc test_desc;
13655 u32 sram_dma_descs;
13658 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13660 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13661 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13662 tw32(RDMAC_STATUS, 0);
13663 tw32(WDMAC_STATUS, 0);
13665 tw32(BUFMGR_MODE, 0);
13666 tw32(FTQ_RESET, 0);
13668 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13669 test_desc.addr_lo = buf_dma & 0xffffffff;
13670 test_desc.nic_mbuf = 0x00002100;
13671 test_desc.len = size;
13674 * The HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13675 * the *second* time the tg3 driver was getting loaded after an
13678 * Broadcom tells me:
13679 * ...the DMA engine is connected to the GRC block and a DMA
13680 * reset may affect the GRC block in some unpredictable way...
13681 * The behavior of resets to individual blocks has not been tested.
13683 * Broadcom noted the GRC reset will also reset all sub-components.
13686 test_desc.cqid_sqid = (13 << 8) | 2;
13688 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13691 test_desc.cqid_sqid = (16 << 8) | 7;
13693 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13696 test_desc.flags = 0x00000005;
13698 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13701 val = *(((u32 *)&test_desc) + i);
13702 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13703 sram_dma_descs + (i * sizeof(u32)));
13704 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13706 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
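	/* The test descriptor was just written into NIC SRAM indirectly: the
	 * MEM_WIN_BASE_ADDR config register selects the SRAM address and
	 * MEM_WIN_DATA carries each 32-bit word; the window is then parked
	 * back at zero.
	 */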
13709 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13711 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13715 for (i = 0; i < 40; i++) {
13719 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13721 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13722 if ((val & 0xffff) == sram_dma_descs) {
13733 #define TEST_BUFFER_SIZE 0x2000
13735 static int __devinit tg3_test_dma(struct tg3 *tp)
13737 dma_addr_t buf_dma;
13738 u32 *buf, saved_dma_rwctrl;
13741 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13747 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13748 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13750 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13752 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13755 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13756 /* DMA read watermark not used on PCIE */
13757 tp->dma_rwctrl |= 0x00180000;
13758 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13759 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13761 tp->dma_rwctrl |= 0x003f0000;
13763 tp->dma_rwctrl |= 0x003f000f;
13765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13766 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13767 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13768 u32 read_water = 0x7;
13770 /* If the 5704 is behind the EPB bridge, we can
13771 * do the less restrictive ONE_DMA workaround for
13772 * better performance.
13774 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13775 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13776 tp->dma_rwctrl |= 0x8000;
13777 else if (ccval == 0x6 || ccval == 0x7)
13778 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13780 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13782 /* Set bit 23 to enable PCIX hw bug fix */
13784 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13785 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13787 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13788 /* 5780 always in PCIX mode */
13789 tp->dma_rwctrl |= 0x00144000;
13790 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13791 /* 5714 always in PCIX mode */
13792 tp->dma_rwctrl |= 0x00148000;
13794 tp->dma_rwctrl |= 0x001b000f;
13798 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13799 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13800 tp->dma_rwctrl &= 0xfffffff0;
13802 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13803 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13804 /* Remove this if it causes problems for some boards. */
13805 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13807 /* On 5700/5701 chips, we need to set this bit.
13808 * Otherwise the chip will issue cacheline transactions
13809 * to streamable DMA memory without all of the byte
13810 * enables turned on. This is an error on several
13811 * RISC PCI controllers, in particular sparc64.
13813 * On 5703/5704 chips, this bit has been reassigned
13814 * a different meaning. In particular, it is used
13815 * on those chips to enable a PCI-X workaround.
13817 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13820 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13823 /* Unneeded, already done by tg3_get_invariants. */
13824 tg3_switch_clocks(tp);
13827 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13828 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13831 /* It is best to perform DMA test with maximum write burst size
13832 * to expose the 5700/5701 write DMA bug.
13834 saved_dma_rwctrl = tp->dma_rwctrl;
13835 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13836 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13841 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13844 /* Send the buffer to the chip. */
13845 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13847 printk(KERN_ERR "tg3_test_dma() Writing the buffer failed %d\n", ret);
13852 /* validate data reached card RAM correctly. */
13853 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13855 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13856 if (le32_to_cpu(val) != p[i]) {
13857 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13858 /* ret = -ENODEV here? */
13863 /* Now read it back. */
13864 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13866 printk(KERN_ERR "tg3_test_dma() Reading the buffer failed %d\n", ret);
13872 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13876 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13877 DMA_RWCTRL_WRITE_BNDRY_16) {
13878 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13879 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13880 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13883 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13889 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13895 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13896 DMA_RWCTRL_WRITE_BNDRY_16) {
13897 static struct pci_device_id dma_wait_state_chipsets[] = {
13898 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13899 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13903 /* DMA test passed without adjusting DMA boundary,
13904 * now look for chipsets that are known to expose the
13905 * DMA bug without failing the test.
13907 if (pci_dev_present(dma_wait_state_chipsets)) {
13908 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13909 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13912 /* Safe to use the calculated DMA boundary. */
13913 tp->dma_rwctrl = saved_dma_rwctrl;
13915 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13919 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
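/* Minimal sketch, not used by the driver: on a read-back mismatch the loop
 * above narrows the DMA write boundary to 16 bytes and retries the test,
 * and only gives up if that still fails.  This helper just shows that
 * register manipulation in isolation.
 */
static u32 __maybe_unused tg3_example_force_16b_write_bndry(u32 dma_rwctrl)
{
	dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
	return dma_rwctrl;
}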
13924 static void __devinit tg3_init_link_config(struct tg3 *tp)
13926 tp->link_config.advertising =
13927 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13928 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13929 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13930 ADVERTISED_Autoneg | ADVERTISED_MII);
13931 tp->link_config.speed = SPEED_INVALID;
13932 tp->link_config.duplex = DUPLEX_INVALID;
13933 tp->link_config.autoneg = AUTONEG_ENABLE;
13934 tp->link_config.active_speed = SPEED_INVALID;
13935 tp->link_config.active_duplex = DUPLEX_INVALID;
13936 tp->link_config.phy_is_low_power = 0;
13937 tp->link_config.orig_speed = SPEED_INVALID;
13938 tp->link_config.orig_duplex = DUPLEX_INVALID;
13939 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13942 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13944 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
13945 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
13946 tp->bufmgr_config.mbuf_read_dma_low_water =
13947 DEFAULT_MB_RDMA_LOW_WATER_5705;
13948 tp->bufmgr_config.mbuf_mac_rx_low_water =
13949 DEFAULT_MB_MACRX_LOW_WATER_5705;
13950 tp->bufmgr_config.mbuf_high_water =
13951 DEFAULT_MB_HIGH_WATER_5705;
13952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13953 tp->bufmgr_config.mbuf_mac_rx_low_water =
13954 DEFAULT_MB_MACRX_LOW_WATER_5906;
13955 tp->bufmgr_config.mbuf_high_water =
13956 DEFAULT_MB_HIGH_WATER_5906;
13959 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13960 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13961 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13962 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13963 tp->bufmgr_config.mbuf_high_water_jumbo =
13964 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13966 tp->bufmgr_config.mbuf_read_dma_low_water =
13967 DEFAULT_MB_RDMA_LOW_WATER;
13968 tp->bufmgr_config.mbuf_mac_rx_low_water =
13969 DEFAULT_MB_MACRX_LOW_WATER;
13970 tp->bufmgr_config.mbuf_high_water =
13971 DEFAULT_MB_HIGH_WATER;
13973 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13974 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13975 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13976 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13977 tp->bufmgr_config.mbuf_high_water_jumbo =
13978 DEFAULT_MB_HIGH_WATER_JUMBO;
13981 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13982 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13985 static char * __devinit tg3_phy_string(struct tg3 *tp)
13987 switch (tp->phy_id & PHY_ID_MASK) {
13988 case PHY_ID_BCM5400: return "5400";
13989 case PHY_ID_BCM5401: return "5401";
13990 case PHY_ID_BCM5411: return "5411";
13991 case PHY_ID_BCM5701: return "5701";
13992 case PHY_ID_BCM5703: return "5703";
13993 case PHY_ID_BCM5704: return "5704";
13994 case PHY_ID_BCM5705: return "5705";
13995 case PHY_ID_BCM5750: return "5750";
13996 case PHY_ID_BCM5752: return "5752";
13997 case PHY_ID_BCM5714: return "5714";
13998 case PHY_ID_BCM5780: return "5780";
13999 case PHY_ID_BCM5755: return "5755";
14000 case PHY_ID_BCM5787: return "5787";
14001 case PHY_ID_BCM5784: return "5784";
14002 case PHY_ID_BCM5756: return "5722/5756";
14003 case PHY_ID_BCM5906: return "5906";
14004 case PHY_ID_BCM5761: return "5761";
14005 case PHY_ID_BCM5717: return "5717";
14006 case PHY_ID_BCM8002: return "8002/serdes";
14007 case 0: return "serdes";
14008 default: return "unknown";
14012 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14014 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14015 strcpy(str, "PCI Express");
14017 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14018 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14020 strcpy(str, "PCIX:");
14022 if ((clock_ctrl == 7) ||
14023 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14024 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14025 strcat(str, "133MHz");
14026 else if (clock_ctrl == 0)
14027 strcat(str, "33MHz");
14028 else if (clock_ctrl == 2)
14029 strcat(str, "50MHz");
14030 else if (clock_ctrl == 4)
14031 strcat(str, "66MHz");
14032 else if (clock_ctrl == 6)
14033 strcat(str, "100MHz");
14035 strcpy(str, "PCI:");
14036 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14037 strcat(str, "66MHz");
14039 strcat(str, "33MHz");
14041 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14042 strcat(str, ":32-bit");
14044 strcat(str, ":64-bit");
14048 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14050 struct pci_dev *peer;
14051 unsigned int func, devnr = tp->pdev->devfn & ~7;
14053 for (func = 0; func < 8; func++) {
14054 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14055 if (peer && peer != tp->pdev)
14059 /* 5704 can be configured in single-port mode; set peer to
14060 * tp->pdev in that case.
14068 * We don't need to keep the refcount elevated; there's no way
14069 * to remove one half of this device without removing the other
14076 static void __devinit tg3_init_coal(struct tg3 *tp)
14078 struct ethtool_coalesce *ec = &tp->coal;
14080 memset(ec, 0, sizeof(*ec));
14081 ec->cmd = ETHTOOL_GCOALESCE;
14082 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14083 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14084 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14085 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14086 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14087 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14088 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14089 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14090 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14092 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14093 HOSTCC_MODE_CLRTICK_TXBD)) {
14094 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14095 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14096 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14097 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14100 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14101 ec->rx_coalesce_usecs_irq = 0;
14102 ec->tx_coalesce_usecs_irq = 0;
14103 ec->stats_block_coalesce_usecs = 0;
14107 static const struct net_device_ops tg3_netdev_ops = {
14108 .ndo_open = tg3_open,
14109 .ndo_stop = tg3_close,
14110 .ndo_start_xmit = tg3_start_xmit,
14111 .ndo_get_stats = tg3_get_stats,
14112 .ndo_validate_addr = eth_validate_addr,
14113 .ndo_set_multicast_list = tg3_set_rx_mode,
14114 .ndo_set_mac_address = tg3_set_mac_addr,
14115 .ndo_do_ioctl = tg3_ioctl,
14116 .ndo_tx_timeout = tg3_tx_timeout,
14117 .ndo_change_mtu = tg3_change_mtu,
14118 #if TG3_VLAN_TAG_USED
14119 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14121 #ifdef CONFIG_NET_POLL_CONTROLLER
14122 .ndo_poll_controller = tg3_poll_controller,
14126 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14127 .ndo_open = tg3_open,
14128 .ndo_stop = tg3_close,
14129 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14130 .ndo_get_stats = tg3_get_stats,
14131 .ndo_validate_addr = eth_validate_addr,
14132 .ndo_set_multicast_list = tg3_set_rx_mode,
14133 .ndo_set_mac_address = tg3_set_mac_addr,
14134 .ndo_do_ioctl = tg3_ioctl,
14135 .ndo_tx_timeout = tg3_tx_timeout,
14136 .ndo_change_mtu = tg3_change_mtu,
14137 #if TG3_VLAN_TAG_USED
14138 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14140 #ifdef CONFIG_NET_POLL_CONTROLLER
14141 .ndo_poll_controller = tg3_poll_controller,
14145 static int __devinit tg3_init_one(struct pci_dev *pdev,
14146 const struct pci_device_id *ent)
14148 static int tg3_version_printed = 0;
14149 struct net_device *dev;
14151 int i, err, pm_cap;
14152 u32 sndmbx, rcvmbx, intmbx;
14154 u64 dma_mask, persist_dma_mask;
14156 if (tg3_version_printed++ == 0)
14157 printk(KERN_INFO "%s", version);
14159 err = pci_enable_device(pdev);
14161 printk(KERN_ERR PFX "Cannot enable PCI device, "
14166 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14168 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
14170 goto err_out_disable_pdev;
14173 pci_set_master(pdev);
14175 /* Find power-management capability. */
14176 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14178 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
14181 goto err_out_free_res;
14184 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14186 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
14188 goto err_out_free_res;
14191 SET_NETDEV_DEV(dev, &pdev->dev);
14193 #if TG3_VLAN_TAG_USED
14194 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14197 tp = netdev_priv(dev);
14200 tp->pm_cap = pm_cap;
14201 tp->rx_mode = TG3_DEF_RX_MODE;
14202 tp->tx_mode = TG3_DEF_TX_MODE;
14205 tp->msg_enable = tg3_debug;
14207 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14209 /* The word/byte swap controls here control register access byte
14210 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14213 tp->misc_host_ctrl =
14214 MISC_HOST_CTRL_MASK_PCI_INT |
14215 MISC_HOST_CTRL_WORD_SWAP |
14216 MISC_HOST_CTRL_INDIR_ACCESS |
14217 MISC_HOST_CTRL_PCISTATE_RW;
14219 /* The NONFRM (non-frame) byte/word swap controls take effect
14220 * on descriptor entries, i.e. anything which isn't packet data.
14222 * The StrongARM chips on the board (one for tx, one for rx)
14223 * are running in big-endian mode.
14225 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14226 GRC_MODE_WSWAP_NONFRM_DATA);
14227 #ifdef __BIG_ENDIAN
14228 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14230 spin_lock_init(&tp->lock);
14231 spin_lock_init(&tp->indirect_lock);
14232 INIT_WORK(&tp->reset_task, tg3_reset_task);
14234 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14236 printk(KERN_ERR PFX "Cannot map device registers, "
14239 goto err_out_free_dev;
14242 tg3_init_link_config(tp);
14244 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14245 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14247 dev->ethtool_ops = &tg3_ethtool_ops;
14248 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14249 dev->irq = pdev->irq;
14251 err = tg3_get_invariants(tp);
14253 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
14255 goto err_out_iounmap;
14258 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14259 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
14260 dev->netdev_ops = &tg3_netdev_ops;
14262 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14265 /* The EPB bridge inside 5714, 5715, and 5780 and any
14266 * device behind the EPB cannot support DMA addresses > 40-bit.
14267 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14268 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14269 * do DMA address check in tg3_start_xmit().
14271 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14272 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14273 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14274 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14275 #ifdef CONFIG_HIGHMEM
14276 dma_mask = DMA_BIT_MASK(64);
14279 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
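	/* For reference, DMA_BIT_MASK(n) is just ((1ULL << n) - 1), with all
	 * ones for n == 64, so the three cases above cap bus addresses to the
	 * low 4 GiB, the low 1 TiB, or leave them unrestricted respectively.
	 */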
14281 /* Configure DMA attributes. */
14282 if (dma_mask > DMA_BIT_MASK(32)) {
14283 err = pci_set_dma_mask(pdev, dma_mask);
14285 dev->features |= NETIF_F_HIGHDMA;
14286 err = pci_set_consistent_dma_mask(pdev,
14289 printk(KERN_ERR PFX "Unable to obtain 64 bit "
14290 "DMA for consistent allocations\n");
14291 goto err_out_iounmap;
14295 if (err || dma_mask == DMA_BIT_MASK(32)) {
14296 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14298 printk(KERN_ERR PFX "No usable DMA configuration, "
14300 goto err_out_iounmap;
14304 tg3_init_bufmgr_config(tp);
14306 /* Selectively allow TSO based on operating conditions */
14307 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14308 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14309 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14311 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14312 tp->fw_needed = NULL;
14315 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14316 tp->fw_needed = FIRMWARE_TG3;
14318 /* TSO is on by default on chips that support hardware TSO.
14319 * Firmware TSO on older chips gives lower performance, so it
14320 * is off by default, but can be enabled using ethtool.
14322 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14323 (dev->features & NETIF_F_IP_CSUM))
14324 dev->features |= NETIF_F_TSO;
14326 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14327 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14328 if (dev->features & NETIF_F_IPV6_CSUM)
14329 dev->features |= NETIF_F_TSO6;
14330 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14332 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14333 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14334 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14335 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14336 dev->features |= NETIF_F_TSO_ECN;
14339 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14340 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14341 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14342 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14343 tp->rx_pending = 63;
14346 err = tg3_get_device_address(tp);
14348 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
14353 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14354 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14355 if (!tp->aperegs) {
14356 printk(KERN_ERR PFX "Cannot map APE registers, "
14362 tg3_ape_lock_init(tp);
14364 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14365 tg3_read_dash_ver(tp);
14369 * Reset the chip in case the UNDI or EFI driver did not shut it down.
14370 * The DMA self test will enable WDMAC and we'll see (spurious)
14371 * pending DMA on the PCI bus at that point.
14373 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14374 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14375 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14376 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14379 err = tg3_test_dma(tp);
14381 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
14382 goto err_out_apeunmap;
14385 /* flow control autonegotiation is default behavior */
14386 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14387 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14389 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14390 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14391 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14392 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14393 struct tg3_napi *tnapi = &tp->napi[i];
14396 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14398 tnapi->int_mbox = intmbx;
14404 tnapi->consmbox = rcvmbx;
14405 tnapi->prodmbox = sndmbx;
14408 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14409 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14411 tnapi->coal_now = HOSTCC_MODE_NOW;
14412 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14415 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14419 * If we support MSIX, we'll be using RSS. If we're using
14420 * RSS, the first vector only handles link interrupts and the
14421 * remaining vectors handle rx and tx interrupts. Reuse the
14422 * mailbox values for the next iteration. The values we set up
14423 * above are still useful for the single-vector mode.
14438 pci_set_drvdata(pdev, dev);
14440 err = register_netdev(dev);
14442 printk(KERN_ERR PFX "Cannot register net device, "
14444 goto err_out_apeunmap;
14447 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14449 tp->board_part_number,
14450 tp->pci_chip_rev_id,
14451 tg3_bus_string(tp, str),
14454 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14455 struct phy_device *phydev;
14456 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14458 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14459 tp->dev->name, phydev->drv->name,
14460 dev_name(&phydev->dev));
14463 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14464 tp->dev->name, tg3_phy_string(tp),
14465 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14466 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14467 "10/100/1000Base-T")),
14468 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14470 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14472 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14473 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14474 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14475 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14476 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14477 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14478 dev->name, tp->dma_rwctrl,
14479 (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
14480 (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
14486 iounmap(tp->aperegs);
14487 tp->aperegs = NULL;
14492 release_firmware(tp->fw);
14504 pci_release_regions(pdev);
14506 err_out_disable_pdev:
14507 pci_disable_device(pdev);
14508 pci_set_drvdata(pdev, NULL);
14512 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14514 struct net_device *dev = pci_get_drvdata(pdev);
14517 struct tg3 *tp = netdev_priv(dev);
14520 release_firmware(tp->fw);
14522 flush_scheduled_work();
14524 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14529 unregister_netdev(dev);
14531 iounmap(tp->aperegs);
14532 tp->aperegs = NULL;
14539 pci_release_regions(pdev);
14540 pci_disable_device(pdev);
14541 pci_set_drvdata(pdev, NULL);
14545 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14547 struct net_device *dev = pci_get_drvdata(pdev);
14548 struct tg3 *tp = netdev_priv(dev);
14549 pci_power_t target_state;
14552 /* PCI register 4 needs to be saved whether netif_running() or not.
14553 * MSI address and data need to be saved if using MSI and
14556 pci_save_state(pdev);
14558 if (!netif_running(dev))
14561 flush_scheduled_work();
14563 tg3_netif_stop(tp);
14565 del_timer_sync(&tp->timer);
14567 tg3_full_lock(tp, 1);
14568 tg3_disable_ints(tp);
14569 tg3_full_unlock(tp);
14571 netif_device_detach(dev);
14573 tg3_full_lock(tp, 0);
14574 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14575 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14576 tg3_full_unlock(tp);
14578 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14580 err = tg3_set_power_state(tp, target_state);
14584 tg3_full_lock(tp, 0);
14586 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14587 err2 = tg3_restart_hw(tp, 1);
14591 tp->timer.expires = jiffies + tp->timer_offset;
14592 add_timer(&tp->timer);
14594 netif_device_attach(dev);
14595 tg3_netif_start(tp);
14598 tg3_full_unlock(tp);
14607 static int tg3_resume(struct pci_dev *pdev)
14609 struct net_device *dev = pci_get_drvdata(pdev);
14610 struct tg3 *tp = netdev_priv(dev);
14613 pci_restore_state(tp->pdev);
14615 if (!netif_running(dev))
14618 err = tg3_set_power_state(tp, PCI_D0);
14622 netif_device_attach(dev);
14624 tg3_full_lock(tp, 0);
14626 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14627 err = tg3_restart_hw(tp, 1);
14631 tp->timer.expires = jiffies + tp->timer_offset;
14632 add_timer(&tp->timer);
14634 tg3_netif_start(tp);
14637 tg3_full_unlock(tp);
14645 static struct pci_driver tg3_driver = {
14646 .name = DRV_MODULE_NAME,
14647 .id_table = tg3_pci_tbl,
14648 .probe = tg3_init_one,
14649 .remove = __devexit_p(tg3_remove_one),
14650 .suspend = tg3_suspend,
14651 .resume = tg3_resume
14654 static int __init tg3_init(void)
14656 return pci_register_driver(&tg3_driver);
14659 static void __exit tg3_cleanup(void)
14661 pci_unregister_driver(&tg3_driver);
14664 module_init(tg3_init);
14665 module_exit(tg3_cleanup);