2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2009 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/firmware.h>
45 #include <net/checksum.h>
48 #include <asm/system.h>
50 #include <asm/byteorder.h>
51 #include <asm/uaccess.h>
54 #include <asm/idprom.h>
61 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
62 #define TG3_VLAN_TAG_USED 1
64 #define TG3_VLAN_TAG_USED 0
69 #define DRV_MODULE_NAME "tg3"
70 #define PFX DRV_MODULE_NAME ": "
71 #define DRV_MODULE_VERSION "3.102"
72 #define DRV_MODULE_RELDATE "September 1, 2009"
74 #define TG3_DEF_MAC_MODE 0
75 #define TG3_DEF_RX_MODE 0
76 #define TG3_DEF_TX_MODE 0
77 #define TG3_DEF_MSG_ENABLE \
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
90 #define TG3_TX_TIMEOUT (5 * HZ)
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
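/* Illustrative sketch, not part of the original driver: a bounds check a
 * ndo_change_mtu-style handler could perform with the limits above
 * (assumes struct tg3 from tg3.h, as used throughout this file).
 */
static inline int tg3_mtu_in_range(struct tg3 *tp, int new_mtu)
{
	return new_mtu >= TG3_MIN_MTU && new_mtu <= TG3_MAX_MTU(tp);
}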
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
105 #define TG3_RSS_INDIR_TBL_SIZE 128
107 /* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
113 #define TG3_RX_RCB_RING_SIZE(tp) \
114 (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
115 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
117 #define TG3_TX_RING_SIZE 512
118 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
120 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
123 TG3_RX_JUMBO_RING_SIZE)
124 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
125 TG3_RX_RCB_RING_SIZE(tp))
126 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
128 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
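/* Illustrative sketch (assumption, not in the original source): because the
 * ring sizes above are powers of two, an index can be advanced with a mask,
 * exactly as NEXT_TX() does, instead of a '%' that would compile to a
 * hardware divide.
 */
static inline u32 tg3_ring_idx_advance(u32 index, u32 count, u32 ring_size)
{
	/* ring_size must be a power of two for the mask to equal '% ring_size' */
	return (index + count) & (ring_size - 1);
}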
130 #define TG3_DMA_BYTE_ENAB 64
132 #define TG3_RX_STD_DMA_SZ 1536
133 #define TG3_RX_JMB_DMA_SZ 9046
135 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
137 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
138 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
140 /* minimum number of free TX descriptors required to wake up TX process */
141 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
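/* Usage sketch (assumption; the tg3_tx_avail()-style helper name is
 * hypothetical here): the TX completion path would typically re-wake a
 * stopped queue only once this many descriptors are free again, e.g.
 *
 *	if (netif_queue_stopped(tp->dev) &&
 *	    tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_wake_queue(tp->dev);
 */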
143 #define TG3_RAW_IP_ALIGN 2
145 /* number of ETHTOOL_GSTATS u64's */
146 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
148 #define TG3_NUM_TEST 6
150 #define FIRMWARE_TG3 "tigon/tg3.bin"
151 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
152 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
154 static char version[] __devinitdata =
155 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
157 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
158 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
159 MODULE_LICENSE("GPL");
160 MODULE_VERSION(DRV_MODULE_VERSION);
161 MODULE_FIRMWARE(FIRMWARE_TG3);
162 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
163 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
165 #define TG3_RSS_MIN_NUM_MSIX_VECS 2
167 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
168 module_param(tg3_debug, int, 0);
169 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
171 static struct pci_device_id tg3_pci_tbl[] = {
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
238 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
239 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
240 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
241 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
242 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
243 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
244 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
248 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
250 static const struct {
251 const char string[ETH_GSTRING_LEN];
252 } ethtool_stats_keys[TG3_NUM_STATS] = {
255 { "rx_ucast_packets" },
256 { "rx_mcast_packets" },
257 { "rx_bcast_packets" },
259 { "rx_align_errors" },
260 { "rx_xon_pause_rcvd" },
261 { "rx_xoff_pause_rcvd" },
262 { "rx_mac_ctrl_rcvd" },
263 { "rx_xoff_entered" },
264 { "rx_frame_too_long_errors" },
266 { "rx_undersize_packets" },
267 { "rx_in_length_errors" },
268 { "rx_out_length_errors" },
269 { "rx_64_or_less_octet_packets" },
270 { "rx_65_to_127_octet_packets" },
271 { "rx_128_to_255_octet_packets" },
272 { "rx_256_to_511_octet_packets" },
273 { "rx_512_to_1023_octet_packets" },
274 { "rx_1024_to_1522_octet_packets" },
275 { "rx_1523_to_2047_octet_packets" },
276 { "rx_2048_to_4095_octet_packets" },
277 { "rx_4096_to_8191_octet_packets" },
278 { "rx_8192_to_9022_octet_packets" },
285 { "tx_flow_control" },
287 { "tx_single_collisions" },
288 { "tx_mult_collisions" },
290 { "tx_excessive_collisions" },
291 { "tx_late_collisions" },
292 { "tx_collide_2times" },
293 { "tx_collide_3times" },
294 { "tx_collide_4times" },
295 { "tx_collide_5times" },
296 { "tx_collide_6times" },
297 { "tx_collide_7times" },
298 { "tx_collide_8times" },
299 { "tx_collide_9times" },
300 { "tx_collide_10times" },
301 { "tx_collide_11times" },
302 { "tx_collide_12times" },
303 { "tx_collide_13times" },
304 { "tx_collide_14times" },
305 { "tx_collide_15times" },
306 { "tx_ucast_packets" },
307 { "tx_mcast_packets" },
308 { "tx_bcast_packets" },
309 { "tx_carrier_sense_errors" },
313 { "dma_writeq_full" },
314 { "dma_write_prioq_full" },
318 { "rx_threshold_hit" },
320 { "dma_readq_full" },
321 { "dma_read_prioq_full" },
322 { "tx_comp_queue_full" },
324 { "ring_set_send_prod_index" },
325 { "ring_status_update" },
327 { "nic_avoided_irqs" },
328 { "nic_tx_threshold_hit" }
331 static const struct {
332 const char string[ETH_GSTRING_LEN];
333 } ethtool_test_keys[TG3_NUM_TEST] = {
334 { "nvram test (online) " },
335 { "link test (online) " },
336 { "register test (offline)" },
337 { "memory test (offline)" },
338 { "loopback test (offline)" },
339 { "interrupt test (offline)" },
342 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
344 writel(val, tp->regs + off);
347 static u32 tg3_read32(struct tg3 *tp, u32 off)
349 return (readl(tp->regs + off));
352 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
354 writel(val, tp->aperegs + off);
357 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
359 return (readl(tp->aperegs + off));
362 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
366 spin_lock_irqsave(&tp->indirect_lock, flags);
367 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
368 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
369 spin_unlock_irqrestore(&tp->indirect_lock, flags);
372 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
374 writel(val, tp->regs + off);
375 readl(tp->regs + off);
378 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
383 spin_lock_irqsave(&tp->indirect_lock, flags);
384 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
385 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
386 spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
394 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
395 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
396 TG3_64BIT_REG_LOW, val);
399 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
400 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
401 TG3_64BIT_REG_LOW, val);
405 spin_lock_irqsave(&tp->indirect_lock, flags);
406 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
407 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
408 spin_unlock_irqrestore(&tp->indirect_lock, flags);
410 /* In indirect mode when disabling interrupts, we also need
411 * to clear the interrupt bit in the GRC local ctrl register.
413 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
415 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
416 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
420 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
425 spin_lock_irqsave(&tp->indirect_lock, flags);
426 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
427 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
428 spin_unlock_irqrestore(&tp->indirect_lock, flags);
432 /* usec_wait specifies the wait time in usec when writing to certain registers
433 * where it is unsafe to read back the register without some delay.
434 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
435 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
437 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
439 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
440 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
441 /* Non-posted methods */
442 tp->write32(tp, off, val);
445 tg3_write32(tp, off, val);
450 /* Wait again after the read for the posted method to guarantee that
451 * the wait time is met.
457 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
459 tp->write32_mbox(tp, off, val);
460 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
461 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
462 tp->read32_mbox(tp, off);
465 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
467 void __iomem *mbox = tp->regs + off;
469 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
471 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
475 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
477 return (readl(tp->regs + off + GRCMBOX_BASE));
480 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
482 writel(val, tp->regs + off + GRCMBOX_BASE);
485 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
486 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
487 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
488 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
489 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
491 #define tw32(reg,val) tp->write32(tp, reg, val)
492 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
493 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
494 #define tr32(reg) tp->read32(tp, reg)
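/* Illustrative sketch, not part of the original driver: the accessors above
 * are always used with a local 'struct tg3 *tp' in scope.  The flushing,
 * delayed variant is reserved for registers that are unsafe to read back
 * immediately (see the usec_wait comment above _tw32_flush()).
 */
static inline u32 tg3_reg_access_example(struct tg3 *tp)
{
	/* delayed, flushed write, then a plain read of the same register */
	tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl, 40);
	return tr32(TG3PCI_CLOCK_CTRL);
}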
496 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
500 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
501 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
506 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
507 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
509 /* Always leave this as zero. */
510 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
512 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
513 tw32_f(TG3PCI_MEM_WIN_DATA, val);
515 /* Always leave this as zero. */
516 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
518 spin_unlock_irqrestore(&tp->indirect_lock, flags);
521 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
525 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
526 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
531 spin_lock_irqsave(&tp->indirect_lock, flags);
532 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
533 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
534 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
536 /* Always leave this as zero. */
537 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
539 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
540 *val = tr32(TG3PCI_MEM_WIN_DATA);
542 /* Always leave this as zero. */
543 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
545 spin_unlock_irqrestore(&tp->indirect_lock, flags);
548 static void tg3_ape_lock_init(struct tg3 *tp)
552 /* Make sure the driver doesn't hold any stale locks. */
553 for (i = 0; i < 8; i++)
554 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
555 APE_LOCK_GRANT_DRIVER);
558 static int tg3_ape_lock(struct tg3 *tp, int locknum)
564 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
568 case TG3_APE_LOCK_GRC:
569 case TG3_APE_LOCK_MEM:
577 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
579 /* Wait for up to 1 millisecond to acquire lock. */
580 for (i = 0; i < 100; i++) {
581 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
582 if (status == APE_LOCK_GRANT_DRIVER)
587 if (status != APE_LOCK_GRANT_DRIVER) {
588 /* Revoke the lock request. */
589 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
590 APE_LOCK_GRANT_DRIVER);
598 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
602 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
606 case TG3_APE_LOCK_GRC:
607 case TG3_APE_LOCK_MEM:
614 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
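/* Calling-pattern sketch (assumption; function name hypothetical): accesses
 * to resources shared with the APE firmware are bracketed by the lock
 * helpers above.
 */
static inline void tg3_ape_locked_access_example(struct tg3 *tp)
{
	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
		return;		/* lock not granted within 1 ms */
	/* ... touch APE-shared state here ... */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
}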
617 static void tg3_disable_ints(struct tg3 *tp)
621 tw32(TG3PCI_MISC_HOST_CTRL,
622 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
623 for (i = 0; i < tp->irq_max; i++)
624 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
627 static void tg3_enable_ints(struct tg3 *tp)
635 tw32(TG3PCI_MISC_HOST_CTRL,
636 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
638 for (i = 0; i < tp->irq_cnt; i++) {
639 struct tg3_napi *tnapi = &tp->napi[i];
640 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
641 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
642 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
644 coal_now |= tnapi->coal_now;
647 /* Force an initial interrupt */
648 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
649 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
650 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
652 tw32(HOSTCC_MODE, tp->coalesce_mode |
653 HOSTCC_MODE_ENABLE | coal_now);
656 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
658 struct tg3 *tp = tnapi->tp;
659 struct tg3_hw_status *sblk = tnapi->hw_status;
660 unsigned int work_exists = 0;
662 /* check for phy events */
663 if (!(tp->tg3_flags &
664 (TG3_FLAG_USE_LINKCHG_REG |
665 TG3_FLAG_POLL_SERDES))) {
666 if (sblk->status & SD_STATUS_LINK_CHG)
669 /* check for RX/TX work to do */
670 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
671 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
678 * similar to tg3_enable_ints, but it accurately determines whether there
679 * is new work pending and can return without flushing the PIO write
680 * which reenables interrupts
682 static void tg3_int_reenable(struct tg3_napi *tnapi)
684 struct tg3 *tp = tnapi->tp;
686 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
689 /* When doing tagged status, this work check is unnecessary.
690 * The last_tag we write above tells the chip which piece of
691 * work we've completed.
693 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
695 tw32(HOSTCC_MODE, tp->coalesce_mode |
696 HOSTCC_MODE_ENABLE | tnapi->coal_now);
699 static void tg3_napi_disable(struct tg3 *tp)
703 for (i = tp->irq_cnt - 1; i >= 0; i--)
704 napi_disable(&tp->napi[i].napi);
707 static void tg3_napi_enable(struct tg3 *tp)
711 for (i = 0; i < tp->irq_cnt; i++)
712 napi_enable(&tp->napi[i].napi);
715 static inline void tg3_netif_stop(struct tg3 *tp)
717 tp->dev->trans_start = jiffies; /* prevent tx timeout */
718 tg3_napi_disable(tp);
719 netif_tx_disable(tp->dev);
722 static inline void tg3_netif_start(struct tg3 *tp)
724 /* NOTE: unconditional netif_tx_wake_all_queues is only
725 * appropriate so long as all callers are assured to
726 * have free tx slots (such as after tg3_init_hw)
728 netif_tx_wake_all_queues(tp->dev);
731 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
735 static void tg3_switch_clocks(struct tg3 *tp)
740 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
741 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
744 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
746 orig_clock_ctrl = clock_ctrl;
747 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
748 CLOCK_CTRL_CLKRUN_OENABLE |
750 tp->pci_clock_ctrl = clock_ctrl;
752 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
753 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
754 tw32_wait_f(TG3PCI_CLOCK_CTRL,
755 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
757 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
758 tw32_wait_f(TG3PCI_CLOCK_CTRL,
760 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
762 tw32_wait_f(TG3PCI_CLOCK_CTRL,
763 clock_ctrl | (CLOCK_CTRL_ALTCLK),
766 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
769 #define PHY_BUSY_LOOPS 5000
771 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
777 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
779 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
785 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
786 MI_COM_PHY_ADDR_MASK);
787 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
788 MI_COM_REG_ADDR_MASK);
789 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
791 tw32_f(MAC_MI_COM, frame_val);
793 loops = PHY_BUSY_LOOPS;
796 frame_val = tr32(MAC_MI_COM);
798 if ((frame_val & MI_COM_BUSY) == 0) {
800 frame_val = tr32(MAC_MI_COM);
808 *val = frame_val & MI_COM_DATA_MASK;
812 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
813 tw32_f(MAC_MI_MODE, tp->mi_mode);
820 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
826 if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
827 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
830 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
832 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
836 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
837 MI_COM_PHY_ADDR_MASK);
838 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
839 MI_COM_REG_ADDR_MASK);
840 frame_val |= (val & MI_COM_DATA_MASK);
841 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
843 tw32_f(MAC_MI_COM, frame_val);
845 loops = PHY_BUSY_LOOPS;
848 frame_val = tr32(MAC_MI_COM);
849 if ((frame_val & MI_COM_BUSY) == 0) {
851 frame_val = tr32(MAC_MI_COM);
861 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
862 tw32_f(MAC_MI_MODE, tp->mi_mode);
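/* Illustrative helper (assumption, not in the original source): a typical
 * read-modify-write of a PHY register built from the two accessors above;
 * both return 0 on success.
 */
static inline int tg3_phy_set_bits_example(struct tg3 *tp, int reg, u32 bits)
{
	u32 val;
	int err;

	err = tg3_readphy(tp, reg, &val);
	if (err)
		return err;
	return tg3_writephy(tp, reg, val | bits);
}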
869 static int tg3_bmcr_reset(struct tg3 *tp)
874 /* OK, reset it, and poll the BMCR_RESET bit until it
875 * clears or we time out.
877 phy_control = BMCR_RESET;
878 err = tg3_writephy(tp, MII_BMCR, phy_control);
884 err = tg3_readphy(tp, MII_BMCR, &phy_control);
888 if ((phy_control & BMCR_RESET) == 0) {
900 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
902 struct tg3 *tp = bp->priv;
905 spin_lock_bh(&tp->lock);
907 if (tg3_readphy(tp, reg, &val))
910 spin_unlock_bh(&tp->lock);
915 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
917 struct tg3 *tp = bp->priv;
920 spin_lock_bh(&tp->lock);
922 if (tg3_writephy(tp, reg, val))
925 spin_unlock_bh(&tp->lock);
930 static int tg3_mdio_reset(struct mii_bus *bp)
935 static void tg3_mdio_config_5785(struct tg3 *tp)
938 struct phy_device *phydev;
940 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
941 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
942 case TG3_PHY_ID_BCM50610:
943 val = MAC_PHYCFG2_50610_LED_MODES;
945 case TG3_PHY_ID_BCMAC131:
946 val = MAC_PHYCFG2_AC131_LED_MODES;
948 case TG3_PHY_ID_RTL8211C:
949 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
951 case TG3_PHY_ID_RTL8201E:
952 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
958 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
959 tw32(MAC_PHYCFG2, val);
961 val = tr32(MAC_PHYCFG1);
962 val &= ~(MAC_PHYCFG1_RGMII_INT |
963 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
964 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
965 tw32(MAC_PHYCFG1, val);
970 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
971 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
972 MAC_PHYCFG2_FMODE_MASK_MASK |
973 MAC_PHYCFG2_GMODE_MASK_MASK |
974 MAC_PHYCFG2_ACT_MASK_MASK |
975 MAC_PHYCFG2_QUAL_MASK_MASK |
976 MAC_PHYCFG2_INBAND_ENABLE;
978 tw32(MAC_PHYCFG2, val);
980 val = tr32(MAC_PHYCFG1);
981 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
982 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
983 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
984 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
985 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
986 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
987 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
989 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
990 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
991 tw32(MAC_PHYCFG1, val);
993 val = tr32(MAC_EXT_RGMII_MODE);
994 val &= ~(MAC_RGMII_MODE_RX_INT_B |
995 MAC_RGMII_MODE_RX_QUALITY |
996 MAC_RGMII_MODE_RX_ACTIVITY |
997 MAC_RGMII_MODE_RX_ENG_DET |
998 MAC_RGMII_MODE_TX_ENABLE |
999 MAC_RGMII_MODE_TX_LOWPWR |
1000 MAC_RGMII_MODE_TX_RESET);
1001 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
1002 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1003 val |= MAC_RGMII_MODE_RX_INT_B |
1004 MAC_RGMII_MODE_RX_QUALITY |
1005 MAC_RGMII_MODE_RX_ACTIVITY |
1006 MAC_RGMII_MODE_RX_ENG_DET;
1007 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1008 val |= MAC_RGMII_MODE_TX_ENABLE |
1009 MAC_RGMII_MODE_TX_LOWPWR |
1010 MAC_RGMII_MODE_TX_RESET;
1012 tw32(MAC_EXT_RGMII_MODE, val);
1015 static void tg3_mdio_start(struct tg3 *tp)
1017 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1018 tw32_f(MAC_MI_MODE, tp->mi_mode);
1021 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
1022 u32 funcnum, is_serdes;
1024 funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
1030 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1034 tp->phy_addr = TG3_PHY_MII_ADDR;
1036 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1038 tg3_mdio_config_5785(tp);
1041 static int tg3_mdio_init(struct tg3 *tp)
1045 struct phy_device *phydev;
1049 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1050 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1053 tp->mdio_bus = mdiobus_alloc();
1054 if (tp->mdio_bus == NULL)
1057 tp->mdio_bus->name = "tg3 mdio bus";
1058 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1059 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1060 tp->mdio_bus->priv = tp;
1061 tp->mdio_bus->parent = &tp->pdev->dev;
1062 tp->mdio_bus->read = &tg3_mdio_read;
1063 tp->mdio_bus->write = &tg3_mdio_write;
1064 tp->mdio_bus->reset = &tg3_mdio_reset;
1065 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1066 tp->mdio_bus->irq = &tp->mdio_irq[0];
1068 for (i = 0; i < PHY_MAX_ADDR; i++)
1069 tp->mdio_bus->irq[i] = PHY_POLL;
1071 /* The bus registration will look for all the PHYs on the mdio bus.
1072 * Unfortunately, it does not ensure the PHY is powered up before
1073 * accessing the PHY ID registers. A chip reset is the
1074 * quickest way to bring the device back to an operational state.
1076 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1079 i = mdiobus_register(tp->mdio_bus);
1081 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1083 mdiobus_free(tp->mdio_bus);
1087 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1089 if (!phydev || !phydev->drv) {
1090 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1091 mdiobus_unregister(tp->mdio_bus);
1092 mdiobus_free(tp->mdio_bus);
1096 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1097 case TG3_PHY_ID_BCM57780:
1098 phydev->interface = PHY_INTERFACE_MODE_GMII;
1100 case TG3_PHY_ID_BCM50610:
1101 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1102 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1103 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1104 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1105 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1106 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1108 case TG3_PHY_ID_RTL8211C:
1109 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1111 case TG3_PHY_ID_RTL8201E:
1112 case TG3_PHY_ID_BCMAC131:
1113 phydev->interface = PHY_INTERFACE_MODE_MII;
1114 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
1118 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1120 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1121 tg3_mdio_config_5785(tp);
1126 static void tg3_mdio_fini(struct tg3 *tp)
1128 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1129 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1130 mdiobus_unregister(tp->mdio_bus);
1131 mdiobus_free(tp->mdio_bus);
1135 /* tp->lock is held. */
1136 static inline void tg3_generate_fw_event(struct tg3 *tp)
1140 val = tr32(GRC_RX_CPU_EVENT);
1141 val |= GRC_RX_CPU_DRIVER_EVENT;
1142 tw32_f(GRC_RX_CPU_EVENT, val);
1144 tp->last_event_jiffies = jiffies;
1147 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1149 /* tp->lock is held. */
1150 static void tg3_wait_for_event_ack(struct tg3 *tp)
1153 unsigned int delay_cnt;
1156 /* If enough time has passed, no wait is necessary. */
1157 time_remain = (long)(tp->last_event_jiffies + 1 +
1158 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1160 if (time_remain < 0)
1163 /* Check if we can shorten the wait time. */
1164 delay_cnt = jiffies_to_usecs(time_remain);
1165 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1166 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1167 delay_cnt = (delay_cnt >> 3) + 1;
1169 for (i = 0; i < delay_cnt; i++) {
1170 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1176 /* tp->lock is held. */
1177 static void tg3_ump_link_report(struct tg3 *tp)
1182 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1183 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1186 tg3_wait_for_event_ack(tp);
1188 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1190 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1193 if (!tg3_readphy(tp, MII_BMCR, &reg))
1195 if (!tg3_readphy(tp, MII_BMSR, &reg))
1196 val |= (reg & 0xffff);
1197 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1200 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1202 if (!tg3_readphy(tp, MII_LPA, &reg))
1203 val |= (reg & 0xffff);
1204 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1207 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1208 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1210 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1211 val |= (reg & 0xffff);
1213 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1215 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1219 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1221 tg3_generate_fw_event(tp);
1224 static void tg3_link_report(struct tg3 *tp)
1226 if (!netif_carrier_ok(tp->dev)) {
1227 if (netif_msg_link(tp))
1228 printk(KERN_INFO PFX "%s: Link is down.\n",
1230 tg3_ump_link_report(tp);
1231 } else if (netif_msg_link(tp)) {
1232 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1234 (tp->link_config.active_speed == SPEED_1000 ?
1236 (tp->link_config.active_speed == SPEED_100 ?
1238 (tp->link_config.active_duplex == DUPLEX_FULL ?
1241 printk(KERN_INFO PFX
1242 "%s: Flow control is %s for TX and %s for RX.\n",
1244 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1246 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1248 tg3_ump_link_report(tp);
1252 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1256 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1257 miireg = ADVERTISE_PAUSE_CAP;
1258 else if (flow_ctrl & FLOW_CTRL_TX)
1259 miireg = ADVERTISE_PAUSE_ASYM;
1260 else if (flow_ctrl & FLOW_CTRL_RX)
1261 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1268 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1272 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1273 miireg = ADVERTISE_1000XPAUSE;
1274 else if (flow_ctrl & FLOW_CTRL_TX)
1275 miireg = ADVERTISE_1000XPSE_ASYM;
1276 else if (flow_ctrl & FLOW_CTRL_RX)
1277 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1284 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1288 if (lcladv & ADVERTISE_1000XPAUSE) {
1289 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1290 if (rmtadv & LPA_1000XPAUSE)
1291 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1292 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1295 if (rmtadv & LPA_1000XPAUSE)
1296 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1298 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1299 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
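/* Resolution summary for the logic above (follows standard 802.3 pause
 * resolution; combinations not listed resolve to no flow control):
 *
 *	local PAUSE/ASYM   remote PAUSE/ASYM   resolved flow control
 *	      1    x             1    x        FLOW_CTRL_TX | FLOW_CTRL_RX
 *	      1    1             0    1        FLOW_CTRL_RX
 *	      0    1             1    1        FLOW_CTRL_TX
 */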
1306 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1310 u32 old_rx_mode = tp->rx_mode;
1311 u32 old_tx_mode = tp->tx_mode;
1313 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1314 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1316 autoneg = tp->link_config.autoneg;
1318 if (autoneg == AUTONEG_ENABLE &&
1319 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1320 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1321 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1323 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1325 flowctrl = tp->link_config.flowctrl;
1327 tp->link_config.active_flowctrl = flowctrl;
1329 if (flowctrl & FLOW_CTRL_RX)
1330 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1332 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1334 if (old_rx_mode != tp->rx_mode)
1335 tw32_f(MAC_RX_MODE, tp->rx_mode);
1337 if (flowctrl & FLOW_CTRL_TX)
1338 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1340 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1342 if (old_tx_mode != tp->tx_mode)
1343 tw32_f(MAC_TX_MODE, tp->tx_mode);
1346 static void tg3_adjust_link(struct net_device *dev)
1348 u8 oldflowctrl, linkmesg = 0;
1349 u32 mac_mode, lcl_adv, rmt_adv;
1350 struct tg3 *tp = netdev_priv(dev);
1351 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1353 spin_lock_bh(&tp->lock);
1355 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1356 MAC_MODE_HALF_DUPLEX);
1358 oldflowctrl = tp->link_config.active_flowctrl;
1364 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1365 mac_mode |= MAC_MODE_PORT_MODE_MII;
1367 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1369 if (phydev->duplex == DUPLEX_HALF)
1370 mac_mode |= MAC_MODE_HALF_DUPLEX;
1372 lcl_adv = tg3_advert_flowctrl_1000T(
1373 tp->link_config.flowctrl);
1376 rmt_adv = LPA_PAUSE_CAP;
1377 if (phydev->asym_pause)
1378 rmt_adv |= LPA_PAUSE_ASYM;
1381 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1383 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1385 if (mac_mode != tp->mac_mode) {
1386 tp->mac_mode = mac_mode;
1387 tw32_f(MAC_MODE, tp->mac_mode);
1391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1392 if (phydev->speed == SPEED_10)
1394 MAC_MI_STAT_10MBPS_MODE |
1395 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1397 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1400 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1401 tw32(MAC_TX_LENGTHS,
1402 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1403 (6 << TX_LENGTHS_IPG_SHIFT) |
1404 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1406 tw32(MAC_TX_LENGTHS,
1407 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1408 (6 << TX_LENGTHS_IPG_SHIFT) |
1409 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1411 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1412 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1413 phydev->speed != tp->link_config.active_speed ||
1414 phydev->duplex != tp->link_config.active_duplex ||
1415 oldflowctrl != tp->link_config.active_flowctrl)
1418 tp->link_config.active_speed = phydev->speed;
1419 tp->link_config.active_duplex = phydev->duplex;
1421 spin_unlock_bh(&tp->lock);
1424 tg3_link_report(tp);
1427 static int tg3_phy_init(struct tg3 *tp)
1429 struct phy_device *phydev;
1431 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1434 /* Bring the PHY back to a known state. */
1437 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1439 /* Attach the MAC to the PHY. */
1440 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1441 phydev->dev_flags, phydev->interface);
1442 if (IS_ERR(phydev)) {
1443 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1444 return PTR_ERR(phydev);
1447 /* Mask with MAC supported features. */
1448 switch (phydev->interface) {
1449 case PHY_INTERFACE_MODE_GMII:
1450 case PHY_INTERFACE_MODE_RGMII:
1451 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1452 phydev->supported &= (PHY_GBIT_FEATURES |
1454 SUPPORTED_Asym_Pause);
1458 case PHY_INTERFACE_MODE_MII:
1459 phydev->supported &= (PHY_BASIC_FEATURES |
1461 SUPPORTED_Asym_Pause);
1464 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1468 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1470 phydev->advertising = phydev->supported;
1475 static void tg3_phy_start(struct tg3 *tp)
1477 struct phy_device *phydev;
1479 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1482 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1484 if (tp->link_config.phy_is_low_power) {
1485 tp->link_config.phy_is_low_power = 0;
1486 phydev->speed = tp->link_config.orig_speed;
1487 phydev->duplex = tp->link_config.orig_duplex;
1488 phydev->autoneg = tp->link_config.orig_autoneg;
1489 phydev->advertising = tp->link_config.orig_advertising;
1494 phy_start_aneg(phydev);
1497 static void tg3_phy_stop(struct tg3 *tp)
1499 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1502 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1505 static void tg3_phy_fini(struct tg3 *tp)
1507 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1508 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1509 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1513 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1515 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1516 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1519 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1523 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1526 tg3_writephy(tp, MII_TG3_FET_TEST,
1527 phytest | MII_TG3_FET_SHADOW_EN);
1528 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1530 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1532 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1533 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1535 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1539 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1543 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
1546 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1547 tg3_phy_fet_toggle_apd(tp, enable);
1551 reg = MII_TG3_MISC_SHDW_WREN |
1552 MII_TG3_MISC_SHDW_SCR5_SEL |
1553 MII_TG3_MISC_SHDW_SCR5_LPED |
1554 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1555 MII_TG3_MISC_SHDW_SCR5_SDTL |
1556 MII_TG3_MISC_SHDW_SCR5_C125OE;
1557 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1558 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1560 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1563 reg = MII_TG3_MISC_SHDW_WREN |
1564 MII_TG3_MISC_SHDW_APD_SEL |
1565 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1567 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1569 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1572 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1576 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1577 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1580 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1583 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1584 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1586 tg3_writephy(tp, MII_TG3_FET_TEST,
1587 ephy | MII_TG3_FET_SHADOW_EN);
1588 if (!tg3_readphy(tp, reg, &phy)) {
1590 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1592 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1593 tg3_writephy(tp, reg, phy);
1595 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1598 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1599 MII_TG3_AUXCTL_SHDWSEL_MISC;
1600 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1601 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1603 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1605 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1606 phy |= MII_TG3_AUXCTL_MISC_WREN;
1607 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1612 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1616 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1619 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1620 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1621 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1622 (val | (1 << 15) | (1 << 4)));
1625 static void tg3_phy_apply_otp(struct tg3 *tp)
1634 /* Enable SM_DSP clock and tx 6dB coding. */
1635 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1636 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1637 MII_TG3_AUXCTL_ACTL_TX_6DB;
1638 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1640 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1641 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1642 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1644 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1645 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1646 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1648 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1649 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1650 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1652 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1653 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1655 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1656 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1658 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1659 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1660 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1662 /* Turn off SM_DSP clock. */
1663 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1664 MII_TG3_AUXCTL_ACTL_TX_6DB;
1665 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1668 static int tg3_wait_macro_done(struct tg3 *tp)
1675 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1676 if ((tmp32 & 0x1000) == 0)
1686 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1688 static const u32 test_pat[4][6] = {
1689 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1690 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1691 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1692 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1696 for (chan = 0; chan < 4; chan++) {
1699 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1700 (chan * 0x2000) | 0x0200);
1701 tg3_writephy(tp, 0x16, 0x0002);
1703 for (i = 0; i < 6; i++)
1704 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1707 tg3_writephy(tp, 0x16, 0x0202);
1708 if (tg3_wait_macro_done(tp)) {
1713 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1714 (chan * 0x2000) | 0x0200);
1715 tg3_writephy(tp, 0x16, 0x0082);
1716 if (tg3_wait_macro_done(tp)) {
1721 tg3_writephy(tp, 0x16, 0x0802);
1722 if (tg3_wait_macro_done(tp)) {
1727 for (i = 0; i < 6; i += 2) {
1730 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1731 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1732 tg3_wait_macro_done(tp)) {
1738 if (low != test_pat[chan][i] ||
1739 high != test_pat[chan][i+1]) {
1740 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1741 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1742 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1752 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1756 for (chan = 0; chan < 4; chan++) {
1759 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1760 (chan * 0x2000) | 0x0200);
1761 tg3_writephy(tp, 0x16, 0x0002);
1762 for (i = 0; i < 6; i++)
1763 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1764 tg3_writephy(tp, 0x16, 0x0202);
1765 if (tg3_wait_macro_done(tp))
1772 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1774 u32 reg32, phy9_orig;
1775 int retries, do_phy_reset, err;
1781 err = tg3_bmcr_reset(tp);
1787 /* Disable transmitter and interrupt. */
1788 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1792 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1794 /* Set full-duplex, 1000 mbps. */
1795 tg3_writephy(tp, MII_BMCR,
1796 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1798 /* Set to master mode. */
1799 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1802 tg3_writephy(tp, MII_TG3_CTRL,
1803 (MII_TG3_CTRL_AS_MASTER |
1804 MII_TG3_CTRL_ENABLE_AS_MASTER));
1806 /* Enable SM_DSP_CLOCK and 6dB. */
1807 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1809 /* Block the PHY control access. */
1810 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1811 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1813 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1816 } while (--retries);
1818 err = tg3_phy_reset_chanpat(tp);
1822 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1823 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1825 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1826 tg3_writephy(tp, 0x16, 0x0000);
1828 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1829 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1830 /* Set Extended packet length bit for jumbo frames */
1831 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1834 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1837 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1839 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1841 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1848 /* This will reset the tigon3 PHY if there is no valid
1849 * link unless the FORCE argument is non-zero.
1851 static int tg3_phy_reset(struct tg3 *tp)
1857 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1860 val = tr32(GRC_MISC_CFG);
1861 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1864 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1865 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1869 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1870 netif_carrier_off(tp->dev);
1871 tg3_link_report(tp);
1874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1875 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1876 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1877 err = tg3_phy_reset_5703_4_5(tp);
1884 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1885 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1886 cpmuctrl = tr32(TG3_CPMU_CTRL);
1887 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1889 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1892 err = tg3_bmcr_reset(tp);
1896 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1899 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1900 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1902 tw32(TG3_CPMU_CTRL, cpmuctrl);
1905 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1906 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1909 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1910 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1911 CPMU_LSPD_1000MB_MACCLK_12_5) {
1912 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1914 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1918 tg3_phy_apply_otp(tp);
1920 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1921 tg3_phy_toggle_apd(tp, true);
1923 tg3_phy_toggle_apd(tp, false);
1926 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1927 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1928 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1929 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1930 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1931 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1932 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1934 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1935 tg3_writephy(tp, 0x1c, 0x8d68);
1936 tg3_writephy(tp, 0x1c, 0x8d68);
1938 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1939 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1940 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1941 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1942 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1943 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1944 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1945 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1946 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1948 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1949 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1950 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1951 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1952 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1953 tg3_writephy(tp, MII_TG3_TEST1,
1954 MII_TG3_TEST1_TRIM_EN | 0x4);
1956 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1957 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1959 /* Set Extended packet length bit (bit 14) on all chips that */
1960 /* support jumbo frames */
1961 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1962 /* Cannot do read-modify-write on 5401 */
1963 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1964 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1967 /* Set bit 14 with read-modify-write to preserve other bits */
1968 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1969 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1970 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1973 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1974 * jumbo frames transmission.
1976 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1979 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1980 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1981 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1984 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1985 /* adjust output voltage */
1986 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
1989 tg3_phy_toggle_automdix(tp, 1);
1990 tg3_phy_set_wirespeed(tp);
1994 static void tg3_frob_aux_power(struct tg3 *tp)
1996 struct tg3 *tp_peer = tp;
1998 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
2001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2003 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2004 struct net_device *dev_peer;
2006 dev_peer = pci_get_drvdata(tp->pdev_peer);
2007 /* remove_one() may have been run on the peer. */
2011 tp_peer = netdev_priv(dev_peer);
2014 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2015 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2016 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2017 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2020 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2021 (GRC_LCLCTRL_GPIO_OE0 |
2022 GRC_LCLCTRL_GPIO_OE1 |
2023 GRC_LCLCTRL_GPIO_OE2 |
2024 GRC_LCLCTRL_GPIO_OUTPUT0 |
2025 GRC_LCLCTRL_GPIO_OUTPUT1),
2027 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2028 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2029 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2030 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2031 GRC_LCLCTRL_GPIO_OE1 |
2032 GRC_LCLCTRL_GPIO_OE2 |
2033 GRC_LCLCTRL_GPIO_OUTPUT0 |
2034 GRC_LCLCTRL_GPIO_OUTPUT1 |
2036 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2038 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2039 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2041 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2042 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2045 u32 grc_local_ctrl = 0;
2047 if (tp_peer != tp &&
2048 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2051 /* Workaround to prevent overdrawing Amps. */
2052 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2054 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2055 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2056 grc_local_ctrl, 100);
2059 /* On 5753 and variants, GPIO2 cannot be used. */
2060 no_gpio2 = tp->nic_sram_data_cfg &
2061 NIC_SRAM_DATA_CFG_NO_GPIO2;
2063 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2064 GRC_LCLCTRL_GPIO_OE1 |
2065 GRC_LCLCTRL_GPIO_OE2 |
2066 GRC_LCLCTRL_GPIO_OUTPUT1 |
2067 GRC_LCLCTRL_GPIO_OUTPUT2;
2069 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2070 GRC_LCLCTRL_GPIO_OUTPUT2);
2072 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2073 grc_local_ctrl, 100);
2075 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2077 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2078 grc_local_ctrl, 100);
2081 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2082 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2083 grc_local_ctrl, 100);
2087 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2088 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2089 if (tp_peer != tp &&
2090 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2093 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2094 (GRC_LCLCTRL_GPIO_OE1 |
2095 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2097 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2098 GRC_LCLCTRL_GPIO_OE1, 100);
2100 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2101 (GRC_LCLCTRL_GPIO_OE1 |
2102 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2107 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2109 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2111 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2112 if (speed != SPEED_10)
2114 } else if (speed == SPEED_10)
2120 static int tg3_setup_phy(struct tg3 *, int);
2122 #define RESET_KIND_SHUTDOWN 0
2123 #define RESET_KIND_INIT 1
2124 #define RESET_KIND_SUSPEND 2
2126 static void tg3_write_sig_post_reset(struct tg3 *, int);
2127 static int tg3_halt_cpu(struct tg3 *, u32);
2129 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2133 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2135 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2136 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2139 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2140 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2141 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2148 val = tr32(GRC_MISC_CFG);
2149 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2152 } else if (do_low_power) {
2153 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2154 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2156 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2157 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2158 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2159 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2160 MII_TG3_AUXCTL_PCTL_VREG_11V);
2163 /* The PHY should not be powered down on some chips because
2166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2168 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2169 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2172 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2173 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2174 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2175 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2176 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2177 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2180 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2183 /* tp->lock is held. */
2184 static int tg3_nvram_lock(struct tg3 *tp)
2186 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2189 if (tp->nvram_lock_cnt == 0) {
2190 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2191 for (i = 0; i < 8000; i++) {
2192 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2197 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2201 tp->nvram_lock_cnt++;
2206 /* tp->lock is held. */
2207 static void tg3_nvram_unlock(struct tg3 *tp)
2209 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2210 if (tp->nvram_lock_cnt > 0)
2211 tp->nvram_lock_cnt--;
2212 if (tp->nvram_lock_cnt == 0)
2213 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2217 /* tp->lock is held. */
2218 static void tg3_enable_nvram_access(struct tg3 *tp)
2220 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2221 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
2222 u32 nvaccess = tr32(NVRAM_ACCESS);
2224 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2228 /* tp->lock is held. */
2229 static void tg3_disable_nvram_access(struct tg3 *tp)
2231 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2232 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
2233 u32 nvaccess = tr32(NVRAM_ACCESS);
2235 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2239 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2240 u32 offset, u32 *val)
2245 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2248 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2249 EEPROM_ADDR_DEVID_MASK |
2251 tw32(GRC_EEPROM_ADDR,
2253 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2254 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2255 EEPROM_ADDR_ADDR_MASK) |
2256 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2258 for (i = 0; i < 1000; i++) {
2259 tmp = tr32(GRC_EEPROM_ADDR);
2261 if (tmp & EEPROM_ADDR_COMPLETE)
2265 if (!(tmp & EEPROM_ADDR_COMPLETE))
2268 tmp = tr32(GRC_EEPROM_DATA);
2271 * The data will always be opposite the native endian
2272 * format. Perform a blind byteswap to compensate.
2273 */
2274 *val = swab32(tmp);
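/*
 * A minimal illustrative sketch, not part of tg3.c: the blind byteswap the
 * comment above calls for is what the kernel's swab32() helper performs.
 * demo_swab32() below only spells the operation out; the name is hypothetical.
 */
static inline u32 demo_swab32(u32 x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);	/* 0x11223344 -> 0x44332211 */
}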
2279 #define NVRAM_CMD_TIMEOUT 10000
2281 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2285 tw32(NVRAM_CMD, nvram_cmd);
2286 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2288 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2294 if (i == NVRAM_CMD_TIMEOUT)
2300 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2302 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2303 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2304 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2305 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2306 (tp->nvram_jedecnum == JEDEC_ATMEL))
2308 addr = ((addr / tp->nvram_pagesize) <<
2309 ATMEL_AT45DB0X1B_PAGE_POS) +
2310 (addr % tp->nvram_pagesize);
2315 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2317 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2318 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2319 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2320 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2321 (tp->nvram_jedecnum == JEDEC_ATMEL))
2323 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2324 tp->nvram_pagesize) +
2325 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
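/*
 * An illustrative sketch of the two address translations above.  Atmel
 * AT45DB-style DataFlash uses pages that are not a power of two (commonly
 * 264 bytes), so a linear NVRAM offset is split into a page number placed at
 * the page-position bit plus a byte offset within the page, and back again.
 * The demo_* helpers and the 264/9 example values are assumptions for
 * illustration, not tg3's definitions.
 */
static inline u32 demo_atmel_phys_addr(u32 addr, u32 pagesize, u32 page_pos)
{
	/* e.g. addr 1000, pagesize 264, page_pos 9:
	 * page 3, offset 208 -> (3 << 9) + 208 = 1744
	 */
	return ((addr / pagesize) << page_pos) + (addr % pagesize);
}

static inline u32 demo_atmel_logical_addr(u32 addr, u32 pagesize, u32 page_pos)
{
	/* inverse mapping: 1744 -> (3 * 264) + 208 = 1000 */
	return ((addr >> page_pos) * pagesize) + (addr & ((1U << page_pos) - 1));
}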
2330 /* NOTE: Data read in from NVRAM is byteswapped according to
2331 * the byteswapping settings for all other register accesses.
2332 * tg3 devices are BE devices, so on a BE machine, the data
2333 * returned will be exactly as it is seen in NVRAM. On a LE
2334 * machine, the 32-bit value will be byteswapped.
2336 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2340 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2341 return tg3_nvram_read_using_eeprom(tp, offset, val);
2343 offset = tg3_nvram_phys_addr(tp, offset);
2345 if (offset > NVRAM_ADDR_MSK)
2348 ret = tg3_nvram_lock(tp);
2352 tg3_enable_nvram_access(tp);
2354 tw32(NVRAM_ADDR, offset);
2355 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2356 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2359 *val = tr32(NVRAM_RDDATA);
2361 tg3_disable_nvram_access(tp);
2363 tg3_nvram_unlock(tp);
2368 /* Ensures NVRAM data is in bytestream format. */
2369 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2372 int res = tg3_nvram_read(tp, offset, &v);
2374 *val = cpu_to_be32(v);
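/*
 * A hedged usage sketch, not part of the driver.  Because
 * tg3_nvram_read_be32() stores a big-endian (bytestream) word, its result can
 * be copied byte-for-byte into a buffer and the bytes come out in NVRAM order
 * on any host.  demo_nvram_read_bytes() and its parameters are hypothetical;
 * len is assumed to be a multiple of 4.
 */
static int demo_nvram_read_bytes(struct tg3 *tp, u32 offset, u8 *buf, int len)
{
	int i, err;

	for (i = 0; i < len; i += 4) {
		__be32 word;

		err = tg3_nvram_read_be32(tp, offset + i, &word);
		if (err)
			return err;
		memcpy(buf + i, &word, sizeof(word));	/* keep NVRAM byte order */
	}
	return 0;
}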
2378 /* tp->lock is held. */
2379 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2381 u32 addr_high, addr_low;
2384 addr_high = ((tp->dev->dev_addr[0] << 8) |
2385 tp->dev->dev_addr[1]);
2386 addr_low = ((tp->dev->dev_addr[2] << 24) |
2387 (tp->dev->dev_addr[3] << 16) |
2388 (tp->dev->dev_addr[4] << 8) |
2389 (tp->dev->dev_addr[5] << 0));
2390 for (i = 0; i < 4; i++) {
2391 if (i == 1 && skip_mac_1)
2393 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2394 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2398 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2399 for (i = 0; i < 12; i++) {
2400 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2401 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2405 addr_high = (tp->dev->dev_addr[0] +
2406 tp->dev->dev_addr[1] +
2407 tp->dev->dev_addr[2] +
2408 tp->dev->dev_addr[3] +
2409 tp->dev->dev_addr[4] +
2410 tp->dev->dev_addr[5]) &
2411 TX_BACKOFF_SEED_MASK;
2412 tw32(MAC_TX_BACKOFF_SEED, addr_high);
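/*
 * A tiny worked example of the TX backoff seed computed just above: the six
 * MAC address bytes are summed and masked, so different NICs tend to start
 * from different backoff seeds.  demo_tx_backoff_seed() is hypothetical; the
 * code above uses TX_BACKOFF_SEED_MASK as the mask.
 */
static inline u32 demo_tx_backoff_seed(const u8 *mac, u32 mask)
{
	u32 sum = mac[0] + mac[1] + mac[2] + mac[3] + mac[4] + mac[5];

	/* e.g. 00:10:18:01:02:03 -> 0x2e, then masked */
	return sum & mask;
}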
2415 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2418 bool device_should_wake, do_low_power;
2420 /* Make sure register accesses (indirect or otherwise)
2421 * will function correctly.
2423 pci_write_config_dword(tp->pdev,
2424 TG3PCI_MISC_HOST_CTRL,
2425 tp->misc_host_ctrl);
2429 pci_enable_wake(tp->pdev, state, false);
2430 pci_set_power_state(tp->pdev, PCI_D0);
2432 /* Switch out of Vaux if it is a NIC */
2433 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2434 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2444 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2445 tp->dev->name, state);
2449 /* Restore the CLKREQ setting. */
2450 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2453 pci_read_config_word(tp->pdev,
2454 tp->pcie_cap + PCI_EXP_LNKCTL,
2456 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2457 pci_write_config_word(tp->pdev,
2458 tp->pcie_cap + PCI_EXP_LNKCTL,
2462 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2463 tw32(TG3PCI_MISC_HOST_CTRL,
2464 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2466 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2467 device_may_wakeup(&tp->pdev->dev) &&
2468 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2470 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2471 do_low_power = false;
2472 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2473 !tp->link_config.phy_is_low_power) {
2474 struct phy_device *phydev;
2475 u32 phyid, advertising;
2477 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2479 tp->link_config.phy_is_low_power = 1;
2481 tp->link_config.orig_speed = phydev->speed;
2482 tp->link_config.orig_duplex = phydev->duplex;
2483 tp->link_config.orig_autoneg = phydev->autoneg;
2484 tp->link_config.orig_advertising = phydev->advertising;
2486 advertising = ADVERTISED_TP |
2488 ADVERTISED_Autoneg |
2489 ADVERTISED_10baseT_Half;
2491 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2492 device_should_wake) {
2493 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2495 ADVERTISED_100baseT_Half |
2496 ADVERTISED_100baseT_Full |
2497 ADVERTISED_10baseT_Full;
2499 advertising |= ADVERTISED_10baseT_Full;
2502 phydev->advertising = advertising;
2504 phy_start_aneg(phydev);
2506 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2507 if (phyid != TG3_PHY_ID_BCMAC131) {
2508 phyid &= TG3_PHY_OUI_MASK;
2509 if (phyid == TG3_PHY_OUI_1 ||
2510 phyid == TG3_PHY_OUI_2 ||
2511 phyid == TG3_PHY_OUI_3)
2512 do_low_power = true;
2516 do_low_power = true;
2518 if (tp->link_config.phy_is_low_power == 0) {
2519 tp->link_config.phy_is_low_power = 1;
2520 tp->link_config.orig_speed = tp->link_config.speed;
2521 tp->link_config.orig_duplex = tp->link_config.duplex;
2522 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2525 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2526 tp->link_config.speed = SPEED_10;
2527 tp->link_config.duplex = DUPLEX_HALF;
2528 tp->link_config.autoneg = AUTONEG_ENABLE;
2529 tg3_setup_phy(tp, 0);
2533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2536 val = tr32(GRC_VCPU_EXT_CTRL);
2537 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2538 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2542 for (i = 0; i < 200; i++) {
2543 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2544 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2549 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2550 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2551 WOL_DRV_STATE_SHUTDOWN |
2555 if (device_should_wake) {
2558 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2560 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2564 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2565 mac_mode = MAC_MODE_PORT_MODE_GMII;
2567 mac_mode = MAC_MODE_PORT_MODE_MII;
2569 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2570 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2572 u32 speed = (tp->tg3_flags &
2573 TG3_FLAG_WOL_SPEED_100MB) ?
2574 SPEED_100 : SPEED_10;
2575 if (tg3_5700_link_polarity(tp, speed))
2576 mac_mode |= MAC_MODE_LINK_POLARITY;
2578 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2581 mac_mode = MAC_MODE_PORT_MODE_TBI;
2584 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2585 tw32(MAC_LED_CTRL, tp->led_ctrl);
2587 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2588 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2589 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2590 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2591 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2592 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2594 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2595 mac_mode |= tp->mac_mode &
2596 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2597 if (mac_mode & MAC_MODE_APE_TX_EN)
2598 mac_mode |= MAC_MODE_TDE_ENABLE;
2601 tw32_f(MAC_MODE, mac_mode);
2604 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2608 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2609 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2610 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2613 base_val = tp->pci_clock_ctrl;
2614 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2615 CLOCK_CTRL_TXCLK_DISABLE);
2617 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2618 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2619 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2620 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2621 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2623 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2624 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2625 u32 newbits1, newbits2;
2627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2628 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2629 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2630 CLOCK_CTRL_TXCLK_DISABLE |
2632 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2633 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2634 newbits1 = CLOCK_CTRL_625_CORE;
2635 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2637 newbits1 = CLOCK_CTRL_ALTCLK;
2638 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2641 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2644 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2647 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2650 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2651 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2652 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2653 CLOCK_CTRL_TXCLK_DISABLE |
2654 CLOCK_CTRL_44MHZ_CORE);
2656 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2659 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2660 tp->pci_clock_ctrl | newbits3, 40);
2664 if (!(device_should_wake) &&
2665 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2666 tg3_power_down_phy(tp, do_low_power);
2668 tg3_frob_aux_power(tp);
2670 /* Workaround for unstable PLL clock */
2671 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2672 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2673 u32 val = tr32(0x7d00);
2675 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2677 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2680 err = tg3_nvram_lock(tp);
2681 tg3_halt_cpu(tp, RX_CPU_BASE);
2683 tg3_nvram_unlock(tp);
2687 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2689 if (device_should_wake)
2690 pci_enable_wake(tp->pdev, state, true);
2692 /* Finally, set the new power state. */
2693 pci_set_power_state(tp->pdev, state);
2698 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2700 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2701 case MII_TG3_AUX_STAT_10HALF:
2703 *duplex = DUPLEX_HALF;
2706 case MII_TG3_AUX_STAT_10FULL:
2708 *duplex = DUPLEX_FULL;
2711 case MII_TG3_AUX_STAT_100HALF:
2713 *duplex = DUPLEX_HALF;
2716 case MII_TG3_AUX_STAT_100FULL:
2718 *duplex = DUPLEX_FULL;
2721 case MII_TG3_AUX_STAT_1000HALF:
2722 *speed = SPEED_1000;
2723 *duplex = DUPLEX_HALF;
2726 case MII_TG3_AUX_STAT_1000FULL:
2727 *speed = SPEED_1000;
2728 *duplex = DUPLEX_FULL;
2732 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2733 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2735 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2739 *speed = SPEED_INVALID;
2740 *duplex = DUPLEX_INVALID;
2745 static void tg3_phy_copper_begin(struct tg3 *tp)
2750 if (tp->link_config.phy_is_low_power) {
2751 /* Entering low power mode. Disable gigabit and
2752 * 100baseT advertisements.
2754 tg3_writephy(tp, MII_TG3_CTRL, 0);
2756 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2757 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2758 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2759 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2761 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2762 } else if (tp->link_config.speed == SPEED_INVALID) {
2763 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2764 tp->link_config.advertising &=
2765 ~(ADVERTISED_1000baseT_Half |
2766 ADVERTISED_1000baseT_Full);
2768 new_adv = ADVERTISE_CSMA;
2769 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2770 new_adv |= ADVERTISE_10HALF;
2771 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2772 new_adv |= ADVERTISE_10FULL;
2773 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2774 new_adv |= ADVERTISE_100HALF;
2775 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2776 new_adv |= ADVERTISE_100FULL;
2778 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2780 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2782 if (tp->link_config.advertising &
2783 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2785 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2786 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2787 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2788 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2789 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2790 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2791 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2792 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2793 MII_TG3_CTRL_ENABLE_AS_MASTER);
2794 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2796 tg3_writephy(tp, MII_TG3_CTRL, 0);
2799 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2800 new_adv |= ADVERTISE_CSMA;
2802 /* Asking for a specific link mode. */
2803 if (tp->link_config.speed == SPEED_1000) {
2804 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2806 if (tp->link_config.duplex == DUPLEX_FULL)
2807 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2809 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2810 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2811 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2812 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2813 MII_TG3_CTRL_ENABLE_AS_MASTER);
2815 if (tp->link_config.speed == SPEED_100) {
2816 if (tp->link_config.duplex == DUPLEX_FULL)
2817 new_adv |= ADVERTISE_100FULL;
2819 new_adv |= ADVERTISE_100HALF;
2821 if (tp->link_config.duplex == DUPLEX_FULL)
2822 new_adv |= ADVERTISE_10FULL;
2824 new_adv |= ADVERTISE_10HALF;
2826 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2831 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2834 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2835 tp->link_config.speed != SPEED_INVALID) {
2836 u32 bmcr, orig_bmcr;
2838 tp->link_config.active_speed = tp->link_config.speed;
2839 tp->link_config.active_duplex = tp->link_config.duplex;
2842 switch (tp->link_config.speed) {
2848 bmcr |= BMCR_SPEED100;
2852 bmcr |= TG3_BMCR_SPEED1000;
2856 if (tp->link_config.duplex == DUPLEX_FULL)
2857 bmcr |= BMCR_FULLDPLX;
2859 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2860 (bmcr != orig_bmcr)) {
2861 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2862 for (i = 0; i < 1500; i++) {
2866 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2867 tg3_readphy(tp, MII_BMSR, &tmp))
2869 if (!(tmp & BMSR_LSTATUS)) {
2874 tg3_writephy(tp, MII_BMCR, bmcr);
2878 tg3_writephy(tp, MII_BMCR,
2879 BMCR_ANENABLE | BMCR_ANRESTART);
2883 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2887 /* Turn off tap power management. */
2888 /* Set Extended packet length bit */
2889 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2891 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2892 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2894 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2895 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2897 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2898 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2900 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2901 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2903 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2904 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2911 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2913 u32 adv_reg, all_mask = 0;
2915 if (mask & ADVERTISED_10baseT_Half)
2916 all_mask |= ADVERTISE_10HALF;
2917 if (mask & ADVERTISED_10baseT_Full)
2918 all_mask |= ADVERTISE_10FULL;
2919 if (mask & ADVERTISED_100baseT_Half)
2920 all_mask |= ADVERTISE_100HALF;
2921 if (mask & ADVERTISED_100baseT_Full)
2922 all_mask |= ADVERTISE_100FULL;
2924 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2927 if ((adv_reg & all_mask) != all_mask)
2929 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2933 if (mask & ADVERTISED_1000baseT_Half)
2934 all_mask |= ADVERTISE_1000HALF;
2935 if (mask & ADVERTISED_1000baseT_Full)
2936 all_mask |= ADVERTISE_1000FULL;
2938 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2941 if ((tg3_ctrl & all_mask) != all_mask)
2947 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2951 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2954 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2955 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2957 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2958 if (curadv != reqadv)
2961 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2962 tg3_readphy(tp, MII_LPA, rmtadv);
2964 /* Reprogram the advertisement register, even if it
2965 * does not affect the current link. If the link
2966 * gets renegotiated in the future, we can save an
2967 * additional renegotiation cycle by advertising
2968 * it correctly in the first place.
2970 if (curadv != reqadv) {
2971 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2972 ADVERTISE_PAUSE_ASYM);
2973 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2980 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2982 int current_link_up;
2984 u32 lcl_adv, rmt_adv;
2992 (MAC_STATUS_SYNC_CHANGED |
2993 MAC_STATUS_CFG_CHANGED |
2994 MAC_STATUS_MI_COMPLETION |
2995 MAC_STATUS_LNKSTATE_CHANGED));
2998 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3000 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3004 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3006 /* Some third-party PHYs need to be reset on link going
3007 * down.
3008 */
3009 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3012 netif_carrier_ok(tp->dev)) {
3013 tg3_readphy(tp, MII_BMSR, &bmsr);
3014 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3015 !(bmsr & BMSR_LSTATUS))
3021 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3022 tg3_readphy(tp, MII_BMSR, &bmsr);
3023 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3024 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3027 if (!(bmsr & BMSR_LSTATUS)) {
3028 err = tg3_init_5401phy_dsp(tp);
3032 tg3_readphy(tp, MII_BMSR, &bmsr);
3033 for (i = 0; i < 1000; i++) {
3035 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3036 (bmsr & BMSR_LSTATUS)) {
3042 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3043 !(bmsr & BMSR_LSTATUS) &&
3044 tp->link_config.active_speed == SPEED_1000) {
3045 err = tg3_phy_reset(tp);
3047 err = tg3_init_5401phy_dsp(tp);
3052 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3053 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3054 /* 5701 {A0,B0} CRC bug workaround */
3055 tg3_writephy(tp, 0x15, 0x0a75);
3056 tg3_writephy(tp, 0x1c, 0x8c68);
3057 tg3_writephy(tp, 0x1c, 0x8d68);
3058 tg3_writephy(tp, 0x1c, 0x8c68);
3061 /* Clear pending interrupts... */
3062 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3063 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3065 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3066 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3067 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3068 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3071 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3072 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3073 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3074 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3076 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3079 current_link_up = 0;
3080 current_speed = SPEED_INVALID;
3081 current_duplex = DUPLEX_INVALID;
3083 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3086 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3087 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3088 if (!(val & (1 << 10))) {
3090 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3096 for (i = 0; i < 100; i++) {
3097 tg3_readphy(tp, MII_BMSR, &bmsr);
3098 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3099 (bmsr & BMSR_LSTATUS))
3104 if (bmsr & BMSR_LSTATUS) {
3107 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3108 for (i = 0; i < 2000; i++) {
3110 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3115 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3120 for (i = 0; i < 200; i++) {
3121 tg3_readphy(tp, MII_BMCR, &bmcr);
3122 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3124 if (bmcr && bmcr != 0x7fff)
3132 tp->link_config.active_speed = current_speed;
3133 tp->link_config.active_duplex = current_duplex;
3135 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3136 if ((bmcr & BMCR_ANENABLE) &&
3137 tg3_copper_is_advertising_all(tp,
3138 tp->link_config.advertising)) {
3139 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3141 current_link_up = 1;
3144 if (!(bmcr & BMCR_ANENABLE) &&
3145 tp->link_config.speed == current_speed &&
3146 tp->link_config.duplex == current_duplex &&
3147 tp->link_config.flowctrl ==
3148 tp->link_config.active_flowctrl) {
3149 current_link_up = 1;
3153 if (current_link_up == 1 &&
3154 tp->link_config.active_duplex == DUPLEX_FULL)
3155 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3159 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3162 tg3_phy_copper_begin(tp);
3164 tg3_readphy(tp, MII_BMSR, &tmp);
3165 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3166 (tmp & BMSR_LSTATUS))
3167 current_link_up = 1;
3170 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3171 if (current_link_up == 1) {
3172 if (tp->link_config.active_speed == SPEED_100 ||
3173 tp->link_config.active_speed == SPEED_10)
3174 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3176 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3177 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3178 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3180 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3182 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3183 if (tp->link_config.active_duplex == DUPLEX_HALF)
3184 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3186 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3187 if (current_link_up == 1 &&
3188 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3189 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3191 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3194 /* ??? Without this setting Netgear GA302T PHY does not
3195 * ??? send/receive packets...
3197 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3198 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3199 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3200 tw32_f(MAC_MI_MODE, tp->mi_mode);
3204 tw32_f(MAC_MODE, tp->mac_mode);
3207 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3208 /* Polled via timer. */
3209 tw32_f(MAC_EVENT, 0);
3211 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3216 current_link_up == 1 &&
3217 tp->link_config.active_speed == SPEED_1000 &&
3218 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3219 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3222 (MAC_STATUS_SYNC_CHANGED |
3223 MAC_STATUS_CFG_CHANGED));
3226 NIC_SRAM_FIRMWARE_MBOX,
3227 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3230 /* Prevent send BD corruption. */
3231 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3232 u16 oldlnkctl, newlnkctl;
3234 pci_read_config_word(tp->pdev,
3235 tp->pcie_cap + PCI_EXP_LNKCTL,
3237 if (tp->link_config.active_speed == SPEED_100 ||
3238 tp->link_config.active_speed == SPEED_10)
3239 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3241 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3242 if (newlnkctl != oldlnkctl)
3243 pci_write_config_word(tp->pdev,
3244 tp->pcie_cap + PCI_EXP_LNKCTL,
3248 if (current_link_up != netif_carrier_ok(tp->dev)) {
3249 if (current_link_up)
3250 netif_carrier_on(tp->dev);
3252 netif_carrier_off(tp->dev);
3253 tg3_link_report(tp);
3259 struct tg3_fiber_aneginfo {
3261 #define ANEG_STATE_UNKNOWN 0
3262 #define ANEG_STATE_AN_ENABLE 1
3263 #define ANEG_STATE_RESTART_INIT 2
3264 #define ANEG_STATE_RESTART 3
3265 #define ANEG_STATE_DISABLE_LINK_OK 4
3266 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3267 #define ANEG_STATE_ABILITY_DETECT 6
3268 #define ANEG_STATE_ACK_DETECT_INIT 7
3269 #define ANEG_STATE_ACK_DETECT 8
3270 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3271 #define ANEG_STATE_COMPLETE_ACK 10
3272 #define ANEG_STATE_IDLE_DETECT_INIT 11
3273 #define ANEG_STATE_IDLE_DETECT 12
3274 #define ANEG_STATE_LINK_OK 13
3275 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3276 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3279 #define MR_AN_ENABLE 0x00000001
3280 #define MR_RESTART_AN 0x00000002
3281 #define MR_AN_COMPLETE 0x00000004
3282 #define MR_PAGE_RX 0x00000008
3283 #define MR_NP_LOADED 0x00000010
3284 #define MR_TOGGLE_TX 0x00000020
3285 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3286 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3287 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3288 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3289 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3290 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3291 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3292 #define MR_TOGGLE_RX 0x00002000
3293 #define MR_NP_RX 0x00004000
3295 #define MR_LINK_OK 0x80000000
3297 unsigned long link_time, cur_time;
3299 u32 ability_match_cfg;
3300 int ability_match_count;
3302 char ability_match, idle_match, ack_match;
3304 u32 txconfig, rxconfig;
3305 #define ANEG_CFG_NP 0x00000080
3306 #define ANEG_CFG_ACK 0x00000040
3307 #define ANEG_CFG_RF2 0x00000020
3308 #define ANEG_CFG_RF1 0x00000010
3309 #define ANEG_CFG_PS2 0x00000001
3310 #define ANEG_CFG_PS1 0x00008000
3311 #define ANEG_CFG_HD 0x00004000
3312 #define ANEG_CFG_FD 0x00002000
3313 #define ANEG_CFG_INVAL 0x00001f06
3318 #define ANEG_TIMER_ENAB 2
3319 #define ANEG_FAILED -1
3321 #define ANEG_STATE_SETTLE_TIME 10000
3323 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3324 struct tg3_fiber_aneginfo *ap)
3327 unsigned long delta;
3331 if (ap->state == ANEG_STATE_UNKNOWN) {
3335 ap->ability_match_cfg = 0;
3336 ap->ability_match_count = 0;
3337 ap->ability_match = 0;
3343 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3344 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3346 if (rx_cfg_reg != ap->ability_match_cfg) {
3347 ap->ability_match_cfg = rx_cfg_reg;
3348 ap->ability_match = 0;
3349 ap->ability_match_count = 0;
3351 if (++ap->ability_match_count > 1) {
3352 ap->ability_match = 1;
3353 ap->ability_match_cfg = rx_cfg_reg;
3356 if (rx_cfg_reg & ANEG_CFG_ACK)
3364 ap->ability_match_cfg = 0;
3365 ap->ability_match_count = 0;
3366 ap->ability_match = 0;
3372 ap->rxconfig = rx_cfg_reg;
3376 case ANEG_STATE_UNKNOWN:
3377 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3378 ap->state = ANEG_STATE_AN_ENABLE;
3381 case ANEG_STATE_AN_ENABLE:
3382 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3383 if (ap->flags & MR_AN_ENABLE) {
3386 ap->ability_match_cfg = 0;
3387 ap->ability_match_count = 0;
3388 ap->ability_match = 0;
3392 ap->state = ANEG_STATE_RESTART_INIT;
3394 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3398 case ANEG_STATE_RESTART_INIT:
3399 ap->link_time = ap->cur_time;
3400 ap->flags &= ~(MR_NP_LOADED);
3402 tw32(MAC_TX_AUTO_NEG, 0);
3403 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3404 tw32_f(MAC_MODE, tp->mac_mode);
3407 ret = ANEG_TIMER_ENAB;
3408 ap->state = ANEG_STATE_RESTART;
3411 case ANEG_STATE_RESTART:
3412 delta = ap->cur_time - ap->link_time;
3413 if (delta > ANEG_STATE_SETTLE_TIME) {
3414 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3416 ret = ANEG_TIMER_ENAB;
3420 case ANEG_STATE_DISABLE_LINK_OK:
3424 case ANEG_STATE_ABILITY_DETECT_INIT:
3425 ap->flags &= ~(MR_TOGGLE_TX);
3426 ap->txconfig = ANEG_CFG_FD;
3427 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3428 if (flowctrl & ADVERTISE_1000XPAUSE)
3429 ap->txconfig |= ANEG_CFG_PS1;
3430 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3431 ap->txconfig |= ANEG_CFG_PS2;
3432 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3433 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3434 tw32_f(MAC_MODE, tp->mac_mode);
3437 ap->state = ANEG_STATE_ABILITY_DETECT;
3440 case ANEG_STATE_ABILITY_DETECT:
3441 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3442 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3446 case ANEG_STATE_ACK_DETECT_INIT:
3447 ap->txconfig |= ANEG_CFG_ACK;
3448 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3449 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3450 tw32_f(MAC_MODE, tp->mac_mode);
3453 ap->state = ANEG_STATE_ACK_DETECT;
3456 case ANEG_STATE_ACK_DETECT:
3457 if (ap->ack_match != 0) {
3458 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3459 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3460 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3462 ap->state = ANEG_STATE_AN_ENABLE;
3464 } else if (ap->ability_match != 0 &&
3465 ap->rxconfig == 0) {
3466 ap->state = ANEG_STATE_AN_ENABLE;
3470 case ANEG_STATE_COMPLETE_ACK_INIT:
3471 if (ap->rxconfig & ANEG_CFG_INVAL) {
3475 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3476 MR_LP_ADV_HALF_DUPLEX |
3477 MR_LP_ADV_SYM_PAUSE |
3478 MR_LP_ADV_ASYM_PAUSE |
3479 MR_LP_ADV_REMOTE_FAULT1 |
3480 MR_LP_ADV_REMOTE_FAULT2 |
3481 MR_LP_ADV_NEXT_PAGE |
3484 if (ap->rxconfig & ANEG_CFG_FD)
3485 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3486 if (ap->rxconfig & ANEG_CFG_HD)
3487 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3488 if (ap->rxconfig & ANEG_CFG_PS1)
3489 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3490 if (ap->rxconfig & ANEG_CFG_PS2)
3491 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3492 if (ap->rxconfig & ANEG_CFG_RF1)
3493 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3494 if (ap->rxconfig & ANEG_CFG_RF2)
3495 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3496 if (ap->rxconfig & ANEG_CFG_NP)
3497 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3499 ap->link_time = ap->cur_time;
3501 ap->flags ^= (MR_TOGGLE_TX);
3502 if (ap->rxconfig & 0x0008)
3503 ap->flags |= MR_TOGGLE_RX;
3504 if (ap->rxconfig & ANEG_CFG_NP)
3505 ap->flags |= MR_NP_RX;
3506 ap->flags |= MR_PAGE_RX;
3508 ap->state = ANEG_STATE_COMPLETE_ACK;
3509 ret = ANEG_TIMER_ENAB;
3512 case ANEG_STATE_COMPLETE_ACK:
3513 if (ap->ability_match != 0 &&
3514 ap->rxconfig == 0) {
3515 ap->state = ANEG_STATE_AN_ENABLE;
3518 delta = ap->cur_time - ap->link_time;
3519 if (delta > ANEG_STATE_SETTLE_TIME) {
3520 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3521 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3523 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3524 !(ap->flags & MR_NP_RX)) {
3525 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3533 case ANEG_STATE_IDLE_DETECT_INIT:
3534 ap->link_time = ap->cur_time;
3535 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3536 tw32_f(MAC_MODE, tp->mac_mode);
3539 ap->state = ANEG_STATE_IDLE_DETECT;
3540 ret = ANEG_TIMER_ENAB;
3543 case ANEG_STATE_IDLE_DETECT:
3544 if (ap->ability_match != 0 &&
3545 ap->rxconfig == 0) {
3546 ap->state = ANEG_STATE_AN_ENABLE;
3549 delta = ap->cur_time - ap->link_time;
3550 if (delta > ANEG_STATE_SETTLE_TIME) {
3551 /* XXX another gem from the Broadcom driver :( */
3552 ap->state = ANEG_STATE_LINK_OK;
3556 case ANEG_STATE_LINK_OK:
3557 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3561 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3562 /* ??? unimplemented */
3565 case ANEG_STATE_NEXT_PAGE_WAIT:
3566 /* ??? unimplemented */
3577 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3580 struct tg3_fiber_aneginfo aninfo;
3581 int status = ANEG_FAILED;
3585 tw32_f(MAC_TX_AUTO_NEG, 0);
3587 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3588 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3591 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3594 memset(&aninfo, 0, sizeof(aninfo));
3595 aninfo.flags |= MR_AN_ENABLE;
3596 aninfo.state = ANEG_STATE_UNKNOWN;
3597 aninfo.cur_time = 0;
3599 while (++tick < 195000) {
3600 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3601 if (status == ANEG_DONE || status == ANEG_FAILED)
3607 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3608 tw32_f(MAC_MODE, tp->mac_mode);
3611 *txflags = aninfo.txconfig;
3612 *rxflags = aninfo.flags;
3614 if (status == ANEG_DONE &&
3615 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3616 MR_LP_ADV_FULL_DUPLEX)))
3622 static void tg3_init_bcm8002(struct tg3 *tp)
3624 u32 mac_status = tr32(MAC_STATUS);
3627 /* Reset when initializing for the first time or when we have a link. */
3628 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3629 !(mac_status & MAC_STATUS_PCS_SYNCED))
3632 /* Set PLL lock range. */
3633 tg3_writephy(tp, 0x16, 0x8007);
3636 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3638 /* Wait for reset to complete. */
3639 /* XXX schedule_timeout() ... */
3640 for (i = 0; i < 500; i++)
3643 /* Config mode; select PMA/Ch 1 regs. */
3644 tg3_writephy(tp, 0x10, 0x8411);
3646 /* Enable auto-lock and comdet, select txclk for tx. */
3647 tg3_writephy(tp, 0x11, 0x0a10);
3649 tg3_writephy(tp, 0x18, 0x00a0);
3650 tg3_writephy(tp, 0x16, 0x41ff);
3652 /* Assert and deassert POR. */
3653 tg3_writephy(tp, 0x13, 0x0400);
3655 tg3_writephy(tp, 0x13, 0x0000);
3657 tg3_writephy(tp, 0x11, 0x0a50);
3659 tg3_writephy(tp, 0x11, 0x0a10);
3661 /* Wait for signal to stabilize */
3662 /* XXX schedule_timeout() ... */
3663 for (i = 0; i < 15000; i++)
3666 /* Deselect the channel register so we can read the PHYID
3667 * later.
3668 */
3669 tg3_writephy(tp, 0x10, 0x8011);
3672 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3675 u32 sg_dig_ctrl, sg_dig_status;
3676 u32 serdes_cfg, expected_sg_dig_ctrl;
3677 int workaround, port_a;
3678 int current_link_up;
3681 expected_sg_dig_ctrl = 0;
3684 current_link_up = 0;
3686 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3687 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3689 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3692 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3693 /* preserve bits 20-23 for voltage regulator */
3694 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3697 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3699 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3700 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3702 u32 val = serdes_cfg;
3708 tw32_f(MAC_SERDES_CFG, val);
3711 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3713 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3714 tg3_setup_flow_control(tp, 0, 0);
3715 current_link_up = 1;
3720 /* Want auto-negotiation. */
3721 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3723 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3724 if (flowctrl & ADVERTISE_1000XPAUSE)
3725 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3726 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3727 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3729 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3730 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3731 tp->serdes_counter &&
3732 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3733 MAC_STATUS_RCVD_CFG)) ==
3734 MAC_STATUS_PCS_SYNCED)) {
3735 tp->serdes_counter--;
3736 current_link_up = 1;
3741 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3742 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3744 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3746 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3747 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3748 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3749 MAC_STATUS_SIGNAL_DET)) {
3750 sg_dig_status = tr32(SG_DIG_STATUS);
3751 mac_status = tr32(MAC_STATUS);
3753 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3754 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3755 u32 local_adv = 0, remote_adv = 0;
3757 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3758 local_adv |= ADVERTISE_1000XPAUSE;
3759 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3760 local_adv |= ADVERTISE_1000XPSE_ASYM;
3762 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3763 remote_adv |= LPA_1000XPAUSE;
3764 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3765 remote_adv |= LPA_1000XPAUSE_ASYM;
3767 tg3_setup_flow_control(tp, local_adv, remote_adv);
3768 current_link_up = 1;
3769 tp->serdes_counter = 0;
3770 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3771 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3772 if (tp->serdes_counter)
3773 tp->serdes_counter--;
3776 u32 val = serdes_cfg;
3783 tw32_f(MAC_SERDES_CFG, val);
3786 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3789 /* Link parallel detection - link is up
3790 * only if we have PCS_SYNC and not
3791 * receiving config code words */
3792 mac_status = tr32(MAC_STATUS);
3793 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3794 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3795 tg3_setup_flow_control(tp, 0, 0);
3796 current_link_up = 1;
3798 TG3_FLG2_PARALLEL_DETECT;
3799 tp->serdes_counter =
3800 SERDES_PARALLEL_DET_TIMEOUT;
3802 goto restart_autoneg;
3806 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3807 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3811 return current_link_up;
3814 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3816 int current_link_up = 0;
3818 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3821 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3822 u32 txflags, rxflags;
3825 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3826 u32 local_adv = 0, remote_adv = 0;
3828 if (txflags & ANEG_CFG_PS1)
3829 local_adv |= ADVERTISE_1000XPAUSE;
3830 if (txflags & ANEG_CFG_PS2)
3831 local_adv |= ADVERTISE_1000XPSE_ASYM;
3833 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3834 remote_adv |= LPA_1000XPAUSE;
3835 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3836 remote_adv |= LPA_1000XPAUSE_ASYM;
3838 tg3_setup_flow_control(tp, local_adv, remote_adv);
3840 current_link_up = 1;
3842 for (i = 0; i < 30; i++) {
3845 (MAC_STATUS_SYNC_CHANGED |
3846 MAC_STATUS_CFG_CHANGED));
3848 if ((tr32(MAC_STATUS) &
3849 (MAC_STATUS_SYNC_CHANGED |
3850 MAC_STATUS_CFG_CHANGED)) == 0)
3854 mac_status = tr32(MAC_STATUS);
3855 if (current_link_up == 0 &&
3856 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3857 !(mac_status & MAC_STATUS_RCVD_CFG))
3858 current_link_up = 1;
3860 tg3_setup_flow_control(tp, 0, 0);
3862 /* Forcing 1000FD link up. */
3863 current_link_up = 1;
3865 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3868 tw32_f(MAC_MODE, tp->mac_mode);
3873 return current_link_up;
3876 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3879 u16 orig_active_speed;
3880 u8 orig_active_duplex;
3882 int current_link_up;
3885 orig_pause_cfg = tp->link_config.active_flowctrl;
3886 orig_active_speed = tp->link_config.active_speed;
3887 orig_active_duplex = tp->link_config.active_duplex;
3889 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3890 netif_carrier_ok(tp->dev) &&
3891 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3892 mac_status = tr32(MAC_STATUS);
3893 mac_status &= (MAC_STATUS_PCS_SYNCED |
3894 MAC_STATUS_SIGNAL_DET |
3895 MAC_STATUS_CFG_CHANGED |
3896 MAC_STATUS_RCVD_CFG);
3897 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3898 MAC_STATUS_SIGNAL_DET)) {
3899 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3900 MAC_STATUS_CFG_CHANGED));
3905 tw32_f(MAC_TX_AUTO_NEG, 0);
3907 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3908 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3909 tw32_f(MAC_MODE, tp->mac_mode);
3912 if (tp->phy_id == PHY_ID_BCM8002)
3913 tg3_init_bcm8002(tp);
3915 /* Enable link change event even when serdes polling. */
3916 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3919 current_link_up = 0;
3920 mac_status = tr32(MAC_STATUS);
3922 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3923 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3925 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3927 tp->napi[0].hw_status->status =
3928 (SD_STATUS_UPDATED |
3929 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3931 for (i = 0; i < 100; i++) {
3932 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3933 MAC_STATUS_CFG_CHANGED));
3935 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3936 MAC_STATUS_CFG_CHANGED |
3937 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3941 mac_status = tr32(MAC_STATUS);
3942 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3943 current_link_up = 0;
3944 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3945 tp->serdes_counter == 0) {
3946 tw32_f(MAC_MODE, (tp->mac_mode |
3947 MAC_MODE_SEND_CONFIGS));
3949 tw32_f(MAC_MODE, tp->mac_mode);
3953 if (current_link_up == 1) {
3954 tp->link_config.active_speed = SPEED_1000;
3955 tp->link_config.active_duplex = DUPLEX_FULL;
3956 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3957 LED_CTRL_LNKLED_OVERRIDE |
3958 LED_CTRL_1000MBPS_ON));
3960 tp->link_config.active_speed = SPEED_INVALID;
3961 tp->link_config.active_duplex = DUPLEX_INVALID;
3962 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3963 LED_CTRL_LNKLED_OVERRIDE |
3964 LED_CTRL_TRAFFIC_OVERRIDE));
3967 if (current_link_up != netif_carrier_ok(tp->dev)) {
3968 if (current_link_up)
3969 netif_carrier_on(tp->dev);
3971 netif_carrier_off(tp->dev);
3972 tg3_link_report(tp);
3974 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3975 if (orig_pause_cfg != now_pause_cfg ||
3976 orig_active_speed != tp->link_config.active_speed ||
3977 orig_active_duplex != tp->link_config.active_duplex)
3978 tg3_link_report(tp);
3984 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3986 int current_link_up, err = 0;
3990 u32 local_adv, remote_adv;
3992 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3993 tw32_f(MAC_MODE, tp->mac_mode);
3999 (MAC_STATUS_SYNC_CHANGED |
4000 MAC_STATUS_CFG_CHANGED |
4001 MAC_STATUS_MI_COMPLETION |
4002 MAC_STATUS_LNKSTATE_CHANGED));
4008 current_link_up = 0;
4009 current_speed = SPEED_INVALID;
4010 current_duplex = DUPLEX_INVALID;
4012 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4013 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4015 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4016 bmsr |= BMSR_LSTATUS;
4018 bmsr &= ~BMSR_LSTATUS;
4021 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4023 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4024 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4025 /* do nothing, just check for link up at the end */
4026 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4029 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4030 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4031 ADVERTISE_1000XPAUSE |
4032 ADVERTISE_1000XPSE_ASYM |
4035 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4037 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4038 new_adv |= ADVERTISE_1000XHALF;
4039 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4040 new_adv |= ADVERTISE_1000XFULL;
4042 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4043 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4044 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4045 tg3_writephy(tp, MII_BMCR, bmcr);
4047 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4048 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4049 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4056 bmcr &= ~BMCR_SPEED1000;
4057 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4059 if (tp->link_config.duplex == DUPLEX_FULL)
4060 new_bmcr |= BMCR_FULLDPLX;
4062 if (new_bmcr != bmcr) {
4063 /* BMCR_SPEED1000 is a reserved bit that needs
4064 * to be set on write.
4066 new_bmcr |= BMCR_SPEED1000;
4068 /* Force a linkdown */
4069 if (netif_carrier_ok(tp->dev)) {
4072 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4073 adv &= ~(ADVERTISE_1000XFULL |
4074 ADVERTISE_1000XHALF |
4076 tg3_writephy(tp, MII_ADVERTISE, adv);
4077 tg3_writephy(tp, MII_BMCR, bmcr |
4081 netif_carrier_off(tp->dev);
4083 tg3_writephy(tp, MII_BMCR, new_bmcr);
4085 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4086 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4087 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4089 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4090 bmsr |= BMSR_LSTATUS;
4092 bmsr &= ~BMSR_LSTATUS;
4094 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4098 if (bmsr & BMSR_LSTATUS) {
4099 current_speed = SPEED_1000;
4100 current_link_up = 1;
4101 if (bmcr & BMCR_FULLDPLX)
4102 current_duplex = DUPLEX_FULL;
4104 current_duplex = DUPLEX_HALF;
4109 if (bmcr & BMCR_ANENABLE) {
4112 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4113 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4114 common = local_adv & remote_adv;
4115 if (common & (ADVERTISE_1000XHALF |
4116 ADVERTISE_1000XFULL)) {
4117 if (common & ADVERTISE_1000XFULL)
4118 current_duplex = DUPLEX_FULL;
4120 current_duplex = DUPLEX_HALF;
4123 current_link_up = 0;
4127 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4128 tg3_setup_flow_control(tp, local_adv, remote_adv);
4130 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4131 if (tp->link_config.active_duplex == DUPLEX_HALF)
4132 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4134 tw32_f(MAC_MODE, tp->mac_mode);
4137 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4139 tp->link_config.active_speed = current_speed;
4140 tp->link_config.active_duplex = current_duplex;
4142 if (current_link_up != netif_carrier_ok(tp->dev)) {
4143 if (current_link_up)
4144 netif_carrier_on(tp->dev);
4146 netif_carrier_off(tp->dev);
4147 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4149 tg3_link_report(tp);
4154 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4156 if (tp->serdes_counter) {
4157 /* Give autoneg time to complete. */
4158 tp->serdes_counter--;
4161 if (!netif_carrier_ok(tp->dev) &&
4162 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4165 tg3_readphy(tp, MII_BMCR, &bmcr);
4166 if (bmcr & BMCR_ANENABLE) {
4169 /* Select shadow register 0x1f */
4170 tg3_writephy(tp, 0x1c, 0x7c00);
4171 tg3_readphy(tp, 0x1c, &phy1);
4173 /* Select expansion interrupt status register */
4174 tg3_writephy(tp, 0x17, 0x0f01);
4175 tg3_readphy(tp, 0x15, &phy2);
4176 tg3_readphy(tp, 0x15, &phy2);
4178 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4179 /* We have signal detect and not receiving
4180 * config code words, link is up by parallel
4181 * detection method.
4182 */
4184 bmcr &= ~BMCR_ANENABLE;
4185 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4186 tg3_writephy(tp, MII_BMCR, bmcr);
4187 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4191 else if (netif_carrier_ok(tp->dev) &&
4192 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4193 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4196 /* Select expansion interrupt status register */
4197 tg3_writephy(tp, 0x17, 0x0f01);
4198 tg3_readphy(tp, 0x15, &phy2);
4202 /* Config code words received, turn on autoneg. */
4203 tg3_readphy(tp, MII_BMCR, &bmcr);
4204 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4206 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4212 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4216 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4217 err = tg3_setup_fiber_phy(tp, force_reset);
4218 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4219 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4221 err = tg3_setup_copper_phy(tp, force_reset);
4224 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4227 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4228 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4230 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4235 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4236 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4237 tw32(GRC_MISC_CFG, val);
4240 if (tp->link_config.active_speed == SPEED_1000 &&
4241 tp->link_config.active_duplex == DUPLEX_HALF)
4242 tw32(MAC_TX_LENGTHS,
4243 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4244 (6 << TX_LENGTHS_IPG_SHIFT) |
4245 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4247 tw32(MAC_TX_LENGTHS,
4248 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4249 (6 << TX_LENGTHS_IPG_SHIFT) |
4250 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4252 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4253 if (netif_carrier_ok(tp->dev)) {
4254 tw32(HOSTCC_STAT_COAL_TICKS,
4255 tp->coal.stats_block_coalesce_usecs);
4257 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4261 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4262 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4263 if (!netif_carrier_ok(tp->dev))
4264 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4267 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4268 tw32(PCIE_PWR_MGMT_THRESH, val);
4274 /* This is called whenever we suspect that the system chipset is re-
4275 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4276 * is bogus tx completions. We try to recover by setting the
4277 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4278 * in the workqueue.
4279 */
4280 static void tg3_tx_recover(struct tg3 *tp)
4282 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4283 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4285 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4286 "mapped I/O cycles to the network device, attempting to "
4287 "recover. Please report the problem to the driver maintainer "
4288 "and include system chipset information.\n", tp->dev->name);
4290 spin_lock(&tp->lock);
4291 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4292 spin_unlock(&tp->lock);
4295 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4298 return tnapi->tx_pending -
4299 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4302 /* Tigon3 never reports partial packet sends. So we do not
4303 * need special logic to handle SKBs that have not had all
4304 * of their frags sent yet, like SunGEM does.
4306 static void tg3_tx(struct tg3_napi *tnapi)
4308 struct tg3 *tp = tnapi->tp;
4309 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4310 u32 sw_idx = tnapi->tx_cons;
4311 struct netdev_queue *txq;
4312 int index = tnapi - tp->napi;
4314 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
4317 txq = netdev_get_tx_queue(tp->dev, index);
4319 while (sw_idx != hw_idx) {
4320 struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4321 struct sk_buff *skb = ri->skb;
4324 if (unlikely(skb == NULL)) {
4329 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4333 sw_idx = NEXT_TX(sw_idx);
4335 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4336 ri = &tnapi->tx_buffers[sw_idx];
4337 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4339 sw_idx = NEXT_TX(sw_idx);
4344 if (unlikely(tx_bug)) {
4350 tnapi->tx_cons = sw_idx;
4352 /* Need to make the tx_cons update visible to tg3_start_xmit()
4353 * before checking for netif_queue_stopped(). Without the
4354 * memory barrier, there is a small possibility that tg3_start_xmit()
4355 * will miss it and cause the queue to be stopped forever.
4359 if (unlikely(netif_tx_queue_stopped(txq) &&
4360 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4361 __netif_tx_lock(txq, smp_processor_id());
4362 if (netif_tx_queue_stopped(txq) &&
4363 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4364 netif_tx_wake_queue(txq);
4365 __netif_tx_unlock(txq);
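/*
 * A simplified sketch of the memory-barrier pairing that the comment inside
 * tg3_tx() describes; it is not tg3's exact code (the real completion path
 * additionally takes __netif_tx_lock around the re-check).  The completion
 * side publishes tx_cons before testing the stopped flag, and the transmit
 * side stops the queue before re-testing available space, so at least one
 * side always observes the other's update.  The demo_* wrappers are
 * hypothetical.
 */
static void demo_tx_completion_side(struct tg3_napi *tnapi,
				    struct netdev_queue *txq, u32 new_cons)
{
	tnapi->tx_cons = new_cons;	/* publish freed descriptors */
	smp_mb();			/* pairs with the transmit side */
	if (netif_tx_queue_stopped(txq) &&
	    tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
		netif_tx_wake_queue(txq);
}

static void demo_tx_transmit_side(struct tg3_napi *tnapi,
				  struct netdev_queue *txq)
{
	if (tg3_tx_avail(tnapi) <= TG3_TX_WAKEUP_THRESH(tnapi)) {
		netif_tx_stop_queue(txq);	/* stop before re-checking */
		smp_mb();			/* pairs with completion side */
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}
}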
4369 /* Returns size of skb allocated or < 0 on error.
4371 * We only need to fill in the address because the other members
4372 * of the RX descriptor are invariant, see tg3_init_rings.
4374 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4375 * posting buffers we only dirty the first cache line of the RX
4376 * descriptor (containing the address), whereas for the RX status
4377 * buffers the cpu only reads the last cacheline of the RX descriptor
4378 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4380 static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4381 int src_idx, u32 dest_idx_unmasked)
4383 struct tg3 *tp = tnapi->tp;
4384 struct tg3_rx_buffer_desc *desc;
4385 struct ring_info *map, *src_map;
4386 struct sk_buff *skb;
4388 int skb_size, dest_idx;
4389 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4392 switch (opaque_key) {
4393 case RXD_OPAQUE_RING_STD:
4394 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4395 desc = &tpr->rx_std[dest_idx];
4396 map = &tpr->rx_std_buffers[dest_idx];
4398 src_map = &tpr->rx_std_buffers[src_idx];
4399 skb_size = tp->rx_pkt_map_sz;
4402 case RXD_OPAQUE_RING_JUMBO:
4403 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4404 desc = &tpr->rx_jmb[dest_idx].std;
4405 map = &tpr->rx_jmb_buffers[dest_idx];
4407 src_map = &tpr->rx_jmb_buffers[src_idx];
4408 skb_size = TG3_RX_JMB_MAP_SZ;
4415 /* Do not overwrite any of the map or rp information
4416 * until we are sure we can commit to a new buffer.
4418 * Callers depend upon this behavior and assume that
4419 * we leave everything unchanged if we fail.
4421 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4425 skb_reserve(skb, tp->rx_offset);
4427 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4428 PCI_DMA_FROMDEVICE);
4429 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4435 pci_unmap_addr_set(map, mapping, mapping);
4437 if (src_map != NULL)
4438 src_map->skb = NULL;
4440 desc->addr_hi = ((u64)mapping >> 32);
4441 desc->addr_lo = ((u64)mapping & 0xffffffff);
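/*
 * Illustrative only: the two writes above split the 64-bit DMA address into
 * the 32-bit halves the hardware descriptor expects.  demo_fill_desc_addr()
 * is a hypothetical helper, not part of the driver.
 */
static inline void demo_fill_desc_addr(u32 *addr_hi, u32 *addr_lo, u64 mapping)
{
	*addr_hi = (u32)(mapping >> 32);		/* upper 32 bits */
	*addr_lo = (u32)(mapping & 0xffffffff);		/* lower 32 bits */
}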
4446 /* We only need to move over in the address because the other
4447 * members of the RX descriptor are invariant. See notes above
4448 * tg3_alloc_rx_skb for full details.
4450 static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
4451 int src_idx, u32 dest_idx_unmasked)
4453 struct tg3 *tp = tnapi->tp;
4454 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4455 struct ring_info *src_map, *dest_map;
4457 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4459 switch (opaque_key) {
4460 case RXD_OPAQUE_RING_STD:
4461 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4462 dest_desc = &tpr->rx_std[dest_idx];
4463 dest_map = &tpr->rx_std_buffers[dest_idx];
4464 src_desc = &tpr->rx_std[src_idx];
4465 src_map = &tpr->rx_std_buffers[src_idx];
4468 case RXD_OPAQUE_RING_JUMBO:
4469 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4470 dest_desc = &tpr->rx_jmb[dest_idx].std;
4471 dest_map = &tpr->rx_jmb_buffers[dest_idx];
4472 src_desc = &tpr->rx_jmb[src_idx].std;
4473 src_map = &tpr->rx_jmb_buffers[src_idx];
4480 dest_map->skb = src_map->skb;
4481 pci_unmap_addr_set(dest_map, mapping,
4482 pci_unmap_addr(src_map, mapping));
4483 dest_desc->addr_hi = src_desc->addr_hi;
4484 dest_desc->addr_lo = src_desc->addr_lo;
4486 src_map->skb = NULL;
4489 /* The RX ring scheme is composed of multiple rings which post fresh
4490 * buffers to the chip, and one special ring the chip uses to report
4491 * status back to the host.
4493 * The special ring reports the status of received packets to the
4494 * host. The chip does not write back into the original descriptor that
4495 * the RX buffer was obtained from. The chip simply takes the original
4496 * descriptor as provided by the host, updates the status and length
4497 * field, then writes this into the next status ring entry.
4499 * Each ring the host uses to post buffers to the chip is described
4500 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4501 * it is first placed into the on-chip ram. When the packet's length
4502 * is known, it walks down the TG3_BDINFO entries to select the ring.
4503 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4504 * which is within the range of the new packet's length is chosen.
4506 * The "separate ring for rx status" scheme may sound queer, but it makes
4507 * sense from a cache coherency perspective. If only the host writes
4508 * to the buffer post rings, and only the chip writes to the rx status
4509 * rings, then cache lines never move beyond shared-modified state.
4510 * If both the host and chip were to write into the same ring, cache line
4511 * eviction could occur since both entities want it in an exclusive state.
4512 */
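/*
 * A minimal sketch of the split "producer ring / return ring" scheme
 * described above, using hypothetical demo_* types rather than tg3's real
 * descriptor layout.  The host only writes producer entries and the chip
 * only writes return entries, so each ring has a single writer; the opaque
 * cookie posted by the host is echoed back so the host can locate the
 * matching buffer.
 */
struct demo_rx_producer_entry {		/* host -> chip: buffer to fill */
	u64 dma_addr;
	u32 opaque;			/* ring id + buffer index */
};

struct demo_rx_return_entry {		/* chip -> host: completed frame */
	u32 opaque;			/* copied from the producer entry used */
	u16 len;			/* frame length as received */
	u16 flags;			/* error/VLAN/checksum status */
};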
4513 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4515 struct tg3 *tp = tnapi->tp;
4516 u32 work_mask, rx_std_posted = 0;
4517 u32 sw_idx = tnapi->rx_rcb_ptr;
4520 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4522 hw_idx = *(tnapi->rx_rcb_prod_idx);
4524 * We need to order the read of hw_idx and the read of
4525 * the opaque cookie.
4530 while (sw_idx != hw_idx && budget > 0) {
4531 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4533 struct sk_buff *skb;
4534 dma_addr_t dma_addr;
4535 u32 opaque_key, desc_idx, *post_ptr;
4537 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4538 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4539 if (opaque_key == RXD_OPAQUE_RING_STD) {
4540 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
4541 dma_addr = pci_unmap_addr(ri, mapping);
4543 post_ptr = &tpr->rx_std_ptr;
4545 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4546 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
4547 dma_addr = pci_unmap_addr(ri, mapping);
4549 post_ptr = &tpr->rx_jmb_ptr;
4551 goto next_pkt_nopost;
4553 work_mask |= opaque_key;
4555 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4556 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4558 tg3_recycle_rx(tnapi, opaque_key,
4559 desc_idx, *post_ptr);
4561 /* Other statistics kept track of by card. */
4562 tp->net_stats.rx_dropped++;
4566 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4569 if (len > RX_COPY_THRESHOLD
4570 && tp->rx_offset == NET_IP_ALIGN
4571 /* rx_offset will likely not equal NET_IP_ALIGN
4572 * if this is a 5701 card running in PCI-X mode
4573 * [see tg3_get_invariants()]
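/* Large packet: allocate a replacement buffer for the ring slot,
 * unmap the original DMA buffer, and hand it up the stack as-is.
 */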
4578 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
4579 desc_idx, *post_ptr);
4583 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4584 PCI_DMA_FROMDEVICE);
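/* Small packet: copy it into a freshly allocated skb and recycle the
 * original DMA buffer straight back onto the producer ring.
 */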
4588 struct sk_buff *copy_skb;
4590 tg3_recycle_rx(tnapi, opaque_key,
4591 desc_idx, *post_ptr);
4593 copy_skb = netdev_alloc_skb(tp->dev,
4594 len + TG3_RAW_IP_ALIGN);
4595 if (copy_skb == NULL)
4596 goto drop_it_no_recycle;
4598 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4599 skb_put(copy_skb, len);
4600 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4601 skb_copy_from_linear_data(skb, copy_skb->data, len);
4602 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4604 /* We'll reuse the original ring buffer. */
4608 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4609 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4610 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4611 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4612 skb->ip_summed = CHECKSUM_UNNECESSARY;
4614 skb->ip_summed = CHECKSUM_NONE;
4616 skb->protocol = eth_type_trans(skb, tp->dev);
4618 if (len > (tp->dev->mtu + ETH_HLEN) &&
4619 skb->protocol != htons(ETH_P_8021Q)) {
4624 #if TG3_VLAN_TAG_USED
4625 if (tp->vlgrp != NULL &&
4626 desc->type_flags & RXD_FLAG_VLAN) {
4627 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4628 desc->err_vlan & RXD_VLAN_MASK, skb);
4631 napi_gro_receive(&tnapi->napi, skb);
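/* If enough standard-ring buffers have been consumed, kick the
 * producer mailbox now instead of waiting for the end of the pass,
 * so the chip does not run out of posted buffers under load.
 */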
4639 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4640 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4642 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4643 TG3_64BIT_REG_LOW, idx);
4644 work_mask &= ~RXD_OPAQUE_RING_STD;
4649 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4651 /* Refresh hw_idx to see if there is new work */
4652 if (sw_idx == hw_idx) {
4653 hw_idx = *(tnapi->rx_rcb_prod_idx);
4658 /* ACK the status ring. */
4659 tnapi->rx_rcb_ptr = sw_idx;
4660 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4662 /* Refill RX ring(s). */
4663 if (work_mask & RXD_OPAQUE_RING_STD) {
4664 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
4665 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4668 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4669 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
4670 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4678 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4680 struct tg3 *tp = tnapi->tp;
4681 struct tg3_hw_status *sblk = tnapi->hw_status;
4683 /* handle link change and other phy events */
4684 if (!(tp->tg3_flags &
4685 (TG3_FLAG_USE_LINKCHG_REG |
4686 TG3_FLAG_POLL_SERDES))) {
4687 if (sblk->status & SD_STATUS_LINK_CHG) {
4688 sblk->status = SD_STATUS_UPDATED |
4689 (sblk->status & ~SD_STATUS_LINK_CHG);
4690 spin_lock(&tp->lock);
4691 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4693 (MAC_STATUS_SYNC_CHANGED |
4694 MAC_STATUS_CFG_CHANGED |
4695 MAC_STATUS_MI_COMPLETION |
4696 MAC_STATUS_LNKSTATE_CHANGED));
4699 tg3_setup_phy(tp, 0);
4700 spin_unlock(&tp->lock);
4704 /* run TX completion thread */
4705 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4707 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4711 /* run RX thread, within the bounds set by NAPI.
4712 * All RX "locking" is done by ensuring outside
4713 * code synchronizes with tg3->napi.poll()
4715 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4716 work_done += tg3_rx(tnapi, budget - work_done);
4721 static int tg3_poll(struct napi_struct *napi, int budget)
4723 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4724 struct tg3 *tp = tnapi->tp;
4726 struct tg3_hw_status *sblk = tnapi->hw_status;
4729 work_done = tg3_poll_work(tnapi, work_done, budget);
4731 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4734 if (unlikely(work_done >= budget))
4737 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4738 /* tp->last_tag is used in tg3_int_reenable() below
4739 * to tell the hw how much work has been processed,
4740 * so we must read it before checking for more work.
4742 tnapi->last_tag = sblk->status_tag;
4743 tnapi->last_irq_tag = tnapi->last_tag;
4746 sblk->status &= ~SD_STATUS_UPDATED;
4748 if (likely(!tg3_has_work(tnapi))) {
4749 napi_complete(napi);
4750 tg3_int_reenable(tnapi);
4758 /* work_done is guaranteed to be less than budget. */
4759 napi_complete(napi);
4760 schedule_work(&tp->reset_task);
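/* Wait for any interrupt handler still running on any vector to
 * finish, so callers holding tp->lock can assume no handler will
 * touch the hardware concurrently (see tg3_full_lock()).
 */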
4764 static void tg3_irq_quiesce(struct tg3 *tp)
4768 BUG_ON(tp->irq_sync);
4773 for (i = 0; i < tp->irq_cnt; i++)
4774 synchronize_irq(tp->napi[i].irq_vec);
4777 static inline int tg3_irq_sync(struct tg3 *tp)
4779 return tp->irq_sync;
4782 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4783 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4784 * with as well. Most of the time, this is not necessary except when
4785 * shutting down the device.
4787 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4789 spin_lock_bh(&tp->lock);
4791 tg3_irq_quiesce(tp);
4794 static inline void tg3_full_unlock(struct tg3 *tp)
4796 spin_unlock_bh(&tp->lock);
4799 /* One-shot MSI handler - Chip automatically disables interrupt
4800 * after sending MSI so driver doesn't have to do it.
4802 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4804 struct tg3_napi *tnapi = dev_id;
4805 struct tg3 *tp = tnapi->tp;
4807 prefetch(tnapi->hw_status);
4809 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4811 if (likely(!tg3_irq_sync(tp)))
4812 napi_schedule(&tnapi->napi);
4817 /* MSI ISR - No need to check for interrupt sharing and no need to
4818 * flush status block and interrupt mailbox. PCI ordering rules
4819 * guarantee that MSI will arrive after the status block.
4821 static irqreturn_t tg3_msi(int irq, void *dev_id)
4823 struct tg3_napi *tnapi = dev_id;
4824 struct tg3 *tp = tnapi->tp;
4826 prefetch(tnapi->hw_status);
4828 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4830 * Writing any value to intr-mbox-0 clears PCI INTA# and
4831 * chip-internal interrupt pending events.
4832 * Writing non-zero to intr-mbox-0 additionally tells the
4833 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
4836 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4837 if (likely(!tg3_irq_sync(tp)))
4838 napi_schedule(&tnapi->napi);
4840 return IRQ_RETVAL(1);
4843 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4845 struct tg3_napi *tnapi = dev_id;
4846 struct tg3 *tp = tnapi->tp;
4847 struct tg3_hw_status *sblk = tnapi->hw_status;
4848 unsigned int handled = 1;
4850 /* In INTx mode, it is possible for the interrupt to arrive at
4851 * the CPU before the status block write posted prior to the interrupt.
4852 * Reading the PCI State register will confirm whether the
4853 * interrupt is ours and will flush the status block.
4855 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4856 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4857 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4864 * Writing any value to intr-mbox-0 clears PCI INTA# and
4865 * chip-internal interrupt pending events.
4866 * Writing non-zero to intr-mbox-0 additionally tells the
4867 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
4870 * Flush the mailbox to de-assert the IRQ immediately to prevent
4871 * spurious interrupts. The flush impacts performance but
4872 * excessive spurious interrupts can be worse in some cases.
4874 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4875 if (tg3_irq_sync(tp))
4877 sblk->status &= ~SD_STATUS_UPDATED;
4878 if (likely(tg3_has_work(tnapi))) {
4879 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4880 napi_schedule(&tnapi->napi);
4882 /* No work, shared interrupt perhaps? re-enable
4883 * interrupts, and flush that PCI write
4885 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4889 return IRQ_RETVAL(handled);
4892 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4894 struct tg3_napi *tnapi = dev_id;
4895 struct tg3 *tp = tnapi->tp;
4896 struct tg3_hw_status *sblk = tnapi->hw_status;
4897 unsigned int handled = 1;
4899 /* In INTx mode, it is possible for the interrupt to arrive at
4900 * the CPU before the status block write posted prior to the interrupt.
4901 * Reading the PCI State register will confirm whether the
4902 * interrupt is ours and will flush the status block.
4904 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
4905 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4906 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4913 * Writing any value to intr-mbox-0 clears PCI INTA# and
4914 * chip-internal interrupt pending events.
4915 * Writing non-zero to intr-mbox-0 additionally tells the
4916 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
4919 * Flush the mailbox to de-assert the IRQ immediately to prevent
4920 * spurious interrupts. The flush impacts performance but
4921 * excessive spurious interrupts can be worse in some cases.
4923 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4926 * In a shared interrupt configuration, sometimes other devices'
4927 * interrupts will scream. We record the current status tag here
4928 * so that the above check can report that the screaming interrupts
4929 * are unhandled. Eventually they will be silenced.
4931 tnapi->last_irq_tag = sblk->status_tag;
4933 if (tg3_irq_sync(tp))
4936 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4938 napi_schedule(&tnapi->napi);
4941 return IRQ_RETVAL(handled);
4944 /* ISR for interrupt test */
4945 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4947 struct tg3_napi *tnapi = dev_id;
4948 struct tg3 *tp = tnapi->tp;
4949 struct tg3_hw_status *sblk = tnapi->hw_status;
4951 if ((sblk->status & SD_STATUS_UPDATED) ||
4952 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4953 tg3_disable_ints(tp);
4954 return IRQ_RETVAL(1);
4956 return IRQ_RETVAL(0);
4959 static int tg3_init_hw(struct tg3 *, int);
4960 static int tg3_halt(struct tg3 *, int, int);
4962 /* Restart hardware after configuration changes, self-test, etc.
4963 * Invoked with tp->lock held.
4965 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4966 __releases(tp->lock)
4967 __acquires(tp->lock)
4971 err = tg3_init_hw(tp, reset_phy);
4973 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4974 "aborting.\n", tp->dev->name);
4975 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4976 tg3_full_unlock(tp);
4977 del_timer_sync(&tp->timer);
4979 tg3_napi_enable(tp);
4981 tg3_full_lock(tp, 0);
4986 #ifdef CONFIG_NET_POLL_CONTROLLER
4987 static void tg3_poll_controller(struct net_device *dev)
4990 struct tg3 *tp = netdev_priv(dev);
4992 for (i = 0; i < tp->irq_cnt; i++)
4993 tg3_interrupt(tp->napi[i].irq_vec, dev);
4997 static void tg3_reset_task(struct work_struct *work)
4999 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5001 unsigned int restart_timer;
5003 tg3_full_lock(tp, 0);
5005 if (!netif_running(tp->dev)) {
5006 tg3_full_unlock(tp);
5010 tg3_full_unlock(tp);
5016 tg3_full_lock(tp, 1);
5018 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5019 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5021 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5022 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5023 tp->write32_rx_mbox = tg3_write_flush_reg32;
5024 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5025 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5028 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5029 err = tg3_init_hw(tp, 1);
5033 tg3_netif_start(tp);
5036 mod_timer(&tp->timer, jiffies + 1);
5039 tg3_full_unlock(tp);
5045 static void tg3_dump_short_state(struct tg3 *tp)
5047 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5048 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5049 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5050 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5053 static void tg3_tx_timeout(struct net_device *dev)
5055 struct tg3 *tp = netdev_priv(dev);
5057 if (netif_msg_tx_err(tp)) {
5058 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5060 tg3_dump_short_state(tp);
5063 schedule_work(&tp->reset_task);
5066 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
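/* Only the low 32 bits of the mapping matter: if base + len + 8 wraps
 * past zero, the buffer (plus a little slack) straddles a 4GB boundary.
 * Worked example: base = 0xffffff00, len = 0x200 gives
 * 0xffffff00 + 0x200 + 8 = 0x108 after 32-bit truncation, which is
 * less than base, so the test fires. The base > 0xffffdcc0 pre-check
 * appears to filter out buffers that start more than ~9 KB below the
 * boundary, larger than any frame this driver posts.
 */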
5067 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5069 u32 base = (u32) mapping & 0xffffffff;
5071 return ((base > 0xffffdcc0) &&
5072 (base + len + 8 < base));
5075 /* Test for DMA addresses > 40-bit */
5076 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5079 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5080 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5081 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5088 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5090 /* Workaround 4GB and 40-bit hardware DMA bugs. */
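/* The workaround linearizes the offending skb into a newly allocated
 * copy (with extra headroom on 5701-class parts so the data lands on a
 * 4-byte boundary), maps that copy, and rewrites the descriptor chain
 * to point at it as a single fragment. If the new mapping still crosses
 * a 4GB boundary, the packet is dropped rather than corrupted.
 */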
5091 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5092 u32 last_plus_one, u32 *start,
5093 u32 base_flags, u32 mss)
5095 struct tg3_napi *tnapi = &tp->napi[0];
5096 struct sk_buff *new_skb;
5097 dma_addr_t new_addr = 0;
5101 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5102 new_skb = skb_copy(skb, GFP_ATOMIC);
5104 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5106 new_skb = skb_copy_expand(skb,
5107 skb_headroom(skb) + more_headroom,
5108 skb_tailroom(skb), GFP_ATOMIC);
5114 /* New SKB is guaranteed to be linear. */
5116 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5117 new_addr = skb_shinfo(new_skb)->dma_head;
5119 /* Make sure new skb does not cross any 4G boundaries.
5120 * Drop the packet if it does.
5122 if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5123 tg3_4g_overflow_test(new_addr, new_skb->len))) {
5125 skb_dma_unmap(&tp->pdev->dev, new_skb,
5128 dev_kfree_skb(new_skb);
5131 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5132 base_flags, 1 | (mss << 1));
5133 *start = NEXT_TX(entry);
5137 /* Now clean up the sw ring entries. */
5139 while (entry != last_plus_one) {
5141 tnapi->tx_buffers[entry].skb = new_skb;
5143 tnapi->tx_buffers[entry].skb = NULL;
5144 entry = NEXT_TX(entry);
5148 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
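/* Fill one hardware TX descriptor. The low bit of mss_and_is_end marks
 * the final fragment of the packet (TXD_FLAG_END); the remaining bits
 * carry the MSS, which shares the vlan_tag word with any VLAN tag.
 */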
5154 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5155 dma_addr_t mapping, int len, u32 flags,
5158 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5159 int is_end = (mss_and_is_end & 0x1);
5160 u32 mss = (mss_and_is_end >> 1);
5164 flags |= TXD_FLAG_END;
5165 if (flags & TXD_FLAG_VLAN) {
5166 vlan_tag = flags >> 16;
5169 vlan_tag |= (mss << TXD_MSS_SHIFT);
5171 txd->addr_hi = ((u64) mapping >> 32);
5172 txd->addr_lo = ((u64) mapping & 0xffffffff);
5173 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5174 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5177 /* hard_start_xmit for devices that don't have any bugs and
5178 * support TG3_FLG2_HW_TSO_2 only.
5180 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5181 struct net_device *dev)
5183 struct tg3 *tp = netdev_priv(dev);
5184 u32 len, entry, base_flags, mss;
5185 struct skb_shared_info *sp;
5187 struct tg3_napi *tnapi;
5188 struct netdev_queue *txq;
5190 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5191 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5192 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5195 /* We are running in BH disabled context with netif_tx_lock
5196 * and TX reclaim runs via tp->napi.poll inside of a software
5197 * interrupt. Furthermore, IRQ processing runs lockless so we have
5198 * no IRQ context deadlocks to worry about either. Rejoice!
5200 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5201 if (!netif_tx_queue_stopped(txq)) {
5202 netif_tx_stop_queue(txq);
5204 /* This is a hard error, log it. */
5205 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5206 "queue awake!\n", dev->name);
5208 return NETDEV_TX_BUSY;
5211 entry = tnapi->tx_prod;
5214 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5215 int tcp_opt_len, ip_tcp_len;
5218 if (skb_header_cloned(skb) &&
5219 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5224 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5225 hdrlen = skb_headlen(skb) - ETH_HLEN;
5227 struct iphdr *iph = ip_hdr(skb);
5229 tcp_opt_len = tcp_optlen(skb);
5230 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5233 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5234 hdrlen = ip_tcp_len + tcp_opt_len;
5237 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
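/* 5717-class chips have no dedicated TSO header-length field;
 * the length appears to be scattered across spare bits of the
 * mss word and base_flags as encoded below.
 */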
5238 mss |= (hdrlen & 0xc) << 12;
5240 base_flags |= 0x00000010;
5241 base_flags |= (hdrlen & 0x3e0) << 5;
5245 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5246 TXD_FLAG_CPU_POST_DMA);
5248 tcp_hdr(skb)->check = 0;
5251 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5252 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5253 #if TG3_VLAN_TAG_USED
5254 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5255 base_flags |= (TXD_FLAG_VLAN |
5256 (vlan_tx_tag_get(skb) << 16));
5259 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5264 sp = skb_shinfo(skb);
5266 mapping = sp->dma_head;
5268 tnapi->tx_buffers[entry].skb = skb;
5270 len = skb_headlen(skb);
5272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5273 !mss && skb->len > ETH_DATA_LEN)
5274 base_flags |= TXD_FLAG_JMB_PKT;
5276 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5277 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5279 entry = NEXT_TX(entry);
5281 /* Now loop through additional data fragments, and queue them. */
5282 if (skb_shinfo(skb)->nr_frags > 0) {
5283 unsigned int i, last;
5285 last = skb_shinfo(skb)->nr_frags - 1;
5286 for (i = 0; i <= last; i++) {
5287 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5290 mapping = sp->dma_maps[i];
5291 tnapi->tx_buffers[entry].skb = NULL;
5293 tg3_set_txd(tnapi, entry, mapping, len,
5294 base_flags, (i == last) | (mss << 1));
5296 entry = NEXT_TX(entry);
5300 /* Packets are ready, update Tx producer idx local and on card. */
5301 tw32_tx_mbox(tnapi->prodmbox, entry);
5303 tnapi->tx_prod = entry;
5304 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5305 netif_tx_stop_queue(txq);
5306 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5307 netif_tx_wake_queue(txq);
5313 return NETDEV_TX_OK;
5316 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5317 struct net_device *);
5319 /* Use GSO to work around a rare TSO bug that may be triggered when the
5320 * TSO header is greater than 80 bytes.
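/* tg3_tso_bug() falls back to software GSO: the oversized TSO skb is
 * segmented into MSS-sized skbs, each of which is then queued through
 * tg3_start_xmit_dma_bug() individually.
 */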
5322 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5324 struct sk_buff *segs, *nskb;
5325 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5327 /* Estimate the number of fragments in the worst case */
5328 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5329 netif_stop_queue(tp->dev);
5330 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5331 return NETDEV_TX_BUSY;
5333 netif_wake_queue(tp->dev);
5336 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5338 goto tg3_tso_bug_end;
5344 tg3_start_xmit_dma_bug(nskb, tp->dev);
5350 return NETDEV_TX_OK;
5353 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5354 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5356 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5357 struct net_device *dev)
5359 struct tg3 *tp = netdev_priv(dev);
5360 u32 len, entry, base_flags, mss;
5361 struct skb_shared_info *sp;
5362 int would_hit_hwbug;
5364 struct tg3_napi *tnapi = &tp->napi[0];
5366 len = skb_headlen(skb);
5368 /* We are running in BH disabled context with netif_tx_lock
5369 * and TX reclaim runs via tp->napi.poll inside of a software
5370 * interrupt. Furthermore, IRQ processing runs lockless so we have
5371 * no IRQ context deadlocks to worry about either. Rejoice!
5373 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5374 if (!netif_queue_stopped(dev)) {
5375 netif_stop_queue(dev);
5377 /* This is a hard error, log it. */
5378 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5379 "queue awake!\n", dev->name);
5381 return NETDEV_TX_BUSY;
5384 entry = tnapi->tx_prod;
5386 if (skb->ip_summed == CHECKSUM_PARTIAL)
5387 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5389 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5391 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5393 if (skb_header_cloned(skb) &&
5394 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5399 tcp_opt_len = tcp_optlen(skb);
5400 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5402 hdr_len = ip_tcp_len + tcp_opt_len;
5403 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5404 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5405 return (tg3_tso_bug(tp, skb));
5407 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5408 TXD_FLAG_CPU_POST_DMA);
5412 iph->tot_len = htons(mss + hdr_len);
5413 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5414 tcp_hdr(skb)->check = 0;
5415 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5417 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5422 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5423 mss |= hdr_len << 9;
5424 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5426 if (tcp_opt_len || iph->ihl > 5) {
5429 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5430 mss |= (tsflags << 11);
5433 if (tcp_opt_len || iph->ihl > 5) {
5436 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5437 base_flags |= tsflags << 12;
5441 #if TG3_VLAN_TAG_USED
5442 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5443 base_flags |= (TXD_FLAG_VLAN |
5444 (vlan_tx_tag_get(skb) << 16));
5447 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5452 sp = skb_shinfo(skb);
5454 mapping = sp->dma_head;
5456 tnapi->tx_buffers[entry].skb = skb;
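/* Check the head and each fragment against the known DMA errata:
 * buffers of 8 bytes or less (SHORT_DMA_BUG), mappings that cross a
 * 4GB boundary, mappings above 40 bits, and the blanket 5701 DMA bug.
 * Any hit forces the linearizing workaround after the descriptors are
 * built.
 */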
5458 would_hit_hwbug = 0;
5460 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5461 would_hit_hwbug = 1;
5463 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5464 tg3_4g_overflow_test(mapping, len))
5465 would_hit_hwbug = 1;
5467 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5468 tg3_40bit_overflow_test(tp, mapping, len))
5469 would_hit_hwbug = 1;
5471 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5472 would_hit_hwbug = 1;
5474 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5475 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5477 entry = NEXT_TX(entry);
5479 /* Now loop through additional data fragments, and queue them. */
5480 if (skb_shinfo(skb)->nr_frags > 0) {
5481 unsigned int i, last;
5483 last = skb_shinfo(skb)->nr_frags - 1;
5484 for (i = 0; i <= last; i++) {
5485 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5488 mapping = sp->dma_maps[i];
5490 tnapi->tx_buffers[entry].skb = NULL;
5492 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5494 would_hit_hwbug = 1;
5496 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5497 tg3_4g_overflow_test(mapping, len))
5498 would_hit_hwbug = 1;
5500 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5501 tg3_40bit_overflow_test(tp, mapping, len))
5502 would_hit_hwbug = 1;
5504 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5505 tg3_set_txd(tnapi, entry, mapping, len,
5506 base_flags, (i == last)|(mss << 1));
5508 tg3_set_txd(tnapi, entry, mapping, len,
5509 base_flags, (i == last));
5511 entry = NEXT_TX(entry);
5515 if (would_hit_hwbug) {
5516 u32 last_plus_one = entry;
5519 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5520 start &= (TG3_TX_RING_SIZE - 1);
5522 /* If the workaround fails due to memory/mapping
5523 * failure, silently drop this packet.
5525 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5526 &start, base_flags, mss))
5532 /* Packets are ready, update Tx producer idx local and on card. */
5533 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
5535 tnapi->tx_prod = entry;
5536 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5537 netif_stop_queue(dev);
5538 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5539 netif_wake_queue(tp->dev);
5545 return NETDEV_TX_OK;
5548 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5553 if (new_mtu > ETH_DATA_LEN) {
5554 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5555 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5556 ethtool_op_set_tso(dev, 0);
5559 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5561 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5562 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5563 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5567 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5569 struct tg3 *tp = netdev_priv(dev);
5572 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5575 if (!netif_running(dev)) {
5576 /* We'll just catch it later when the device is brought up. */
5579 tg3_set_mtu(dev, tp, new_mtu);
5587 tg3_full_lock(tp, 1);
5589 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5591 tg3_set_mtu(dev, tp, new_mtu);
5593 err = tg3_restart_hw(tp, 0);
5596 tg3_netif_start(tp);
5598 tg3_full_unlock(tp);
5606 static void tg3_rx_prodring_free(struct tg3 *tp,
5607 struct tg3_rx_prodring_set *tpr)
5610 struct ring_info *rxp;
5612 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5613 rxp = &tpr->rx_std_buffers[i];
5615 if (rxp->skb == NULL)
5618 pci_unmap_single(tp->pdev,
5619 pci_unmap_addr(rxp, mapping),
5621 PCI_DMA_FROMDEVICE);
5622 dev_kfree_skb_any(rxp->skb);
5626 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5627 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5628 rxp = &tpr->rx_jmb_buffers[i];
5630 if (rxp->skb == NULL)
5633 pci_unmap_single(tp->pdev,
5634 pci_unmap_addr(rxp, mapping),
5636 PCI_DMA_FROMDEVICE);
5637 dev_kfree_skb_any(rxp->skb);
5643 /* Initialize tx/rx rings for packet processing.
5645 * The chip has been shut down and the driver detached from
5646 * the networking, so no interrupts or new tx packets will
5647 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
5650 static int tg3_rx_prodring_alloc(struct tg3 *tp,
5651 struct tg3_rx_prodring_set *tpr)
5653 u32 i, rx_pkt_dma_sz;
5654 struct tg3_napi *tnapi = &tp->napi[0];
5656 /* Zero out all descriptors. */
5657 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5659 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5660 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5661 tp->dev->mtu > ETH_DATA_LEN)
5662 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5663 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5665 /* Initialize invariants of the rings; we only set this
5666 * stuff once. This works because the card does not
5667 * write into the rx buffer posting rings.
5669 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5670 struct tg3_rx_buffer_desc *rxd;
5672 rxd = &tpr->rx_std[i];
5673 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5674 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5675 rxd->opaque = (RXD_OPAQUE_RING_STD |
5676 (i << RXD_OPAQUE_INDEX_SHIFT));
5679 /* Now allocate fresh SKBs for each rx ring. */
5680 for (i = 0; i < tp->rx_pending; i++) {
5681 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5682 printk(KERN_WARNING PFX
5683 "%s: Using a smaller RX standard ring, "
5684 "only %d out of %d buffers were allocated "
5686 tp->dev->name, i, tp->rx_pending);
5694 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5697 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5699 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5700 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5701 struct tg3_rx_buffer_desc *rxd;
5703 rxd = &tpr->rx_jmb[i].std;
5704 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5705 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5707 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5708 (i << RXD_OPAQUE_INDEX_SHIFT));
5711 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5712 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
5714 printk(KERN_WARNING PFX
5715 "%s: Using a smaller RX jumbo ring, "
5716 "only %d out of %d buffers were "
5717 "allocated successfully.\n",
5718 tp->dev->name, i, tp->rx_jumbo_pending);
5721 tp->rx_jumbo_pending = i;
5731 tg3_rx_prodring_free(tp, tpr);
5735 static void tg3_rx_prodring_fini(struct tg3 *tp,
5736 struct tg3_rx_prodring_set *tpr)
5738 kfree(tpr->rx_std_buffers);
5739 tpr->rx_std_buffers = NULL;
5740 kfree(tpr->rx_jmb_buffers);
5741 tpr->rx_jmb_buffers = NULL;
5743 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5744 tpr->rx_std, tpr->rx_std_mapping);
5748 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5749 tpr->rx_jmb, tpr->rx_jmb_mapping);
5754 static int tg3_rx_prodring_init(struct tg3 *tp,
5755 struct tg3_rx_prodring_set *tpr)
5757 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
5758 TG3_RX_RING_SIZE, GFP_KERNEL);
5759 if (!tpr->rx_std_buffers)
5762 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5763 &tpr->rx_std_mapping);
5767 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5768 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
5769 TG3_RX_JUMBO_RING_SIZE,
5771 if (!tpr->rx_jmb_buffers)
5774 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
5775 TG3_RX_JUMBO_RING_BYTES,
5776 &tpr->rx_jmb_mapping);
5784 tg3_rx_prodring_fini(tp, tpr);
5788 /* Free up pending packets in all rx/tx rings.
5790 * The chip has been shut down and the driver detached from
5791 * the networking, so no interrupts or new tx packets will
5792 * end up in the driver. tp->{tx,}lock is not held and we are not
5793 * in an interrupt context and thus may sleep.
5795 static void tg3_free_rings(struct tg3 *tp)
5799 for (j = 0; j < tp->irq_cnt; j++) {
5800 struct tg3_napi *tnapi = &tp->napi[j];
5802 if (!tnapi->tx_buffers)
5805 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5806 struct tx_ring_info *txp;
5807 struct sk_buff *skb;
5809 txp = &tnapi->tx_buffers[i];
5817 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5821 i += skb_shinfo(skb)->nr_frags + 1;
5823 dev_kfree_skb_any(skb);
5827 tg3_rx_prodring_free(tp, &tp->prodring[0]);
5830 /* Initialize tx/rx rings for packet processing.
5832 * The chip has been shut down and the driver detached from
5833 * the networking, so no interrupts or new tx packets will
5834 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
5837 static int tg3_init_rings(struct tg3 *tp)
5841 /* Free up all the SKBs. */
5844 for (i = 0; i < tp->irq_cnt; i++) {
5845 struct tg3_napi *tnapi = &tp->napi[i];
5847 tnapi->last_tag = 0;
5848 tnapi->last_irq_tag = 0;
5849 tnapi->hw_status->status = 0;
5850 tnapi->hw_status->status_tag = 0;
5851 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5856 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
5858 tnapi->rx_rcb_ptr = 0;
5860 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5863 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
5867 * Must not be invoked with interrupt sources disabled and
5868 * the hardware shut down.
5870 static void tg3_free_consistent(struct tg3 *tp)
5874 for (i = 0; i < tp->irq_cnt; i++) {
5875 struct tg3_napi *tnapi = &tp->napi[i];
5877 if (tnapi->tx_ring) {
5878 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5879 tnapi->tx_ring, tnapi->tx_desc_mapping);
5880 tnapi->tx_ring = NULL;
5883 kfree(tnapi->tx_buffers);
5884 tnapi->tx_buffers = NULL;
5886 if (tnapi->rx_rcb) {
5887 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5889 tnapi->rx_rcb_mapping);
5890 tnapi->rx_rcb = NULL;
5893 if (tnapi->hw_status) {
5894 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5896 tnapi->status_mapping);
5897 tnapi->hw_status = NULL;
5902 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5903 tp->hw_stats, tp->stats_mapping);
5904 tp->hw_stats = NULL;
5907 tg3_rx_prodring_fini(tp, &tp->prodring[0]);
5911 * Must not be invoked with interrupt sources disabled and
5912 * the hardware shut down. Can sleep.
5914 static int tg3_alloc_consistent(struct tg3 *tp)
5918 if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
5921 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5922 sizeof(struct tg3_hw_stats),
5923 &tp->stats_mapping);
5927 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5929 for (i = 0; i < tp->irq_cnt; i++) {
5930 struct tg3_napi *tnapi = &tp->napi[i];
5931 struct tg3_hw_status *sblk;
5933 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
5935 &tnapi->status_mapping);
5936 if (!tnapi->hw_status)
5939 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5940 sblk = tnapi->hw_status;
5943 * When RSS is enabled, the status block format changes
5944 * slightly. The "rx_jumbo_consumer", "reserved",
5945 * and "rx_mini_consumer" members get mapped to the
5946 * other three rx return ring producer indexes.
5950 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
5953 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
5956 tnapi->rx_rcb_prod_idx = &sblk->reserved;
5959 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
5964 * If multivector RSS is enabled, vector 0 does not handle
5965 * rx or tx interrupts. Don't allocate any resources for it.
5967 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
5970 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
5971 TG3_RX_RCB_RING_BYTES(tp),
5972 &tnapi->rx_rcb_mapping);
5976 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5978 tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
5979 TG3_TX_RING_SIZE, GFP_KERNEL);
5980 if (!tnapi->tx_buffers)
5983 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
5985 &tnapi->tx_desc_mapping);
5986 if (!tnapi->tx_ring)
5993 tg3_free_consistent(tp);
5997 #define MAX_WAIT_CNT 1000
5999 /* To stop a block, clear the enable bit and poll till it
6000 * clears. tp->lock is held.
6002 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6007 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6014 /* We can't enable/disable these bits of the
6015 * 5705/5750, just say success.
6028 for (i = 0; i < MAX_WAIT_CNT; i++) {
6031 if ((val & enable_bit) == 0)
6035 if (i == MAX_WAIT_CNT && !silent) {
6036 printk(KERN_ERR PFX "tg3_stop_block timed out, "
6037 "ofs=%lx enable_bit=%x\n",
6045 /* tp->lock is held. */
6046 static int tg3_abort_hw(struct tg3 *tp, int silent)
6050 tg3_disable_ints(tp);
6052 tp->rx_mode &= ~RX_MODE_ENABLE;
6053 tw32_f(MAC_RX_MODE, tp->rx_mode);
6056 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6057 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6058 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6059 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6060 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6061 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6063 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6064 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6065 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6066 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6067 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6068 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6069 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6071 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6072 tw32_f(MAC_MODE, tp->mac_mode);
6075 tp->tx_mode &= ~TX_MODE_ENABLE;
6076 tw32_f(MAC_TX_MODE, tp->tx_mode);
6078 for (i = 0; i < MAX_WAIT_CNT; i++) {
6080 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6083 if (i >= MAX_WAIT_CNT) {
6084 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
6085 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
6086 tp->dev->name, tr32(MAC_TX_MODE));
6090 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6091 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6092 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6094 tw32(FTQ_RESET, 0xffffffff);
6095 tw32(FTQ_RESET, 0x00000000);
6097 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6098 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6100 for (i = 0; i < tp->irq_cnt; i++) {
6101 struct tg3_napi *tnapi = &tp->napi[i];
6102 if (tnapi->hw_status)
6103 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6106 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6111 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6116 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6117 if (apedata != APE_SEG_SIG_MAGIC)
6120 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6121 if (!(apedata & APE_FW_STATUS_READY))
6124 /* Wait for up to 1 millisecond for APE to service previous event. */
6125 for (i = 0; i < 10; i++) {
6126 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6129 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6131 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6132 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6133 event | APE_EVENT_STATUS_EVENT_PENDING);
6135 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6137 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6143 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6144 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6147 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6152 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6156 case RESET_KIND_INIT:
6157 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6158 APE_HOST_SEG_SIG_MAGIC);
6159 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6160 APE_HOST_SEG_LEN_MAGIC);
6161 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6162 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6163 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6164 APE_HOST_DRIVER_ID_MAGIC);
6165 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6166 APE_HOST_BEHAV_NO_PHYLOCK);
6168 event = APE_EVENT_STATUS_STATE_START;
6170 case RESET_KIND_SHUTDOWN:
6171 /* With the interface we are currently using,
6172 * APE does not track driver state. Wiping
6173 * out the HOST SEGMENT SIGNATURE forces
6174 * the APE to assume OS absent status.
6176 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6178 event = APE_EVENT_STATUS_STATE_UNLOAD;
6180 case RESET_KIND_SUSPEND:
6181 event = APE_EVENT_STATUS_STATE_SUSPEND;
6187 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6189 tg3_ape_send_event(tp, event);
6192 /* tp->lock is held. */
6193 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6195 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6196 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6198 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6200 case RESET_KIND_INIT:
6201 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6205 case RESET_KIND_SHUTDOWN:
6206 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6210 case RESET_KIND_SUSPEND:
6211 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6220 if (kind == RESET_KIND_INIT ||
6221 kind == RESET_KIND_SUSPEND)
6222 tg3_ape_driver_state_change(tp, kind);
6225 /* tp->lock is held. */
6226 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6228 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6230 case RESET_KIND_INIT:
6231 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6232 DRV_STATE_START_DONE);
6235 case RESET_KIND_SHUTDOWN:
6236 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6237 DRV_STATE_UNLOAD_DONE);
6245 if (kind == RESET_KIND_SHUTDOWN)
6246 tg3_ape_driver_state_change(tp, kind);
6249 /* tp->lock is held. */
6250 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6252 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6254 case RESET_KIND_INIT:
6255 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6259 case RESET_KIND_SHUTDOWN:
6260 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6264 case RESET_KIND_SUSPEND:
6265 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6275 static int tg3_poll_fw(struct tg3 *tp)
6280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6281 /* Wait up to 20ms for init done. */
6282 for (i = 0; i < 200; i++) {
6283 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6290 /* Wait for firmware initialization to complete. */
6291 for (i = 0; i < 100000; i++) {
6292 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6293 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6298 /* Chip might not be fitted with firmware. Some Sun onboard
6299 * parts are configured like that. So don't signal the timeout
6300 * of the above loop as an error, but do report the lack of
6301 * running firmware once.
6304 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6305 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6307 printk(KERN_INFO PFX "%s: No firmware running.\n",
6314 /* Save PCI command register before chip reset */
6315 static void tg3_save_pci_state(struct tg3 *tp)
6317 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6320 /* Restore PCI state after chip reset */
6321 static void tg3_restore_pci_state(struct tg3 *tp)
6325 /* Re-enable indirect register accesses. */
6326 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6327 tp->misc_host_ctrl);
6329 /* Set MAX PCI retry to zero. */
6330 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6331 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6332 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6333 val |= PCISTATE_RETRY_SAME_DMA;
6334 /* Allow reads and writes to the APE register and memory space. */
6335 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6336 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6337 PCISTATE_ALLOW_APE_SHMEM_WR;
6338 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6340 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6342 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6343 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6344 pcie_set_readrq(tp->pdev, 4096);
6346 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6347 tp->pci_cacheline_sz);
6348 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6353 /* Make sure PCI-X relaxed ordering bit is clear. */
6354 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6357 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6359 pcix_cmd &= ~PCI_X_CMD_ERO;
6360 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6364 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6366 /* Chip reset on 5780 will reset MSI enable bit,
6367 * so need to restore it.
6369 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6372 pci_read_config_word(tp->pdev,
6373 tp->msi_cap + PCI_MSI_FLAGS,
6375 pci_write_config_word(tp->pdev,
6376 tp->msi_cap + PCI_MSI_FLAGS,
6377 ctrl | PCI_MSI_FLAGS_ENABLE);
6378 val = tr32(MSGINT_MODE);
6379 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6384 static void tg3_stop_fw(struct tg3 *);
6386 /* tp->lock is held. */
6387 static int tg3_chip_reset(struct tg3 *tp)
6390 void (*write_op)(struct tg3 *, u32, u32);
6395 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6397 /* No matching tg3_nvram_unlock() after this because
6398 * chip reset below will undo the nvram lock.
6400 tp->nvram_lock_cnt = 0;
6402 /* GRC_MISC_CFG core clock reset will clear the memory
6403 * enable bit in PCI register 4 and the MSI enable bit
6404 * on some chips, so we save relevant registers here.
6406 tg3_save_pci_state(tp);
6408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6409 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6410 tw32(GRC_FASTBOOT_PC, 0);
6413 * We must avoid the readl() that normally takes place.
6414 * It locks machines, causes machine checks, and other
6415 * fun things. So, temporarily disable the 5701
6416 * hardware workaround, while we do the reset.
6418 write_op = tp->write32;
6419 if (write_op == tg3_write_flush_reg32)
6420 tp->write32 = tg3_write32;
6422 /* Prevent the irq handler from reading or writing PCI registers
6423 * during chip reset when the memory enable bit in the PCI command
6424 * register may be cleared. The chip does not generate interrupt
6425 * at this time, but the irq handler may still be called due to irq
6426 * sharing or irqpoll.
6428 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6429 for (i = 0; i < tp->irq_cnt; i++) {
6430 struct tg3_napi *tnapi = &tp->napi[i];
6431 if (tnapi->hw_status) {
6432 tnapi->hw_status->status = 0;
6433 tnapi->hw_status->status_tag = 0;
6435 tnapi->last_tag = 0;
6436 tnapi->last_irq_tag = 0;
6440 for (i = 0; i < tp->irq_cnt; i++)
6441 synchronize_irq(tp->napi[i].irq_vec);
6443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6444 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6445 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6449 val = GRC_MISC_CFG_CORECLK_RESET;
6451 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6452 if (tr32(0x7e2c) == 0x60) {
6455 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6456 tw32(GRC_MISC_CFG, (1 << 29));
6461 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6462 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6463 tw32(GRC_VCPU_EXT_CTRL,
6464 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6467 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6468 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6469 tw32(GRC_MISC_CFG, val);
6471 /* restore 5701 hardware bug workaround write method */
6472 tp->write32 = write_op;
6474 /* Unfortunately, we have to delay before the PCI read back.
6475 * Some 575X chips even will not respond to a PCI cfg access
6476 * when the reset command is given to the chip.
6478 * How do these hardware designers expect things to work
6479 * properly if the PCI write is posted for a long period
6480 * of time? It is always necessary to have some method by
6481 * which a register read back can occur to flush the posted
6482 * write that performs the reset.
6484 * For most tg3 variants the trick below was working.
6489 /* Flush PCI posted writes. The normal MMIO registers
6490 * are inaccessible at this time so this is the only
6491 * way to do this reliably (actually, this is no longer
6492 * the case, see above). I tried to use indirect
6493 * register read/write but this upset some 5701 variants.
6495 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6499 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6502 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6506 /* Wait for link training to complete. */
6507 for (i = 0; i < 5000; i++)
6510 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6511 pci_write_config_dword(tp->pdev, 0xc4,
6512 cfg_val | (1 << 15));
6515 /* Clear the "no snoop" and "relaxed ordering" bits. */
6516 pci_read_config_word(tp->pdev,
6517 tp->pcie_cap + PCI_EXP_DEVCTL,
6519 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6520 PCI_EXP_DEVCTL_NOSNOOP_EN);
6522 * Older PCIe devices only support the 128 byte
6523 * MPS setting. Enforce the restriction.
6525 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6526 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6527 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6528 pci_write_config_word(tp->pdev,
6529 tp->pcie_cap + PCI_EXP_DEVCTL,
6532 pcie_set_readrq(tp->pdev, 4096);
6534 /* Clear error status */
6535 pci_write_config_word(tp->pdev,
6536 tp->pcie_cap + PCI_EXP_DEVSTA,
6537 PCI_EXP_DEVSTA_CED |
6538 PCI_EXP_DEVSTA_NFED |
6539 PCI_EXP_DEVSTA_FED |
6540 PCI_EXP_DEVSTA_URD);
6543 tg3_restore_pci_state(tp);
6545 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6548 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6549 val = tr32(MEMARB_MODE);
6550 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6552 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6554 tw32(0x5000, 0x400);
6557 tw32(GRC_MODE, tp->grc_mode);
6559 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6562 tw32(0xc4, val | (1 << 15));
6565 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6566 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6567 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6568 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6569 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6570 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6573 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6574 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6575 tw32_f(MAC_MODE, tp->mac_mode);
6576 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6577 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6578 tw32_f(MAC_MODE, tp->mac_mode);
6579 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6580 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6581 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6582 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6583 tw32_f(MAC_MODE, tp->mac_mode);
6585 tw32_f(MAC_MODE, 0);
6588 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6590 err = tg3_poll_fw(tp);
6596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6599 phy_addr = tp->phy_addr;
6600 tp->phy_addr = TG3_PHY_PCIE_ADDR;
6602 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6603 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
6604 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
6605 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
6606 TG3_PCIEPHY_TX0CTRL1_NB_EN;
6607 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
6610 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6611 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
6612 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
6613 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
6614 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
6617 tp->phy_addr = phy_addr;
6620 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6621 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6622 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6623 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
6626 tw32(0x7c00, val | (1 << 25));
6629 /* Reprobe ASF enable state. */
6630 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6631 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6632 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6633 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6636 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6637 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6638 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6639 tp->last_event_jiffies = jiffies;
6640 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6641 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6648 /* tp->lock is held. */
6649 static void tg3_stop_fw(struct tg3 *tp)
6651 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6652 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6653 /* Wait for RX cpu to ACK the previous event. */
6654 tg3_wait_for_event_ack(tp);
6656 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6658 tg3_generate_fw_event(tp);
6660 /* Wait for RX cpu to ACK this event. */
6661 tg3_wait_for_event_ack(tp);
6665 /* tp->lock is held. */
6666 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6672 tg3_write_sig_pre_reset(tp, kind);
6674 tg3_abort_hw(tp, silent);
6675 err = tg3_chip_reset(tp);
6677 __tg3_set_mac_addr(tp, 0);
6679 tg3_write_sig_legacy(tp, kind);
6680 tg3_write_sig_post_reset(tp, kind);
6688 #define RX_CPU_SCRATCH_BASE 0x30000
6689 #define RX_CPU_SCRATCH_SIZE 0x04000
6690 #define TX_CPU_SCRATCH_BASE 0x34000
6691 #define TX_CPU_SCRATCH_SIZE 0x04000
6693 /* tp->lock is held. */
6694 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6698 BUG_ON(offset == TX_CPU_BASE &&
6699 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6701 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6702 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6704 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
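/* Halting the RX CPU can take several attempts: keep asserting
 * CPU_MODE_HALT and polling until the mode register reflects it,
 * then issue one final flushed halt.
 */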
6707 if (offset == RX_CPU_BASE) {
6708 for (i = 0; i < 10000; i++) {
6709 tw32(offset + CPU_STATE, 0xffffffff);
6710 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6711 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6715 tw32(offset + CPU_STATE, 0xffffffff);
6716 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6719 for (i = 0; i < 10000; i++) {
6720 tw32(offset + CPU_STATE, 0xffffffff);
6721 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6722 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6728 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6731 (offset == RX_CPU_BASE ? "RX" : "TX"));
6735 /* Clear firmware's nvram arbitration. */
6736 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6737 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6742 unsigned int fw_base;
6743 unsigned int fw_len;
6744 const __be32 *fw_data;
6747 /* tp->lock is held. */
6748 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6749 int cpu_scratch_size, struct fw_info *info)
6751 int err, lock_err, i;
6752 void (*write_op)(struct tg3 *, u32, u32);
6754 if (cpu_base == TX_CPU_BASE &&
6755 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6756 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6757 "TX cpu firmware on %s which is 5705.\n",
6762 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6763 write_op = tg3_write_mem;
6765 write_op = tg3_write_indirect_reg32;
6767 /* It is possible that bootcode is still loading at this point.
6768 * Get the nvram lock first before halting the cpu.
6770 lock_err = tg3_nvram_lock(tp);
6771 err = tg3_halt_cpu(tp, cpu_base);
6773 tg3_nvram_unlock(tp);
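/* Zero the CPU's scratch area, then copy the firmware image into it a
 * word at a time, offset by the low 16 bits of its load address.
 */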
6777 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6778 write_op(tp, cpu_scratch_base + i, 0);
6779 tw32(cpu_base + CPU_STATE, 0xffffffff);
6780 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6781 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6782 write_op(tp, (cpu_scratch_base +
6783 (info->fw_base & 0xffff) +
6785 be32_to_cpu(info->fw_data[i]));
6793 /* tp->lock is held. */
6794 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6796 struct fw_info info;
6797 const __be32 *fw_data;
6800 fw_data = (void *)tp->fw->data;
6802 /* Firmware blob starts with version numbers, followed by
6803 start address and length. We are setting complete length.
6804 length = end_address_of_bss - start_address_of_text.
6805 Remainder is the blob to be loaded contiguously
6806 from start address. */
6808 info.fw_base = be32_to_cpu(fw_data[1]);
6809 info.fw_len = tp->fw->size - 12;
6810 info.fw_data = &fw_data[3];
6812 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6813 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6818 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6819 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6824 /* Now startup only the RX cpu. */
6825 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6826 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6828 for (i = 0; i < 5; i++) {
6829 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6831 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6832 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6833 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6837 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6838 "to set RX CPU PC, is %08x should be %08x\n",
6839 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6843 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6844 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6849 /* 5705 needs a special version of the TSO firmware. */
6851 /* tp->lock is held. */
6852 static int tg3_load_tso_firmware(struct tg3 *tp)
6854 struct fw_info info;
6855 const __be32 *fw_data;
6856 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6859 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6862 fw_data = (void *)tp->fw->data;
6864 /* Firmware blob starts with version numbers, followed by
6865 start address and length. We are setting complete length.
6866 length = end_address_of_bss - start_address_of_text.
6867 Remainder is the blob to be loaded contiguously
6868 from start address. */
6870 info.fw_base = be32_to_cpu(fw_data[1]);
6871 cpu_scratch_size = tp->fw_len;
6872 info.fw_len = tp->fw->size - 12;
6873 info.fw_data = &fw_data[3];
6875 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6876 cpu_base = RX_CPU_BASE;
6877 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6879 cpu_base = TX_CPU_BASE;
6880 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6881 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6884 err = tg3_load_firmware_cpu(tp, cpu_base,
6885 cpu_scratch_base, cpu_scratch_size,
6890 /* Now startup the cpu. */
6891 tw32(cpu_base + CPU_STATE, 0xffffffff);
6892 tw32_f(cpu_base + CPU_PC, info.fw_base);
6894 for (i = 0; i < 5; i++) {
6895 if (tr32(cpu_base + CPU_PC) == info.fw_base)
6897 tw32(cpu_base + CPU_STATE, 0xffffffff);
6898 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6899 tw32_f(cpu_base + CPU_PC, info.fw_base);
6903 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6904 "to set CPU PC, is %08x should be %08x\n",
6905 tp->dev->name, tr32(cpu_base + CPU_PC),
6909 tw32(cpu_base + CPU_STATE, 0xffffffff);
6910 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6915 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6917 struct tg3 *tp = netdev_priv(dev);
6918 struct sockaddr *addr = p;
6919 int err = 0, skip_mac_1 = 0;
6921 if (!is_valid_ether_addr(addr->sa_data))
6924 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6926 if (!netif_running(dev))
6929 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6930 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6932 addr0_high = tr32(MAC_ADDR_0_HIGH);
6933 addr0_low = tr32(MAC_ADDR_0_LOW);
6934 addr1_high = tr32(MAC_ADDR_1_HIGH);
6935 addr1_low = tr32(MAC_ADDR_1_LOW);
6937 /* Skip MAC addr 1 if ASF is using it. */
6938 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6939 !(addr1_high == 0 && addr1_low == 0))
6942 spin_lock_bh(&tp->lock);
6943 __tg3_set_mac_addr(tp, skip_mac_1);
6944 spin_unlock_bh(&tp->lock);
6949 /* tp->lock is held. */
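/* Each TG3_BDINFO slot in NIC SRAM describes one host ring: the 64-bit
 * host address of the ring, a maxlen/flags word, and, on pre-5705
 * devices, an address in NIC-local memory.
 */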
6950 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6951 dma_addr_t mapping, u32 maxlen_flags,
6955 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6956 ((u64) mapping >> 32));
6958 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6959 ((u64) mapping & 0xffffffff));
6961 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6964 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6966 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6970 static void __tg3_set_rx_mode(struct net_device *);
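/* Program the host coalescing engine from the ethtool coalescing
 * parameters: the shared base registers when not using MSI-X, and
 * per-vector tick/frame thresholds otherwise, clearing whatever
 * vector slots are unused.
 */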
6971 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6975 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
6976 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6977 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6978 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6980 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6981 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6982 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6984 tw32(HOSTCC_TXCOL_TICKS, 0);
6985 tw32(HOSTCC_TXMAX_FRAMES, 0);
6986 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
6988 tw32(HOSTCC_RXCOL_TICKS, 0);
6989 tw32(HOSTCC_RXMAX_FRAMES, 0);
6990 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
6993 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6994 u32 val = ec->stats_block_coalesce_usecs;
6996 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6997 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6999 if (!netif_carrier_ok(tp->dev))
7002 tw32(HOSTCC_STAT_COAL_TICKS, val);
7005 for (i = 0; i < tp->irq_cnt - 1; i++) {
7008 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7009 tw32(reg, ec->rx_coalesce_usecs);
7010 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7011 tw32(reg, ec->tx_coalesce_usecs);
7012 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7013 tw32(reg, ec->rx_max_coalesced_frames);
7014 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7015 tw32(reg, ec->tx_max_coalesced_frames);
7016 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7017 tw32(reg, ec->rx_max_coalesced_frames_irq);
7018 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7019 tw32(reg, ec->tx_max_coalesced_frames_irq);
7022 for (; i < tp->irq_max - 1; i++) {
7023 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7024 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7025 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7026 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7027 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7028 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7032 /* tp->lock is held. */
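/* Return the send and receive-return ring control blocks, mailboxes
 * and status blocks to a clean state before they are reprogrammed
 * for the current ring configuration.
 */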
7033 static void tg3_rings_reset(struct tg3 *tp)
7036 u32 stblk, txrcb, rxrcb, limit;
7037 struct tg3_napi *tnapi = &tp->napi[0];
7039 /* Disable all transmit rings but the first. */
7040 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7041 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7043 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7045 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7046 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7047 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7048 BDINFO_FLAGS_DISABLED);
7051 /* Disable all receive return rings but the first. */
7052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7053 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7054 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7055 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7056 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7057 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7059 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7061 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7062 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7063 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7064 BDINFO_FLAGS_DISABLED);
7066 /* Disable interrupts */
7067 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7069 /* Zero mailbox registers. */
7070 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7071 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7072 tp->napi[i].tx_prod = 0;
7073 tp->napi[i].tx_cons = 0;
7074 tw32_mailbox(tp->napi[i].prodmbox, 0);
7075 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7076 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7079 tp->napi[0].tx_prod = 0;
7080 tp->napi[0].tx_cons = 0;
7081 tw32_mailbox(tp->napi[0].prodmbox, 0);
7082 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7085 /* Make sure the NIC-based send BD rings are disabled. */
7086 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7087 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7088 for (i = 0; i < 16; i++)
7089 tw32_tx_mbox(mbox + i * 8, 0);
7092 txrcb = NIC_SRAM_SEND_RCB;
7093 rxrcb = NIC_SRAM_RCV_RET_RCB;
7095 /* Clear the status block in RAM. */
7096 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7098 /* Set status block DMA address */
7099 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7100 ((u64) tnapi->status_mapping >> 32));
7101 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7102 ((u64) tnapi->status_mapping & 0xffffffff));
7104 if (tnapi->tx_ring) {
7105 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7106 (TG3_TX_RING_SIZE <<
7107 BDINFO_FLAGS_MAXLEN_SHIFT),
7108 NIC_SRAM_TX_BUFFER_DESC);
7109 txrcb += TG3_BDINFO_SIZE;
7112 if (tnapi->rx_rcb) {
7113 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7114 (TG3_RX_RCB_RING_SIZE(tp) <<
7115 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7116 rxrcb += TG3_BDINFO_SIZE;
7119 stblk = HOSTCC_STATBLCK_RING1;
7121 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7122 u64 mapping = (u64)tnapi->status_mapping;
7123 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7124 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7126 /* Clear the status block in RAM. */
7127 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7129 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7130 (TG3_TX_RING_SIZE <<
7131 BDINFO_FLAGS_MAXLEN_SHIFT),
7132 NIC_SRAM_TX_BUFFER_DESC);
7134 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7135 (TG3_RX_RCB_RING_SIZE(tp) <<
7136 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7139 txrcb += TG3_BDINFO_SIZE;
7140 rxrcb += TG3_BDINFO_SIZE;
7144 /* tp->lock is held. */
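/* Take the chip through a full reset and reprogram it: chip reset,
 * buffer manager pools, receive rings, DMA engines, coalescing,
 * MAC and PHY setup, and finally the receive rules.
 */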
7145 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7147 u32 val, rdmac_mode;
7149 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7151 tg3_disable_ints(tp);
7155 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7157 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7158 tg3_abort_hw(tp, 1);
7162 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7165 err = tg3_chip_reset(tp);
7169 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7171 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7172 val = tr32(TG3_CPMU_CTRL);
7173 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7174 tw32(TG3_CPMU_CTRL, val);
7176 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7177 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7178 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7179 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7181 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7182 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7183 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7184 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7186 val = tr32(TG3_CPMU_HST_ACC);
7187 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7188 val |= CPMU_HST_ACC_MACCLK_6_25;
7189 tw32(TG3_CPMU_HST_ACC, val);
7192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7193 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7194 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7195 PCIE_PWR_MGMT_L1_THRESH_4MS;
7196 tw32(PCIE_PWR_MGMT_THRESH, val);
7198 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7199 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7201 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7203 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7204 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7207 /* This works around an issue with Athlon chipsets on
7208 * B3 tigon3 silicon. This bit has no effect on any
7209 * other revision. But do not set this on PCI Express
7210 * chips and don't even touch the clocks if the CPMU is present.
7212 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7213 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7214 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7215 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7218 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7219 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7220 val = tr32(TG3PCI_PCISTATE);
7221 val |= PCISTATE_RETRY_SAME_DMA;
7222 tw32(TG3PCI_PCISTATE, val);
7225 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7226 /* Allow reads and writes to the
7227 * APE register and memory space.
7229 val = tr32(TG3PCI_PCISTATE);
7230 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7231 PCISTATE_ALLOW_APE_SHMEM_WR;
7232 tw32(TG3PCI_PCISTATE, val);
7235 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7236 /* Enable some hw fixes. */
7237 val = tr32(TG3PCI_MSI_DATA);
7238 val |= (1 << 26) | (1 << 28) | (1 << 29);
7239 tw32(TG3PCI_MSI_DATA, val);
7242 /* Descriptor ring init may make accesses to the
7243 * NIC SRAM area to set up the TX descriptors, so we
7244 * can only do this after the hardware has been
7245 * successfully reset.
7247 err = tg3_init_rings(tp);
7251 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7252 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7253 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
7254 /* This value is determined during the probe time DMA
7255 * engine test, tg3_test_dma.
7257 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7260 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7261 GRC_MODE_4X_NIC_SEND_RINGS |
7262 GRC_MODE_NO_TX_PHDR_CSUM |
7263 GRC_MODE_NO_RX_PHDR_CSUM);
7264 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7266 /* Pseudo-header checksum is done by hardware logic and not
7267 * the offload processors, so make the chip do the pseudo-
7268 * header checksums on receive. For transmit it is more
7269 * convenient to do the pseudo-header checksum in software
7270 * as Linux does that on transmit for us in all cases.
7272 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7276 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7278 /* Set up the timer prescaler register. Clock is always 66 MHz. */
7279 val = tr32(GRC_MISC_CFG);
7281 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7282 tw32(GRC_MISC_CFG, val);
7284 /* Initialize MBUF/DESC pool. */
7285 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7287 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7288 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7290 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7292 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7293 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7294 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7296 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7299 fw_len = tp->fw_len;
7300 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7301 tw32(BUFMGR_MB_POOL_ADDR,
7302 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7303 tw32(BUFMGR_MB_POOL_SIZE,
7304 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7307 if (tp->dev->mtu <= ETH_DATA_LEN) {
7308 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7309 tp->bufmgr_config.mbuf_read_dma_low_water);
7310 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7311 tp->bufmgr_config.mbuf_mac_rx_low_water);
7312 tw32(BUFMGR_MB_HIGH_WATER,
7313 tp->bufmgr_config.mbuf_high_water);
7315 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7316 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7317 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7318 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7319 tw32(BUFMGR_MB_HIGH_WATER,
7320 tp->bufmgr_config.mbuf_high_water_jumbo);
7322 tw32(BUFMGR_DMA_LOW_WATER,
7323 tp->bufmgr_config.dma_low_water);
7324 tw32(BUFMGR_DMA_HIGH_WATER,
7325 tp->bufmgr_config.dma_high_water);
7327 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7328 for (i = 0; i < 2000; i++) {
7329 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7334 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7339 /* Set up the replenish threshold. */
7340 val = tp->rx_pending / 8;
7343 else if (val > tp->rx_std_max_post)
7344 val = tp->rx_std_max_post;
7345 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7346 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7347 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7349 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7350 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7353 tw32(RCVBDI_STD_THRESH, val);
7355 /* Initialize TG3_BDINFO's at:
7356 * RCVDBDI_STD_BD: standard eth size rx ring
7357 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7358 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7361 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7362 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7363 * ring attribute flags
7364 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7366 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7367 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7369 * The size of each ring is fixed in the firmware, but the location is configurable.
7372 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7373 ((u64) tpr->rx_std_mapping >> 32));
7374 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7375 ((u64) tpr->rx_std_mapping & 0xffffffff));
7376 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7377 NIC_SRAM_RX_BUFFER_DESC);
7379 /* Disable the mini ring */
7380 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7381 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7382 BDINFO_FLAGS_DISABLED);
7384 /* Program the jumbo buffer descriptor ring control
7385 * blocks on those devices that have them.
7387 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7388 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7389 /* Set up the replenish threshold. */
7390 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7392 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7393 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7394 ((u64) tpr->rx_jmb_mapping >> 32));
7395 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7396 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7397 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7398 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7399 BDINFO_FLAGS_USE_EXT_RECV);
7400 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7401 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7403 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7404 BDINFO_FLAGS_DISABLED);
7407 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7408 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7409 (RX_STD_MAX_SIZE << 2);
7411 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7413 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7415 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7417 tpr->rx_std_ptr = tp->rx_pending;
7418 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7421 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7422 tp->rx_jumbo_pending : 0;
7423 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7426 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7427 tw32(STD_REPLENISH_LWM, 32);
7428 tw32(JMB_REPLENISH_LWM, 16);
7431 tg3_rings_reset(tp);
7433 /* Initialize MAC address and backoff seed. */
7434 __tg3_set_mac_addr(tp, 0);
7436 /* MTU + ethernet header + FCS + optional VLAN tag */
7437 tw32(MAC_RX_MTU_SIZE,
7438 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7440 /* The slot time is changed by tg3_setup_phy if we
7441 * run at gigabit with half duplex.
7443 tw32(MAC_TX_LENGTHS,
7444 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7445 (6 << TX_LENGTHS_IPG_SHIFT) |
7446 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7448 /* Receive rules. */
7449 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7450 tw32(RCVLPC_CONFIG, 0x0181);
7452 /* Calculate the RDMAC_MODE setting early; we need it to determine
7453 * the RCVLPC_STATE_ENABLE mask.
7455 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7456 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7457 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7458 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7459 RDMAC_MODE_LNGREAD_ENAB);
7461 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7462 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7463 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7464 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7465 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7466 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7468 /* If statement applies to 5705 and 5750 PCI devices only */
7469 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7470 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7471 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7472 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7474 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7475 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7476 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7477 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7481 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7482 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7484 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7485 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7489 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7491 /* Receive/send statistics. */
7492 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7493 val = tr32(RCVLPC_STATS_ENABLE);
7494 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7495 tw32(RCVLPC_STATS_ENABLE, val);
7496 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7497 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7498 val = tr32(RCVLPC_STATS_ENABLE);
7499 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7500 tw32(RCVLPC_STATS_ENABLE, val);
7502 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7504 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7505 tw32(SNDDATAI_STATSENAB, 0xffffff);
7506 tw32(SNDDATAI_STATSCTRL,
7507 (SNDDATAI_SCTRL_ENABLE |
7508 SNDDATAI_SCTRL_FASTUPD));
7510 /* Set up the host coalescing engine. */
7511 tw32(HOSTCC_MODE, 0);
7512 for (i = 0; i < 2000; i++) {
7513 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7518 __tg3_set_coalesce(tp, &tp->coal);
7520 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7521 /* Status/statistics block address. See tg3_timer,
7522 * the tg3_periodic_fetch_stats call there, and
7523 * tg3_get_stats to see how this works for 5705/5750 chips.
7525 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7526 ((u64) tp->stats_mapping >> 32));
7527 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7528 ((u64) tp->stats_mapping & 0xffffffff));
7529 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7531 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7533 /* Clear statistics and status block memory areas */
7534 for (i = NIC_SRAM_STATS_BLK;
7535 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7537 tg3_write_mem(tp, i, 0);
7542 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7544 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7545 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7546 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7547 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7549 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7550 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7551 /* Reset to avoid intermittently losing the first RX packet. */
7552 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7556 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7557 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7560 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7561 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7562 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7563 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7564 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7565 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7566 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7569 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7570 * If TG3_FLG2_IS_NIC is zero, we should read the
7571 * register to preserve the GPIO settings for LOMs. The GPIOs,
7572 * whether used as inputs or outputs, are set by boot code after reset.
7575 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7578 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7579 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7580 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7583 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7584 GRC_LCLCTRL_GPIO_OUTPUT3;
7586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7587 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7589 tp->grc_local_ctrl &= ~gpio_mask;
7590 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7592 /* GPIO1 must be driven high for EEPROM write protect. */
7593 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7594 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7595 GRC_LCLCTRL_GPIO_OUTPUT1);
7597 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7600 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
7601 val = tr32(MSGINT_MODE);
7602 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
7603 tw32(MSGINT_MODE, val);
7606 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7607 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7611 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7612 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7613 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7614 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7615 WDMAC_MODE_LNGREAD_ENAB);
7617 /* If statement applies to 5705 and 5750 PCI devices only */
7618 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7619 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7620 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7621 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7622 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7623 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7625 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7626 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7627 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7628 val |= WDMAC_MODE_RX_ACCEL;
7632 /* Enable host coalescing bug fix */
7633 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7634 val |= WDMAC_MODE_STATUS_TAG_FIX;
7636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7637 val |= WDMAC_MODE_BURST_ALL_DATA;
7639 tw32_f(WDMAC_MODE, val);
7642 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7645 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7647 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7648 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7649 pcix_cmd |= PCI_X_CMD_READ_2K;
7650 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7651 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7652 pcix_cmd |= PCI_X_CMD_READ_2K;
7654 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7658 tw32_f(RDMAC_MODE, rdmac_mode);
7661 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7662 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7663 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7667 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7669 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7671 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7672 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7673 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7674 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7675 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7676 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7677 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
7678 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
7679 val |= SNDBDI_MODE_MULTI_TXQ_EN;
7680 tw32(SNDBDI_MODE, val);
7681 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7683 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7684 err = tg3_load_5701_a0_firmware_fix(tp);
7689 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7690 err = tg3_load_tso_firmware(tp);
7695 tp->tx_mode = TX_MODE_ENABLE;
7696 tw32_f(MAC_TX_MODE, tp->tx_mode);
7699 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
7700 u32 reg = MAC_RSS_INDIR_TBL_0;
7701 u8 *ent = (u8 *)&val;
7703 /* Set up the indirection table. */
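/* Each of the TG3_RSS_INDIR_TBL_SIZE entries selects an RSS RX ring;
 * spread them round-robin over the irq_cnt - 1 RX rings (the first
 * vector is not used for RX), packing four entries into each 32-bit
 * register write.
 */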
7704 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
7705 int idx = i % sizeof(val);
7707 ent[idx] = i % (tp->irq_cnt - 1);
7708 if (idx == sizeof(val) - 1) {
7714 /* Set up the "secret" hash key. */
7715 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
7716 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
7717 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
7718 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
7719 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
7720 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
7721 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
7722 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
7723 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
7724 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
7727 tp->rx_mode = RX_MODE_ENABLE;
7728 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7729 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7731 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
7732 tp->rx_mode |= RX_MODE_RSS_ENABLE |
7733 RX_MODE_RSS_ITBL_HASH_BITS_7 |
7734 RX_MODE_RSS_IPV6_HASH_EN |
7735 RX_MODE_RSS_TCP_IPV6_HASH_EN |
7736 RX_MODE_RSS_IPV4_HASH_EN |
7737 RX_MODE_RSS_TCP_IPV4_HASH_EN;
7739 tw32_f(MAC_RX_MODE, tp->rx_mode);
7742 tw32(MAC_LED_CTRL, tp->led_ctrl);
7744 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7745 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7746 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7749 tw32_f(MAC_RX_MODE, tp->rx_mode);
7752 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7753 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7754 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7755 /* Set the drive transmission level to 1.2V, but only if the
7756 * signal pre-emphasis bit is not set. */
7757 val = tr32(MAC_SERDES_CFG);
7760 tw32(MAC_SERDES_CFG, val);
7762 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7763 tw32(MAC_SERDES_CFG, 0x616000);
7766 /* Prevent the chip from dropping frames when flow control is enabled. */
7769 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7771 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7772 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7773 /* Use hardware link auto-negotiation */
7774 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7777 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7778 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7781 tmp = tr32(SERDES_RX_CTRL);
7782 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7783 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7784 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7785 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7788 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7789 if (tp->link_config.phy_is_low_power) {
7790 tp->link_config.phy_is_low_power = 0;
7791 tp->link_config.speed = tp->link_config.orig_speed;
7792 tp->link_config.duplex = tp->link_config.orig_duplex;
7793 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7796 err = tg3_setup_phy(tp, 0);
7800 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7801 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
7804 /* Clear CRC stats. */
7805 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7806 tg3_writephy(tp, MII_TG3_TEST1,
7807 tmp | MII_TG3_TEST1_CRC_EN);
7808 tg3_readphy(tp, 0x14, &tmp);
7813 __tg3_set_rx_mode(tp->dev);
7815 /* Initialize receive rules. */
7816 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7817 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7818 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7819 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7821 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7822 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7826 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7830 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7832 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7834 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7836 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7838 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7840 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7842 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7844 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7846 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7848 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7850 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7852 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7854 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7856 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7864 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7865 /* Write our heartbeat update interval to APE. */
7866 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7867 APE_HOST_HEARTBEAT_INT_DISABLE);
7869 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7874 /* Called at device open time to get the chip ready for
7875 * packet processing. Invoked with tp->lock held.
7877 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7879 tg3_switch_clocks(tp);
7881 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7883 return tg3_reset_hw(tp, reset_phy);
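/* Fold a 32-bit hardware counter into a 64-bit software counter:
 * add into the low word and carry into the high word when the
 * addition wraps.
 */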
7886 #define TG3_STAT_ADD32(PSTAT, REG) \
7887 do { u32 __val = tr32(REG); \
7888 (PSTAT)->low += __val; \
7889 if ((PSTAT)->low < __val) \
7890 (PSTAT)->high += 1; \
7893 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7895 struct tg3_hw_stats *sp = tp->hw_stats;
7897 if (!netif_carrier_ok(tp->dev))
7900 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7901 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7902 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7903 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7904 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7905 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7906 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7907 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7908 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7909 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7910 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7911 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7912 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7914 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7915 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7916 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7917 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7918 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7919 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7920 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7921 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7922 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7923 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7924 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7925 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7926 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7927 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7929 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7930 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7931 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7934 static void tg3_timer(unsigned long __opaque)
7936 struct tg3 *tp = (struct tg3 *) __opaque;
7941 spin_lock(&tp->lock);
7943 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7944 /* All of this garbage is needed because, with non-tagged IRQ
7945 * status, the mailbox/status_block protocol the chip uses with
7946 * the CPU is race prone.
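/* If the status block shows pending work, force an interrupt with
 * GRC_LCLCTRL_SETINT; otherwise ask the coalescing engine for an
 * immediate status block update (HOSTCC_MODE_NOW).
 */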
7948 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
7949 tw32(GRC_LOCAL_CTRL,
7950 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7952 tw32(HOSTCC_MODE, tp->coalesce_mode |
7953 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
7956 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7957 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7958 spin_unlock(&tp->lock);
7959 schedule_work(&tp->reset_task);
7964 /* This part only runs once per second. */
7965 if (!--tp->timer_counter) {
7966 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7967 tg3_periodic_fetch_stats(tp);
7969 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7973 mac_stat = tr32(MAC_STATUS);
7976 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7977 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7979 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7983 tg3_setup_phy(tp, 0);
7984 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7985 u32 mac_stat = tr32(MAC_STATUS);
7988 if (netif_carrier_ok(tp->dev) &&
7989 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7992 if (! netif_carrier_ok(tp->dev) &&
7993 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7994 MAC_STATUS_SIGNAL_DET))) {
7998 if (!tp->serdes_counter) {
8001 ~MAC_MODE_PORT_MODE_MASK));
8003 tw32_f(MAC_MODE, tp->mac_mode);
8006 tg3_setup_phy(tp, 0);
8008 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8009 tg3_serdes_parallel_detect(tp);
8011 tp->timer_counter = tp->timer_multiplier;
8014 /* Heartbeat is only sent once every 2 seconds.
8016 * The heartbeat is to tell the ASF firmware that the host
8017 * driver is still alive. In the event that the OS crashes,
8018 * ASF needs to reset the hardware to free up the FIFO space
8019 * that may be filled with rx packets destined for the host.
8020 * If the FIFO is full, ASF will no longer function properly.
8022 * Unintended resets have been reported on real time kernels
8023 * where the timer doesn't run on time. Netpoll will also have the same effect.
8026 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8027 * to check the ring condition when the heartbeat is expiring
8028 * before doing the reset. This will prevent most unintended resets.
8031 if (!--tp->asf_counter) {
8032 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8033 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8034 tg3_wait_for_event_ack(tp);
8036 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8037 FWCMD_NICDRV_ALIVE3);
8038 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8039 /* 5 second timeout */
8040 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
8042 tg3_generate_fw_event(tp);
8044 tp->asf_counter = tp->asf_multiplier;
8047 spin_unlock(&tp->lock);
8050 tp->timer.expires = jiffies + tp->timer_offset;
8051 add_timer(&tp->timer);
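/* Register the interrupt handler for one vector.  MSI/MSI-X vectors
 * get a dedicated handler; legacy interrupts are registered shared
 * and use the tagged-status ISR when the chip supports it.
 */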
8054 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8057 unsigned long flags;
8059 struct tg3_napi *tnapi = &tp->napi[irq_num];
8061 if (tp->irq_cnt == 1)
8062 name = tp->dev->name;
8064 name = &tnapi->irq_lbl[0];
8065 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8066 name[IFNAMSIZ-1] = 0;
8069 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8071 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8073 flags = IRQF_SAMPLE_RANDOM;
8076 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8077 fn = tg3_interrupt_tagged;
8078 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8081 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
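/* Check that the hardware can really deliver an interrupt on the
 * current vector: install a test ISR, force a coalescing event and
 * poll the interrupt mailbox for evidence that it fired.
 */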
8084 static int tg3_test_interrupt(struct tg3 *tp)
8086 struct tg3_napi *tnapi = &tp->napi[0];
8087 struct net_device *dev = tp->dev;
8088 int err, i, intr_ok = 0;
8091 if (!netif_running(dev))
8094 tg3_disable_ints(tp);
8096 free_irq(tnapi->irq_vec, tnapi);
8099 * Turn off MSI one shot mode. Otherwise this test has no
8100 * observable way to know whether the interrupt was delivered.
8102 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
8103 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8104 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8105 tw32(MSGINT_MODE, val);
8108 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8109 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8113 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8114 tg3_enable_ints(tp);
8116 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8119 for (i = 0; i < 5; i++) {
8120 u32 int_mbox, misc_host_ctrl;
8122 int_mbox = tr32_mailbox(tnapi->int_mbox);
8123 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8125 if ((int_mbox != 0) ||
8126 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8134 tg3_disable_ints(tp);
8136 free_irq(tnapi->irq_vec, tnapi);
8138 err = tg3_request_irq(tp, 0);
8144 /* Re-enable MSI one shot mode. */
8145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
8146 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8147 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8148 tw32(MSGINT_MODE, val);
8156 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
8157 * INTx mode is successfully restored
8159 static int tg3_test_msi(struct tg3 *tp)
8164 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8167 /* Turn off SERR reporting in case MSI terminates with Master Abort. */
8170 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8171 pci_write_config_word(tp->pdev, PCI_COMMAND,
8172 pci_cmd & ~PCI_COMMAND_SERR);
8174 err = tg3_test_interrupt(tp);
8176 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8181 /* other failures */
8185 /* MSI test failed, go back to INTx mode */
8186 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8187 "switching to INTx mode. Please report this failure to "
8188 "the PCI maintainer and include system chipset information.\n",
8191 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8193 pci_disable_msi(tp->pdev);
8195 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8197 err = tg3_request_irq(tp, 0);
8201 /* Need to reset the chip because the MSI cycle may have terminated
8202 * with Master Abort.
8204 tg3_full_lock(tp, 1);
8206 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8207 err = tg3_init_hw(tp, 1);
8209 tg3_full_unlock(tp);
8212 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8217 static int tg3_request_firmware(struct tg3 *tp)
8219 const __be32 *fw_data;
8221 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8222 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
8223 tp->dev->name, tp->fw_needed);
8227 fw_data = (void *)tp->fw->data;
8229 /* Firmware blob starts with version numbers, followed by
8230 * start address and _full_ length including BSS sections
8231 * (which must be longer than the actual data, of course).
8234 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8235 if (tp->fw_len < (tp->fw->size - 12)) {
8236 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
8237 tp->dev->name, tp->fw_len, tp->fw_needed);
8238 release_firmware(tp->fw);
8243 /* We no longer need firmware; we have it. */
8244 tp->fw_needed = NULL;
8248 static bool tg3_enable_msix(struct tg3 *tp)
8250 int i, rc, cpus = num_online_cpus();
8251 struct msix_entry msix_ent[tp->irq_max];
8254 /* Just fall back to the simpler MSI mode. */
8258 * We want as many RX rings enabled as there are CPUs.
8259 * The first MSI-X vector only deals with link interrupts, etc.,
8260 * so we add one to the number of vectors we are requesting.
8262 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8264 for (i = 0; i < tp->irq_max; i++) {
8265 msix_ent[i].entry = i;
8266 msix_ent[i].vector = 0;
8269 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8271 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
8273 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8276 "%s: Requested %d MSI-X vectors, received %d\n",
8277 tp->dev->name, tp->irq_cnt, rc);
8281 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8283 for (i = 0; i < tp->irq_max; i++)
8284 tp->napi[i].irq_vec = msix_ent[i].vector;
8286 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8291 static void tg3_ints_init(struct tg3 *tp)
8293 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8294 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8295 /* All MSI supporting chips should support tagged
8296 * status. Assert that this is the case.
8298 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8299 "Not using MSI.\n", tp->dev->name);
8303 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8304 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8305 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8306 pci_enable_msi(tp->pdev) == 0)
8307 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8309 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8310 u32 msi_mode = tr32(MSGINT_MODE);
8311 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8312 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8313 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8316 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8318 tp->napi[0].irq_vec = tp->pdev->irq;
8319 tp->dev->real_num_tx_queues = 1;
8323 static void tg3_ints_fini(struct tg3 *tp)
8325 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8326 pci_disable_msix(tp->pdev);
8327 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8328 pci_disable_msi(tp->pdev);
8329 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8330 tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
8333 static int tg3_open(struct net_device *dev)
8335 struct tg3 *tp = netdev_priv(dev);
8338 if (tp->fw_needed) {
8339 err = tg3_request_firmware(tp);
8340 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8344 printk(KERN_WARNING "%s: TSO capability disabled.\n",
8346 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8347 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8348 printk(KERN_NOTICE "%s: TSO capability restored.\n",
8350 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8354 netif_carrier_off(tp->dev);
8356 err = tg3_set_power_state(tp, PCI_D0);
8360 tg3_full_lock(tp, 0);
8362 tg3_disable_ints(tp);
8363 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8365 tg3_full_unlock(tp);
8368 * Set up interrupts first so we know how
8369 * many NAPI resources to allocate
8373 /* The placement of this call is tied
8374 * to the setup and use of Host TX descriptors.
8376 err = tg3_alloc_consistent(tp);
8380 tg3_napi_enable(tp);
8382 for (i = 0; i < tp->irq_cnt; i++) {
8383 struct tg3_napi *tnapi = &tp->napi[i];
8384 err = tg3_request_irq(tp, i);
8386 for (i--; i >= 0; i--)
8387 free_irq(tnapi->irq_vec, tnapi);
8395 tg3_full_lock(tp, 0);
8397 err = tg3_init_hw(tp, 1);
8399 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
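/* Tagged-status chips only need the 1 Hz maintenance timer; other
 * chips are polled at 10 Hz to cope with the racy non-tagged
 * status block protocol handled in tg3_timer().
 */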
8402 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8403 tp->timer_offset = HZ;
8405 tp->timer_offset = HZ / 10;
8407 BUG_ON(tp->timer_offset > HZ);
8408 tp->timer_counter = tp->timer_multiplier =
8409 (HZ / tp->timer_offset);
8410 tp->asf_counter = tp->asf_multiplier =
8411 ((HZ / tp->timer_offset) * 2);
8413 init_timer(&tp->timer);
8414 tp->timer.expires = jiffies + tp->timer_offset;
8415 tp->timer.data = (unsigned long) tp;
8416 tp->timer.function = tg3_timer;
8419 tg3_full_unlock(tp);
8424 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8425 err = tg3_test_msi(tp);
8428 tg3_full_lock(tp, 0);
8429 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8431 tg3_full_unlock(tp);
8436 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8437 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8438 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
8439 u32 val = tr32(PCIE_TRANSACTION_CFG);
8441 tw32(PCIE_TRANSACTION_CFG,
8442 val | PCIE_TRANS_CFG_1SHOT_MSI);
8448 tg3_full_lock(tp, 0);
8450 add_timer(&tp->timer);
8451 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8452 tg3_enable_ints(tp);
8454 tg3_full_unlock(tp);
8456 netif_tx_start_all_queues(dev);
8461 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8462 struct tg3_napi *tnapi = &tp->napi[i];
8463 free_irq(tnapi->irq_vec, tnapi);
8467 tg3_napi_disable(tp);
8468 tg3_free_consistent(tp);
8476 /*static*/ void tg3_dump_state(struct tg3 *tp)
8478 u32 val32, val32_2, val32_3, val32_4, val32_5;
8481 struct tg3_hw_status *sblk = tp->napi[0]->hw_status;
8483 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8484 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8485 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8489 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8490 tr32(MAC_MODE), tr32(MAC_STATUS));
8491 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8492 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8493 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8494 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8495 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8496 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8498 /* Send data initiator control block */
8499 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8500 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8501 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8502 tr32(SNDDATAI_STATSCTRL));
8504 /* Send data completion control block */
8505 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8507 /* Send BD ring selector block */
8508 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8509 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8511 /* Send BD initiator control block */
8512 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8513 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8515 /* Send BD completion control block */
8516 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8518 /* Receive list placement control block */
8519 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8520 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8521 printk(" RCVLPC_STATSCTRL[%08x]\n",
8522 tr32(RCVLPC_STATSCTRL));
8524 /* Receive data and receive BD initiator control block */
8525 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8526 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8528 /* Receive data completion control block */
8529 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8532 /* Receive BD initiator control block */
8533 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8534 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8536 /* Receive BD completion control block */
8537 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8538 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8540 /* Receive list selector control block */
8541 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8542 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8544 /* Mbuf cluster free block */
8545 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8546 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8548 /* Host coalescing control block */
8549 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8550 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8551 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8552 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8553 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8554 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8555 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8556 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8557 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8558 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8559 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8560 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8562 /* Memory arbiter control block */
8563 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8564 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8566 /* Buffer manager control block */
8567 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8568 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8569 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8570 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8571 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8572 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8573 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8574 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8576 /* Read DMA control block */
8577 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8578 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8580 /* Write DMA control block */
8581 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8582 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8584 /* DMA completion block */
8585 printk("DEBUG: DMAC_MODE[%08x]\n",
8589 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8590 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8591 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8592 tr32(GRC_LOCAL_CTRL));
8595 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8596 tr32(RCVDBDI_JUMBO_BD + 0x0),
8597 tr32(RCVDBDI_JUMBO_BD + 0x4),
8598 tr32(RCVDBDI_JUMBO_BD + 0x8),
8599 tr32(RCVDBDI_JUMBO_BD + 0xc));
8600 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8601 tr32(RCVDBDI_STD_BD + 0x0),
8602 tr32(RCVDBDI_STD_BD + 0x4),
8603 tr32(RCVDBDI_STD_BD + 0x8),
8604 tr32(RCVDBDI_STD_BD + 0xc));
8605 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8606 tr32(RCVDBDI_MINI_BD + 0x0),
8607 tr32(RCVDBDI_MINI_BD + 0x4),
8608 tr32(RCVDBDI_MINI_BD + 0x8),
8609 tr32(RCVDBDI_MINI_BD + 0xc));
8611 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8612 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8613 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8614 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8615 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8616 val32, val32_2, val32_3, val32_4);
8618 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8619 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8620 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8621 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8622 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8623 val32, val32_2, val32_3, val32_4);
8625 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8626 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8627 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8628 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8629 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8630 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8631 val32, val32_2, val32_3, val32_4, val32_5);
8633 /* SW status block */
8635 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8638 sblk->rx_jumbo_consumer,
8640 sblk->rx_mini_consumer,
8641 sblk->idx[0].rx_producer,
8642 sblk->idx[0].tx_consumer);
8644 /* SW statistics block */
8645 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8646 ((u32 *)tp->hw_stats)[0],
8647 ((u32 *)tp->hw_stats)[1],
8648 ((u32 *)tp->hw_stats)[2],
8649 ((u32 *)tp->hw_stats)[3]);
8652 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8653 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8654 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8655 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8656 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8658 /* NIC side send descriptors. */
8659 for (i = 0; i < 6; i++) {
8662 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8663 + (i * sizeof(struct tg3_tx_buffer_desc));
8664 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8666 readl(txd + 0x0), readl(txd + 0x4),
8667 readl(txd + 0x8), readl(txd + 0xc));
8670 /* NIC side RX descriptors. */
8671 for (i = 0; i < 6; i++) {
8674 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8675 + (i * sizeof(struct tg3_rx_buffer_desc));
8676 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8678 readl(rxd + 0x0), readl(rxd + 0x4),
8679 readl(rxd + 0x8), readl(rxd + 0xc));
8680 rxd += (4 * sizeof(u32));
8681 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8683 readl(rxd + 0x0), readl(rxd + 0x4),
8684 readl(rxd + 0x8), readl(rxd + 0xc));
8687 for (i = 0; i < 6; i++) {
8690 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8691 + (i * sizeof(struct tg3_rx_buffer_desc));
8692 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8694 readl(rxd + 0x0), readl(rxd + 0x4),
8695 readl(rxd + 0x8), readl(rxd + 0xc));
8696 rxd += (4 * sizeof(u32));
8697 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8699 readl(rxd + 0x0), readl(rxd + 0x4),
8700 readl(rxd + 0x8), readl(rxd + 0xc));
8705 static struct net_device_stats *tg3_get_stats(struct net_device *);
8706 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8708 static int tg3_close(struct net_device *dev)
8711 struct tg3 *tp = netdev_priv(dev);
8713 tg3_napi_disable(tp);
8714 cancel_work_sync(&tp->reset_task);
8716 netif_tx_stop_all_queues(dev);
8718 del_timer_sync(&tp->timer);
8722 tg3_full_lock(tp, 1);
8727 tg3_disable_ints(tp);
8729 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8731 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8733 tg3_full_unlock(tp);
8735 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8736 struct tg3_napi *tnapi = &tp->napi[i];
8737 free_irq(tnapi->irq_vec, tnapi);
8742 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8743 sizeof(tp->net_stats_prev));
8744 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8745 sizeof(tp->estats_prev));
8747 tg3_free_consistent(tp);
8749 tg3_set_power_state(tp, PCI_D3hot);
8751 netif_carrier_off(tp->dev);
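/* Collapse a 64-bit high/low hardware counter into an unsigned long;
 * on 32-bit hosts only the low word fits.
 */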
8756 static inline unsigned long get_stat64(tg3_stat64_t *val)
8760 #if (BITS_PER_LONG == 32)
8763 ret = ((u64)val->high << 32) | ((u64)val->low);
8768 static inline u64 get_estat64(tg3_stat64_t *val)
8770 return ((u64)val->high << 32) | ((u64)val->low);
8773 static unsigned long calc_crc_errors(struct tg3 *tp)
8775 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8777 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8778 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8782 spin_lock_bh(&tp->lock);
8783 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8784 tg3_writephy(tp, MII_TG3_TEST1,
8785 val | MII_TG3_TEST1_CRC_EN);
8786 tg3_readphy(tp, 0x14, &val);
8789 spin_unlock_bh(&tp->lock);
8791 tp->phy_crc_errors += val;
8793 return tp->phy_crc_errors;
8796 return get_stat64(&hw_stats->rx_fcs_errors);
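/* Add the live 64-bit hardware count for one counter onto the value
 * snapshotted at the last close, so ethtool statistics keep growing
 * across down/up cycles.
 */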
8799 #define ESTAT_ADD(member) \
8800 estats->member = old_estats->member + \
8801 get_estat64(&hw_stats->member)
8803 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8805 struct tg3_ethtool_stats *estats = &tp->estats;
8806 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8807 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8812 ESTAT_ADD(rx_octets);
8813 ESTAT_ADD(rx_fragments);
8814 ESTAT_ADD(rx_ucast_packets);
8815 ESTAT_ADD(rx_mcast_packets);
8816 ESTAT_ADD(rx_bcast_packets);
8817 ESTAT_ADD(rx_fcs_errors);
8818 ESTAT_ADD(rx_align_errors);
8819 ESTAT_ADD(rx_xon_pause_rcvd);
8820 ESTAT_ADD(rx_xoff_pause_rcvd);
8821 ESTAT_ADD(rx_mac_ctrl_rcvd);
8822 ESTAT_ADD(rx_xoff_entered);
8823 ESTAT_ADD(rx_frame_too_long_errors);
8824 ESTAT_ADD(rx_jabbers);
8825 ESTAT_ADD(rx_undersize_packets);
8826 ESTAT_ADD(rx_in_length_errors);
8827 ESTAT_ADD(rx_out_length_errors);
8828 ESTAT_ADD(rx_64_or_less_octet_packets);
8829 ESTAT_ADD(rx_65_to_127_octet_packets);
8830 ESTAT_ADD(rx_128_to_255_octet_packets);
8831 ESTAT_ADD(rx_256_to_511_octet_packets);
8832 ESTAT_ADD(rx_512_to_1023_octet_packets);
8833 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8834 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8835 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8836 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8837 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8839 ESTAT_ADD(tx_octets);
8840 ESTAT_ADD(tx_collisions);
8841 ESTAT_ADD(tx_xon_sent);
8842 ESTAT_ADD(tx_xoff_sent);
8843 ESTAT_ADD(tx_flow_control);
8844 ESTAT_ADD(tx_mac_errors);
8845 ESTAT_ADD(tx_single_collisions);
8846 ESTAT_ADD(tx_mult_collisions);
8847 ESTAT_ADD(tx_deferred);
8848 ESTAT_ADD(tx_excessive_collisions);
8849 ESTAT_ADD(tx_late_collisions);
8850 ESTAT_ADD(tx_collide_2times);
8851 ESTAT_ADD(tx_collide_3times);
8852 ESTAT_ADD(tx_collide_4times);
8853 ESTAT_ADD(tx_collide_5times);
8854 ESTAT_ADD(tx_collide_6times);
8855 ESTAT_ADD(tx_collide_7times);
8856 ESTAT_ADD(tx_collide_8times);
8857 ESTAT_ADD(tx_collide_9times);
8858 ESTAT_ADD(tx_collide_10times);
8859 ESTAT_ADD(tx_collide_11times);
8860 ESTAT_ADD(tx_collide_12times);
8861 ESTAT_ADD(tx_collide_13times);
8862 ESTAT_ADD(tx_collide_14times);
8863 ESTAT_ADD(tx_collide_15times);
8864 ESTAT_ADD(tx_ucast_packets);
8865 ESTAT_ADD(tx_mcast_packets);
8866 ESTAT_ADD(tx_bcast_packets);
8867 ESTAT_ADD(tx_carrier_sense_errors);
8868 ESTAT_ADD(tx_discards);
8869 ESTAT_ADD(tx_errors);
8871 ESTAT_ADD(dma_writeq_full);
8872 ESTAT_ADD(dma_write_prioq_full);
8873 ESTAT_ADD(rxbds_empty);
8874 ESTAT_ADD(rx_discards);
8875 ESTAT_ADD(rx_errors);
8876 ESTAT_ADD(rx_threshold_hit);
8878 ESTAT_ADD(dma_readq_full);
8879 ESTAT_ADD(dma_read_prioq_full);
8880 ESTAT_ADD(tx_comp_queue_full);
8882 ESTAT_ADD(ring_set_send_prod_index);
8883 ESTAT_ADD(ring_status_update);
8884 ESTAT_ADD(nic_irqs);
8885 ESTAT_ADD(nic_avoided_irqs);
8886 ESTAT_ADD(nic_tx_threshold_hit);
8891 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8893 struct tg3 *tp = netdev_priv(dev);
8894 struct net_device_stats *stats = &tp->net_stats;
8895 struct net_device_stats *old_stats = &tp->net_stats_prev;
8896 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8901 stats->rx_packets = old_stats->rx_packets +
8902 get_stat64(&hw_stats->rx_ucast_packets) +
8903 get_stat64(&hw_stats->rx_mcast_packets) +
8904 get_stat64(&hw_stats->rx_bcast_packets);
8906 stats->tx_packets = old_stats->tx_packets +
8907 get_stat64(&hw_stats->tx_ucast_packets) +
8908 get_stat64(&hw_stats->tx_mcast_packets) +
8909 get_stat64(&hw_stats->tx_bcast_packets);
8911 stats->rx_bytes = old_stats->rx_bytes +
8912 get_stat64(&hw_stats->rx_octets);
8913 stats->tx_bytes = old_stats->tx_bytes +
8914 get_stat64(&hw_stats->tx_octets);
8916 stats->rx_errors = old_stats->rx_errors +
8917 get_stat64(&hw_stats->rx_errors);
8918 stats->tx_errors = old_stats->tx_errors +
8919 get_stat64(&hw_stats->tx_errors) +
8920 get_stat64(&hw_stats->tx_mac_errors) +
8921 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8922 get_stat64(&hw_stats->tx_discards);
8924 stats->multicast = old_stats->multicast +
8925 get_stat64(&hw_stats->rx_mcast_packets);
8926 stats->collisions = old_stats->collisions +
8927 get_stat64(&hw_stats->tx_collisions);
8929 stats->rx_length_errors = old_stats->rx_length_errors +
8930 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8931 get_stat64(&hw_stats->rx_undersize_packets);
8933 stats->rx_over_errors = old_stats->rx_over_errors +
8934 get_stat64(&hw_stats->rxbds_empty);
8935 stats->rx_frame_errors = old_stats->rx_frame_errors +
8936 get_stat64(&hw_stats->rx_align_errors);
8937 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8938 get_stat64(&hw_stats->tx_discards);
8939 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8940 get_stat64(&hw_stats->tx_carrier_sense_errors);
8942 stats->rx_crc_errors = old_stats->rx_crc_errors +
8943 calc_crc_errors(tp);
8945 stats->rx_missed_errors = old_stats->rx_missed_errors +
8946 get_stat64(&hw_stats->rx_discards);
8951 static inline u32 calc_crc(unsigned char *buf, int len)
8959 for (j = 0; j < len; j++) {
8962 for (k = 0; k < 8; k++) {
8976 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8978 /* accept or reject all multicast frames */
8979 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8980 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8981 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8982 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8985 static void __tg3_set_rx_mode(struct net_device *dev)
8987 struct tg3 *tp = netdev_priv(dev);
8990 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8991 RX_MODE_KEEP_VLAN_TAG);
8993 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8996 #if TG3_VLAN_TAG_USED
8998 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8999 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9001 /* By definition, VLAN is disabled always in this
9004 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9005 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9008 if (dev->flags & IFF_PROMISC) {
9009 /* Promiscuous mode. */
9010 rx_mode |= RX_MODE_PROMISC;
9011 } else if (dev->flags & IFF_ALLMULTI) {
9012 /* Accept all multicast. */
9013 tg3_set_multi (tp, 1);
9014 } else if (dev->mc_count < 1) {
9015 /* Reject all multicast. */
9016 tg3_set_multi (tp, 0);
9018 /* Accept one or more multicast(s). */
9019 struct dev_mc_list *mclist;
9021 u32 mc_filter[4] = { 0, };
9026 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
9027 i++, mclist = mclist->next) {
9029 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
9031 regidx = (bit & 0x60) >> 5;
9033 mc_filter[regidx] |= (1 << bit);
9036 tw32(MAC_HASH_REG_0, mc_filter[0]);
9037 tw32(MAC_HASH_REG_1, mc_filter[1]);
9038 tw32(MAC_HASH_REG_2, mc_filter[2]);
9039 tw32(MAC_HASH_REG_3, mc_filter[3]);
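/* Sketch of the hash scheme above (partly an assumption, since the reduction
 * of 'crc' to 'bit' is not shown here): the 32-bit CRC of each multicast
 * address is reduced to a 7-bit index, bits 6:5 of which pick one of the four
 * MAC_HASH_REG_x registers (regidx) while the low five bits pick the bit
 * within that register, so the four registers together form a single 128-bit
 * hash filter.
 */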
9042 if (rx_mode != tp->rx_mode) {
9043 tp->rx_mode = rx_mode;
9044 tw32_f(MAC_RX_MODE, rx_mode);
9049 static void tg3_set_rx_mode(struct net_device *dev)
9051 struct tg3 *tp = netdev_priv(dev);
9053 if (!netif_running(dev))
9056 tg3_full_lock(tp, 0);
9057 __tg3_set_rx_mode(dev);
9058 tg3_full_unlock(tp);
9061 #define TG3_REGDUMP_LEN (32 * 1024)
9063 static int tg3_get_regs_len(struct net_device *dev)
9065 return TG3_REGDUMP_LEN;
9068 static void tg3_get_regs(struct net_device *dev,
9069 struct ethtool_regs *regs, void *_p)
9072 struct tg3 *tp = netdev_priv(dev);
9078 memset(p, 0, TG3_REGDUMP_LEN);
9080 if (tp->link_config.phy_is_low_power)
9083 tg3_full_lock(tp, 0);
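/* The helpers below reposition 'p' to (orig_p + base) before copying, so
 * every register value lands at the same byte offset in the snapshot that it
 * occupies in the device's register space; ranges that are never dumped stay
 * zero from the memset() above.
 */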
9085 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9086 #define GET_REG32_LOOP(base,len) \
9087 do { p = (u32 *)(orig_p + (base)); \
9088 for (i = 0; i < len; i += 4) \
9089 __GET_REG32((base) + i); \
9091 #define GET_REG32_1(reg) \
9092 do { p = (u32 *)(orig_p + (reg)); \
9093 __GET_REG32((reg)); \
9096 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9097 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9098 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9099 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9100 GET_REG32_1(SNDDATAC_MODE);
9101 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9102 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9103 GET_REG32_1(SNDBDC_MODE);
9104 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9105 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9106 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9107 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9108 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9109 GET_REG32_1(RCVDCC_MODE);
9110 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9111 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9112 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9113 GET_REG32_1(MBFREE_MODE);
9114 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9115 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9116 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9117 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9118 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9119 GET_REG32_1(RX_CPU_MODE);
9120 GET_REG32_1(RX_CPU_STATE);
9121 GET_REG32_1(RX_CPU_PGMCTR);
9122 GET_REG32_1(RX_CPU_HWBKPT);
9123 GET_REG32_1(TX_CPU_MODE);
9124 GET_REG32_1(TX_CPU_STATE);
9125 GET_REG32_1(TX_CPU_PGMCTR);
9126 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9127 GET_REG32_LOOP(FTQ_RESET, 0x120);
9128 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9129 GET_REG32_1(DMAC_MODE);
9130 GET_REG32_LOOP(GRC_MODE, 0x4c);
9131 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9132 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9135 #undef GET_REG32_LOOP
9138 tg3_full_unlock(tp);
9141 static int tg3_get_eeprom_len(struct net_device *dev)
9143 struct tg3 *tp = netdev_priv(dev);
9145 return tp->nvram_size;
9148 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9150 struct tg3 *tp = netdev_priv(dev);
9153 u32 i, offset, len, b_offset, b_count;
9156 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9159 if (tp->link_config.phy_is_low_power)
9162 offset = eeprom->offset;
9166 eeprom->magic = TG3_EEPROM_MAGIC;
9169 /* adjustments to start on required 4 byte boundary */
9170 b_offset = offset & 3;
9171 b_count = 4 - b_offset;
9172 if (b_count > len) {
9173 /* i.e. offset=1 len=2 */
9176 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9179 memcpy(data, ((char*)&val) + b_offset, b_count);
9182 eeprom->len += b_count;
9185 /* read bytes up to the last 4 byte boundary */
9186 pd = &data[eeprom->len];
9187 for (i = 0; i < (len - (len & 3)); i += 4) {
9188 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9193 memcpy(pd + i, &val, 4);
9198 /* read last bytes not ending on 4 byte boundary */
9199 pd = &data[eeprom->len];
9201 b_offset = offset + len - b_count;
9202 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9205 memcpy(pd, &val, b_count);
9206 eeprom->len += b_count;
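/* Worked example of the three-phase read above, with illustrative values:
 * offset = 5, len = 10 gives b_offset = 1, so the head read fetches the word
 * at offset 4 and copies 3 bytes (5..7); the aligned loop then reads one word
 * covering bytes 8..11; the tail read fetches the word at offset 12 and
 * copies the last 3 bytes (12..14), leaving eeprom->len at 10 as requested.
 */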
9211 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9213 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9215 struct tg3 *tp = netdev_priv(dev);
9217 u32 offset, len, b_offset, odd_len;
9221 if (tp->link_config.phy_is_low_power)
9224 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9225 eeprom->magic != TG3_EEPROM_MAGIC)
9228 offset = eeprom->offset;
9231 if ((b_offset = (offset & 3))) {
9232 /* adjustments to start on required 4 byte boundary */
9233 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9244 /* adjustments to end on required 4 byte boundary */
9246 len = (len + 3) & ~3;
9247 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9253 if (b_offset || odd_len) {
9254 buf = kmalloc(len, GFP_KERNEL);
9258 memcpy(buf, &start, 4);
9260 memcpy(buf+len-4, &end, 4);
9261 memcpy(buf + b_offset, data, eeprom->len);
9264 ret = tg3_nvram_write_block(tp, offset, len, buf);
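/* Unaligned writes are handled as a read-modify-write: the words containing
 * the first and last bytes of the target range are read back into 'start'
 * and 'end', copied into the scratch buffer around the caller's data, and
 * written out together, so flash bytes outside the requested range are
 * preserved.
 */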
9272 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9274 struct tg3 *tp = netdev_priv(dev);
9276 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9277 struct phy_device *phydev;
9278 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9280 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9281 return phy_ethtool_gset(phydev, cmd);
9284 cmd->supported = (SUPPORTED_Autoneg);
9286 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9287 cmd->supported |= (SUPPORTED_1000baseT_Half |
9288 SUPPORTED_1000baseT_Full);
9290 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9291 cmd->supported |= (SUPPORTED_100baseT_Half |
9292 SUPPORTED_100baseT_Full |
9293 SUPPORTED_10baseT_Half |
9294 SUPPORTED_10baseT_Full |
9296 cmd->port = PORT_TP;
9298 cmd->supported |= SUPPORTED_FIBRE;
9299 cmd->port = PORT_FIBRE;
9302 cmd->advertising = tp->link_config.advertising;
9303 if (netif_running(dev)) {
9304 cmd->speed = tp->link_config.active_speed;
9305 cmd->duplex = tp->link_config.active_duplex;
9307 cmd->phy_address = tp->phy_addr;
9308 cmd->transceiver = XCVR_INTERNAL;
9309 cmd->autoneg = tp->link_config.autoneg;
9315 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9317 struct tg3 *tp = netdev_priv(dev);
9319 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9320 struct phy_device *phydev;
9321 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9323 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9324 return phy_ethtool_sset(phydev, cmd);
9327 if (cmd->autoneg != AUTONEG_ENABLE &&
9328 cmd->autoneg != AUTONEG_DISABLE)
9331 if (cmd->autoneg == AUTONEG_DISABLE &&
9332 cmd->duplex != DUPLEX_FULL &&
9333 cmd->duplex != DUPLEX_HALF)
9336 if (cmd->autoneg == AUTONEG_ENABLE) {
9337 u32 mask = ADVERTISED_Autoneg |
9339 ADVERTISED_Asym_Pause;
9341 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9342 mask |= ADVERTISED_1000baseT_Half |
9343 ADVERTISED_1000baseT_Full;
9345 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9346 mask |= ADVERTISED_100baseT_Half |
9347 ADVERTISED_100baseT_Full |
9348 ADVERTISED_10baseT_Half |
9349 ADVERTISED_10baseT_Full |
9352 mask |= ADVERTISED_FIBRE;
9354 if (cmd->advertising & ~mask)
9357 mask &= (ADVERTISED_1000baseT_Half |
9358 ADVERTISED_1000baseT_Full |
9359 ADVERTISED_100baseT_Half |
9360 ADVERTISED_100baseT_Full |
9361 ADVERTISED_10baseT_Half |
9362 ADVERTISED_10baseT_Full);
9364 cmd->advertising &= mask;
9366 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9367 if (cmd->speed != SPEED_1000)
9370 if (cmd->duplex != DUPLEX_FULL)
9373 if (cmd->speed != SPEED_100 &&
9374 cmd->speed != SPEED_10)
9379 tg3_full_lock(tp, 0);
9381 tp->link_config.autoneg = cmd->autoneg;
9382 if (cmd->autoneg == AUTONEG_ENABLE) {
9383 tp->link_config.advertising = (cmd->advertising |
9384 ADVERTISED_Autoneg);
9385 tp->link_config.speed = SPEED_INVALID;
9386 tp->link_config.duplex = DUPLEX_INVALID;
9388 tp->link_config.advertising = 0;
9389 tp->link_config.speed = cmd->speed;
9390 tp->link_config.duplex = cmd->duplex;
9393 tp->link_config.orig_speed = tp->link_config.speed;
9394 tp->link_config.orig_duplex = tp->link_config.duplex;
9395 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9397 if (netif_running(dev))
9398 tg3_setup_phy(tp, 1);
9400 tg3_full_unlock(tp);
9405 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9407 struct tg3 *tp = netdev_priv(dev);
9409 strcpy(info->driver, DRV_MODULE_NAME);
9410 strcpy(info->version, DRV_MODULE_VERSION);
9411 strcpy(info->fw_version, tp->fw_ver);
9412 strcpy(info->bus_info, pci_name(tp->pdev));
9415 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9417 struct tg3 *tp = netdev_priv(dev);
9419 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9420 device_can_wakeup(&tp->pdev->dev))
9421 wol->supported = WAKE_MAGIC;
9425 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9426 device_can_wakeup(&tp->pdev->dev))
9427 wol->wolopts = WAKE_MAGIC;
9428 memset(&wol->sopass, 0, sizeof(wol->sopass));
9431 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9433 struct tg3 *tp = netdev_priv(dev);
9434 struct device *dp = &tp->pdev->dev;
9436 if (wol->wolopts & ~WAKE_MAGIC)
9438 if ((wol->wolopts & WAKE_MAGIC) &&
9439 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9442 spin_lock_bh(&tp->lock);
9443 if (wol->wolopts & WAKE_MAGIC) {
9444 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9445 device_set_wakeup_enable(dp, true);
9447 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9448 device_set_wakeup_enable(dp, false);
9450 spin_unlock_bh(&tp->lock);
9455 static u32 tg3_get_msglevel(struct net_device *dev)
9457 struct tg3 *tp = netdev_priv(dev);
9458 return tp->msg_enable;
9461 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9463 struct tg3 *tp = netdev_priv(dev);
9464 tp->msg_enable = value;
9467 static int tg3_set_tso(struct net_device *dev, u32 value)
9469 struct tg3 *tp = netdev_priv(dev);
9471 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9476 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9477 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
9479 dev->features |= NETIF_F_TSO6;
9480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9481 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9482 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9486 dev->features |= NETIF_F_TSO_ECN;
9488 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9490 return ethtool_op_set_tso(dev, value);
9493 static int tg3_nway_reset(struct net_device *dev)
9495 struct tg3 *tp = netdev_priv(dev);
9498 if (!netif_running(dev))
9501 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9504 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9505 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9507 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9511 spin_lock_bh(&tp->lock);
9513 tg3_readphy(tp, MII_BMCR, &bmcr);
9514 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9515 ((bmcr & BMCR_ANENABLE) ||
9516 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9517 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9521 spin_unlock_bh(&tp->lock);
9527 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9529 struct tg3 *tp = netdev_priv(dev);
9531 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9532 ering->rx_mini_max_pending = 0;
9533 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9534 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9536 ering->rx_jumbo_max_pending = 0;
9538 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9540 ering->rx_pending = tp->rx_pending;
9541 ering->rx_mini_pending = 0;
9542 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9543 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9545 ering->rx_jumbo_pending = 0;
9547 ering->tx_pending = tp->napi[0].tx_pending;
9550 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9552 struct tg3 *tp = netdev_priv(dev);
9553 int i, irq_sync = 0, err = 0;
9555 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9556 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9557 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9558 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9559 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9560 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9563 if (netif_running(dev)) {
9569 tg3_full_lock(tp, irq_sync);
9571 tp->rx_pending = ering->rx_pending;
9573 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9574 tp->rx_pending > 63)
9575 tp->rx_pending = 63;
9576 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9578 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9579 tp->napi[i].tx_pending = ering->tx_pending;
9581 if (netif_running(dev)) {
9582 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9583 err = tg3_restart_hw(tp, 1);
9585 tg3_netif_start(tp);
9588 tg3_full_unlock(tp);
9590 if (irq_sync && !err)
9596 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9598 struct tg3 *tp = netdev_priv(dev);
9600 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9602 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9603 epause->rx_pause = 1;
9605 epause->rx_pause = 0;
9607 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9608 epause->tx_pause = 1;
9610 epause->tx_pause = 0;
9613 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9615 struct tg3 *tp = netdev_priv(dev);
9618 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9619 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9622 if (epause->autoneg) {
9624 struct phy_device *phydev;
9626 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9628 if (epause->rx_pause) {
9629 if (epause->tx_pause)
9630 newadv = ADVERTISED_Pause;
9632 newadv = ADVERTISED_Pause |
9633 ADVERTISED_Asym_Pause;
9634 } else if (epause->tx_pause) {
9635 newadv = ADVERTISED_Asym_Pause;
9639 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9640 u32 oldadv = phydev->advertising &
9642 ADVERTISED_Asym_Pause);
9643 if (oldadv != newadv) {
9644 phydev->advertising &=
9645 ~(ADVERTISED_Pause |
9646 ADVERTISED_Asym_Pause);
9647 phydev->advertising |= newadv;
9648 err = phy_start_aneg(phydev);
9651 tp->link_config.advertising &=
9652 ~(ADVERTISED_Pause |
9653 ADVERTISED_Asym_Pause);
9654 tp->link_config.advertising |= newadv;
9657 if (epause->rx_pause)
9658 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9660 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9662 if (epause->tx_pause)
9663 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9665 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9667 if (netif_running(dev))
9668 tg3_setup_flow_control(tp, 0, 0);
9673 if (netif_running(dev)) {
9678 tg3_full_lock(tp, irq_sync);
9680 if (epause->autoneg)
9681 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9683 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9684 if (epause->rx_pause)
9685 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9687 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9688 if (epause->tx_pause)
9689 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9691 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9693 if (netif_running(dev)) {
9694 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9695 err = tg3_restart_hw(tp, 1);
9697 tg3_netif_start(tp);
9700 tg3_full_unlock(tp);
9706 static u32 tg3_get_rx_csum(struct net_device *dev)
9708 struct tg3 *tp = netdev_priv(dev);
9709 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9712 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9714 struct tg3 *tp = netdev_priv(dev);
9716 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9722 spin_lock_bh(&tp->lock);
9724 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9726 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9727 spin_unlock_bh(&tp->lock);
9732 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9734 struct tg3 *tp = netdev_priv(dev);
9736 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9742 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9743 ethtool_op_set_tx_ipv6_csum(dev, data);
9745 ethtool_op_set_tx_csum(dev, data);
9750 static int tg3_get_sset_count (struct net_device *dev, int sset)
9754 return TG3_NUM_TEST;
9756 return TG3_NUM_STATS;
9762 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9764 switch (stringset) {
9766 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9769 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9772 WARN_ON(1); /* we need a WARN() */
9777 static int tg3_phys_id(struct net_device *dev, u32 data)
9779 struct tg3 *tp = netdev_priv(dev);
9782 if (!netif_running(tp->dev))
9786 data = UINT_MAX / 2;
9788 for (i = 0; i < (data * 2); i++) {
9790 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9791 LED_CTRL_1000MBPS_ON |
9792 LED_CTRL_100MBPS_ON |
9793 LED_CTRL_10MBPS_ON |
9794 LED_CTRL_TRAFFIC_OVERRIDE |
9795 LED_CTRL_TRAFFIC_BLINK |
9796 LED_CTRL_TRAFFIC_LED);
9799 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9800 LED_CTRL_TRAFFIC_OVERRIDE);
9802 if (msleep_interruptible(500))
9805 tw32(MAC_LED_CTRL, tp->led_ctrl);
9809 static void tg3_get_ethtool_stats (struct net_device *dev,
9810 struct ethtool_stats *estats, u64 *tmp_stats)
9812 struct tg3 *tp = netdev_priv(dev);
9813 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9816 #define NVRAM_TEST_SIZE 0x100
9817 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9818 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9819 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9820 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9821 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9823 static int tg3_test_nvram(struct tg3 *tp)
9827 int i, j, k, err = 0, size;
9829 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9832 if (tg3_nvram_read(tp, 0, &magic) != 0)
9835 if (magic == TG3_EEPROM_MAGIC)
9836 size = NVRAM_TEST_SIZE;
9837 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9838 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9839 TG3_EEPROM_SB_FORMAT_1) {
9840 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9841 case TG3_EEPROM_SB_REVISION_0:
9842 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9844 case TG3_EEPROM_SB_REVISION_2:
9845 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9847 case TG3_EEPROM_SB_REVISION_3:
9848 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9855 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9856 size = NVRAM_SELFBOOT_HW_SIZE;
9860 buf = kmalloc(size, GFP_KERNEL);
9865 for (i = 0, j = 0; i < size; i += 4, j++) {
9866 err = tg3_nvram_read_be32(tp, i, &buf[j]);
9873 /* Selfboot format */
9874 magic = be32_to_cpu(buf[0]);
9875 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9876 TG3_EEPROM_MAGIC_FW) {
9877 u8 *buf8 = (u8 *) buf, csum8 = 0;
9879 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9880 TG3_EEPROM_SB_REVISION_2) {
9881 /* For rev 2, the csum doesn't include the MBA. */
9882 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9884 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9887 for (i = 0; i < size; i++)
9900 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9901 TG3_EEPROM_MAGIC_HW) {
9902 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9903 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9904 u8 *buf8 = (u8 *) buf;
9906 /* Separate the parity bits and the data bytes. */
9907 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9908 if ((i == 0) || (i == 8)) {
9912 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9913 parity[k++] = buf8[i] & msk;
9920 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9921 parity[k++] = buf8[i] & msk;
9924 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9925 parity[k++] = buf8[i] & msk;
9928 data[j++] = buf8[i];
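/* The verification loop below expects odd parity: a data byte with an odd
 * population count must have its stored parity bit clear, and a byte with an
 * even population count must have it set.
 */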
9932 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9933 u8 hw8 = hweight8(data[i]);
9935 if ((hw8 & 0x1) && parity[i])
9937 else if (!(hw8 & 0x1) && !parity[i])
9944 /* Bootstrap checksum at offset 0x10 */
9945 csum = calc_crc((unsigned char *) buf, 0x10);
9946 if (csum != be32_to_cpu(buf[0x10/4]))
9949 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
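/* The 0x88-byte manufacturing block therefore spans offsets 0x74..0xfb, and
 * its checksum immediately follows at 0xfc (0x74 + 0x88 = 0xfc).
 */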
9950 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9951 if (csum != be32_to_cpu(buf[0xfc/4]))
9961 #define TG3_SERDES_TIMEOUT_SEC 2
9962 #define TG3_COPPER_TIMEOUT_SEC 6
9964 static int tg3_test_link(struct tg3 *tp)
9968 if (!netif_running(tp->dev))
9971 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9972 max = TG3_SERDES_TIMEOUT_SEC;
9974 max = TG3_COPPER_TIMEOUT_SEC;
9976 for (i = 0; i < max; i++) {
9977 if (netif_carrier_ok(tp->dev))
9980 if (msleep_interruptible(1000))
9987 /* Only test the commonly used registers */
9988 static int tg3_test_registers(struct tg3 *tp)
9990 int i, is_5705, is_5750;
9991 u32 offset, read_mask, write_mask, val, save_val, read_val;
9995 #define TG3_FL_5705 0x1
9996 #define TG3_FL_NOT_5705 0x2
9997 #define TG3_FL_NOT_5788 0x4
9998 #define TG3_FL_NOT_5750 0x8
10002 /* MAC Control Registers */
10003 { MAC_MODE, TG3_FL_NOT_5705,
10004 0x00000000, 0x00ef6f8c },
10005 { MAC_MODE, TG3_FL_5705,
10006 0x00000000, 0x01ef6b8c },
10007 { MAC_STATUS, TG3_FL_NOT_5705,
10008 0x03800107, 0x00000000 },
10009 { MAC_STATUS, TG3_FL_5705,
10010 0x03800100, 0x00000000 },
10011 { MAC_ADDR_0_HIGH, 0x0000,
10012 0x00000000, 0x0000ffff },
10013 { MAC_ADDR_0_LOW, 0x0000,
10014 0x00000000, 0xffffffff },
10015 { MAC_RX_MTU_SIZE, 0x0000,
10016 0x00000000, 0x0000ffff },
10017 { MAC_TX_MODE, 0x0000,
10018 0x00000000, 0x00000070 },
10019 { MAC_TX_LENGTHS, 0x0000,
10020 0x00000000, 0x00003fff },
10021 { MAC_RX_MODE, TG3_FL_NOT_5705,
10022 0x00000000, 0x000007fc },
10023 { MAC_RX_MODE, TG3_FL_5705,
10024 0x00000000, 0x000007dc },
10025 { MAC_HASH_REG_0, 0x0000,
10026 0x00000000, 0xffffffff },
10027 { MAC_HASH_REG_1, 0x0000,
10028 0x00000000, 0xffffffff },
10029 { MAC_HASH_REG_2, 0x0000,
10030 0x00000000, 0xffffffff },
10031 { MAC_HASH_REG_3, 0x0000,
10032 0x00000000, 0xffffffff },
10034 /* Receive Data and Receive BD Initiator Control Registers. */
10035 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10036 0x00000000, 0xffffffff },
10037 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10038 0x00000000, 0xffffffff },
10039 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10040 0x00000000, 0x00000003 },
10041 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10042 0x00000000, 0xffffffff },
10043 { RCVDBDI_STD_BD+0, 0x0000,
10044 0x00000000, 0xffffffff },
10045 { RCVDBDI_STD_BD+4, 0x0000,
10046 0x00000000, 0xffffffff },
10047 { RCVDBDI_STD_BD+8, 0x0000,
10048 0x00000000, 0xffff0002 },
10049 { RCVDBDI_STD_BD+0xc, 0x0000,
10050 0x00000000, 0xffffffff },
10052 /* Receive BD Initiator Control Registers. */
10053 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10054 0x00000000, 0xffffffff },
10055 { RCVBDI_STD_THRESH, TG3_FL_5705,
10056 0x00000000, 0x000003ff },
10057 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10058 0x00000000, 0xffffffff },
10060 /* Host Coalescing Control Registers. */
10061 { HOSTCC_MODE, TG3_FL_NOT_5705,
10062 0x00000000, 0x00000004 },
10063 { HOSTCC_MODE, TG3_FL_5705,
10064 0x00000000, 0x000000f6 },
10065 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10066 0x00000000, 0xffffffff },
10067 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10068 0x00000000, 0x000003ff },
10069 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10070 0x00000000, 0xffffffff },
10071 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10072 0x00000000, 0x000003ff },
10073 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10074 0x00000000, 0xffffffff },
10075 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10076 0x00000000, 0x000000ff },
10077 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10078 0x00000000, 0xffffffff },
10079 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10080 0x00000000, 0x000000ff },
10081 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10082 0x00000000, 0xffffffff },
10083 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10084 0x00000000, 0xffffffff },
10085 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10086 0x00000000, 0xffffffff },
10087 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10088 0x00000000, 0x000000ff },
10089 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10090 0x00000000, 0xffffffff },
10091 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10092 0x00000000, 0x000000ff },
10093 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10094 0x00000000, 0xffffffff },
10095 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10096 0x00000000, 0xffffffff },
10097 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10098 0x00000000, 0xffffffff },
10099 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10100 0x00000000, 0xffffffff },
10101 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10102 0x00000000, 0xffffffff },
10103 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10104 0xffffffff, 0x00000000 },
10105 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10106 0xffffffff, 0x00000000 },
10108 /* Buffer Manager Control Registers. */
10109 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10110 0x00000000, 0x007fff80 },
10111 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10112 0x00000000, 0x007fffff },
10113 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10114 0x00000000, 0x0000003f },
10115 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10116 0x00000000, 0x000001ff },
10117 { BUFMGR_MB_HIGH_WATER, 0x0000,
10118 0x00000000, 0x000001ff },
10119 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10120 0xffffffff, 0x00000000 },
10121 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10122 0xffffffff, 0x00000000 },
10124 /* Mailbox Registers */
10125 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10126 0x00000000, 0x000001ff },
10127 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10128 0x00000000, 0x000001ff },
10129 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10130 0x00000000, 0x000007ff },
10131 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10132 0x00000000, 0x000001ff },
10134 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
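/* Worked example of how one entry above drives the test loop below: for
 * { MAC_STATUS, TG3_FL_5705, 0x03800100, 0x00000000 } the read_mask bits are
 * treated as read-only and the (empty) write_mask as host-writable, so
 * writing 0 must leave the read-only bits at their saved value with every
 * writable bit reading 0, and writing (read_mask | write_mask) must leave the
 * read-only bits untouched with every writable bit reading 1.
 */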
10137 is_5705 = is_5750 = 0;
10138 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10140 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10144 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10145 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10148 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10151 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10152 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10155 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10158 offset = (u32) reg_tbl[i].offset;
10159 read_mask = reg_tbl[i].read_mask;
10160 write_mask = reg_tbl[i].write_mask;
10162 /* Save the original register content */
10163 save_val = tr32(offset);
10165 /* Determine the read-only value. */
10166 read_val = save_val & read_mask;
10168 /* Write zero to the register, then make sure the read-only bits
10169 * are not changed and the read/write bits are all zeros.
10173 val = tr32(offset);
10175 /* Test the read-only and read/write bits. */
10176 if (((val & read_mask) != read_val) || (val & write_mask))
10179 /* Write ones to all the bits defined by RdMask and WrMask, then
10180 * make sure the read-only bits are not changed and the
10181 * read/write bits are all ones.
10183 tw32(offset, read_mask | write_mask);
10185 val = tr32(offset);
10187 /* Test the read-only bits. */
10188 if ((val & read_mask) != read_val)
10191 /* Test the read/write bits. */
10192 if ((val & write_mask) != write_mask)
10195 tw32(offset, save_val);
10201 if (netif_msg_hw(tp))
10202 printk(KERN_ERR PFX "Register test failed at offset %x\n",
10204 tw32(offset, save_val);
10208 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10210 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10214 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10215 for (j = 0; j < len; j += 4) {
10218 tg3_write_mem(tp, offset + j, test_pattern[i]);
10219 tg3_read_mem(tp, offset + j, &val);
10220 if (val != test_pattern[i])
10227 static int tg3_test_memory(struct tg3 *tp)
10229 static struct mem_entry {
10232 } mem_tbl_570x[] = {
10233 { 0x00000000, 0x00b50},
10234 { 0x00002000, 0x1c000},
10235 { 0xffffffff, 0x00000}
10236 }, mem_tbl_5705[] = {
10237 { 0x00000100, 0x0000c},
10238 { 0x00000200, 0x00008},
10239 { 0x00004000, 0x00800},
10240 { 0x00006000, 0x01000},
10241 { 0x00008000, 0x02000},
10242 { 0x00010000, 0x0e000},
10243 { 0xffffffff, 0x00000}
10244 }, mem_tbl_5755[] = {
10245 { 0x00000200, 0x00008},
10246 { 0x00004000, 0x00800},
10247 { 0x00006000, 0x00800},
10248 { 0x00008000, 0x02000},
10249 { 0x00010000, 0x0c000},
10250 { 0xffffffff, 0x00000}
10251 }, mem_tbl_5906[] = {
10252 { 0x00000200, 0x00008},
10253 { 0x00004000, 0x00400},
10254 { 0x00006000, 0x00400},
10255 { 0x00008000, 0x01000},
10256 { 0x00010000, 0x01000},
10257 { 0xffffffff, 0x00000}
10259 struct mem_entry *mem_tbl;
10263 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10264 mem_tbl = mem_tbl_5755;
10265 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10266 mem_tbl = mem_tbl_5906;
10267 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10268 mem_tbl = mem_tbl_5705;
10270 mem_tbl = mem_tbl_570x;
10272 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10273 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10274 mem_tbl[i].len)) != 0)
10281 #define TG3_MAC_LOOPBACK 0
10282 #define TG3_PHY_LOOPBACK 1
10284 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10286 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10287 u32 desc_idx, coal_now;
10288 struct sk_buff *skb, *rx_skb;
10291 int num_pkts, tx_len, rx_len, i, err;
10292 struct tg3_rx_buffer_desc *desc;
10293 struct tg3_napi *tnapi, *rnapi;
10294 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10296 if (tp->irq_cnt > 1) {
10297 tnapi = &tp->napi[1];
10298 rnapi = &tp->napi[1];
10300 tnapi = &tp->napi[0];
10301 rnapi = &tp->napi[0];
10303 coal_now = tnapi->coal_now | rnapi->coal_now;
10305 if (loopback_mode == TG3_MAC_LOOPBACK) {
10306 /* HW errata - mac loopback fails in some cases on 5780.
10307 * Normal traffic and PHY loopback are not affected by the errata. */
10310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10313 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10314 MAC_MODE_PORT_INT_LPBACK;
10315 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10316 mac_mode |= MAC_MODE_LINK_POLARITY;
10317 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10318 mac_mode |= MAC_MODE_PORT_MODE_MII;
10320 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10321 tw32(MAC_MODE, mac_mode);
10322 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10325 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10326 tg3_phy_fet_toggle_apd(tp, false);
10327 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10329 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10331 tg3_phy_toggle_automdix(tp, 0);
10333 tg3_writephy(tp, MII_BMCR, val);
10336 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10337 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10339 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
10340 mac_mode |= MAC_MODE_PORT_MODE_MII;
10342 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10344 /* reset to prevent losing 1st rx packet intermittently */
10345 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10346 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10348 tw32_f(MAC_RX_MODE, tp->rx_mode);
10350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10351 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10352 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10353 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10354 mac_mode |= MAC_MODE_LINK_POLARITY;
10355 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10356 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10358 tw32(MAC_MODE, mac_mode);
10366 skb = netdev_alloc_skb(tp->dev, tx_len);
10370 tx_data = skb_put(skb, tx_len);
10371 memcpy(tx_data, tp->dev->dev_addr, 6);
10372 memset(tx_data + 6, 0x0, 8);
10374 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10376 for (i = 14; i < tx_len; i++)
10377 tx_data[i] = (u8) (i & 0xff);
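/* The test frame built above is addressed to the NIC's own MAC address
 * (first 6 bytes), followed by 8 zero bytes covering the rest of the
 * Ethernet header, and a payload of incrementing byte values starting at
 * offset 14; the receive path below compares the looped-back buffer against
 * the same pattern.
 */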
10379 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
10380 dev_kfree_skb(skb);
10384 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10389 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10393 tg3_set_txd(tnapi, tnapi->tx_prod,
10394 skb_shinfo(skb)->dma_head, tx_len, 0, 1);
10399 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10400 tr32_mailbox(tnapi->prodmbox);
10404 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10405 for (i = 0; i < 25; i++) {
10406 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10411 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10412 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10413 if ((tx_idx == tnapi->tx_prod) &&
10414 (rx_idx == (rx_start_idx + num_pkts)))
10418 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
10419 dev_kfree_skb(skb);
10421 if (tx_idx != tnapi->tx_prod)
10424 if (rx_idx != rx_start_idx + num_pkts)
10427 desc = &rnapi->rx_rcb[rx_start_idx];
10428 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10429 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10430 if (opaque_key != RXD_OPAQUE_RING_STD)
10433 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10434 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10437 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10438 if (rx_len != tx_len)
10441 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10443 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10444 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10446 for (i = 14; i < tx_len; i++) {
10447 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10452 /* tg3_free_rings will unmap and free the rx_skb */
10457 #define TG3_MAC_LOOPBACK_FAILED 1
10458 #define TG3_PHY_LOOPBACK_FAILED 2
10459 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10460 TG3_PHY_LOOPBACK_FAILED)
10462 static int tg3_test_loopback(struct tg3 *tp)
10467 if (!netif_running(tp->dev))
10468 return TG3_LOOPBACK_FAILED;
10470 err = tg3_reset_hw(tp, 1);
10472 return TG3_LOOPBACK_FAILED;
10474 /* Turn off gphy autopowerdown. */
10475 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10476 tg3_phy_toggle_apd(tp, false);
10478 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10482 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10484 /* Wait for up to 40 microseconds to acquire lock. */
10485 for (i = 0; i < 4; i++) {
10486 status = tr32(TG3_CPMU_MUTEX_GNT);
10487 if (status == CPMU_MUTEX_GNT_DRIVER)
10492 if (status != CPMU_MUTEX_GNT_DRIVER)
10493 return TG3_LOOPBACK_FAILED;
10495 /* Turn off link-based power management. */
10496 cpmuctrl = tr32(TG3_CPMU_CTRL);
10497 tw32(TG3_CPMU_CTRL,
10498 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10499 CPMU_CTRL_LINK_AWARE_MODE));
10502 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10503 err |= TG3_MAC_LOOPBACK_FAILED;
10505 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10506 tw32(TG3_CPMU_CTRL, cpmuctrl);
10508 /* Release the mutex */
10509 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10512 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10513 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10514 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10515 err |= TG3_PHY_LOOPBACK_FAILED;
10518 /* Re-enable gphy autopowerdown. */
10519 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10520 tg3_phy_toggle_apd(tp, true);
10525 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10528 struct tg3 *tp = netdev_priv(dev);
10530 if (tp->link_config.phy_is_low_power)
10531 tg3_set_power_state(tp, PCI_D0);
10533 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10535 if (tg3_test_nvram(tp) != 0) {
10536 etest->flags |= ETH_TEST_FL_FAILED;
10539 if (tg3_test_link(tp) != 0) {
10540 etest->flags |= ETH_TEST_FL_FAILED;
10543 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10544 int err, err2 = 0, irq_sync = 0;
10546 if (netif_running(dev)) {
10548 tg3_netif_stop(tp);
10552 tg3_full_lock(tp, irq_sync);
10554 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10555 err = tg3_nvram_lock(tp);
10556 tg3_halt_cpu(tp, RX_CPU_BASE);
10557 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10558 tg3_halt_cpu(tp, TX_CPU_BASE);
10560 tg3_nvram_unlock(tp);
10562 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10565 if (tg3_test_registers(tp) != 0) {
10566 etest->flags |= ETH_TEST_FL_FAILED;
10569 if (tg3_test_memory(tp) != 0) {
10570 etest->flags |= ETH_TEST_FL_FAILED;
10573 if ((data[4] = tg3_test_loopback(tp)) != 0)
10574 etest->flags |= ETH_TEST_FL_FAILED;
10576 tg3_full_unlock(tp);
10578 if (tg3_test_interrupt(tp) != 0) {
10579 etest->flags |= ETH_TEST_FL_FAILED;
10583 tg3_full_lock(tp, 0);
10585 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10586 if (netif_running(dev)) {
10587 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10588 err2 = tg3_restart_hw(tp, 1);
10590 tg3_netif_start(tp);
10593 tg3_full_unlock(tp);
10595 if (irq_sync && !err2)
10598 if (tp->link_config.phy_is_low_power)
10599 tg3_set_power_state(tp, PCI_D3hot);
10603 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10605 struct mii_ioctl_data *data = if_mii(ifr);
10606 struct tg3 *tp = netdev_priv(dev);
10609 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10610 struct phy_device *phydev;
10611 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10613 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10614 return phy_mii_ioctl(phydev, data, cmd);
10619 data->phy_id = tp->phy_addr;
10622 case SIOCGMIIREG: {
10625 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10626 break; /* We have no PHY */
10628 if (tp->link_config.phy_is_low_power)
10631 spin_lock_bh(&tp->lock);
10632 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10633 spin_unlock_bh(&tp->lock);
10635 data->val_out = mii_regval;
10641 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10642 break; /* We have no PHY */
10644 if (tp->link_config.phy_is_low_power)
10647 spin_lock_bh(&tp->lock);
10648 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10649 spin_unlock_bh(&tp->lock);
10657 return -EOPNOTSUPP;
10660 #if TG3_VLAN_TAG_USED
10661 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10663 struct tg3 *tp = netdev_priv(dev);
10665 if (!netif_running(dev)) {
10670 tg3_netif_stop(tp);
10672 tg3_full_lock(tp, 0);
10676 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10677 __tg3_set_rx_mode(dev);
10679 tg3_netif_start(tp);
10681 tg3_full_unlock(tp);
10685 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10687 struct tg3 *tp = netdev_priv(dev);
10689 memcpy(ec, &tp->coal, sizeof(*ec));
10693 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10695 struct tg3 *tp = netdev_priv(dev);
10696 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10697 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10699 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10700 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10701 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10702 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10703 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10706 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10707 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10708 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10709 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10710 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10711 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10712 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10713 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10714 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10715 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10718 /* No rx interrupts will be generated if both are zero */
10719 if ((ec->rx_coalesce_usecs == 0) &&
10720 (ec->rx_max_coalesced_frames == 0))
10723 /* No tx interrupts will be generated if both are zero */
10724 if ((ec->tx_coalesce_usecs == 0) &&
10725 (ec->tx_max_coalesced_frames == 0))
10728 /* Only copy relevant parameters, ignore all others. */
10729 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10730 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10731 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10732 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10733 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10734 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10735 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10736 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10737 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10739 if (netif_running(dev)) {
10740 tg3_full_lock(tp, 0);
10741 __tg3_set_coalesce(tp, &tp->coal);
10742 tg3_full_unlock(tp);
10747 static const struct ethtool_ops tg3_ethtool_ops = {
10748 .get_settings = tg3_get_settings,
10749 .set_settings = tg3_set_settings,
10750 .get_drvinfo = tg3_get_drvinfo,
10751 .get_regs_len = tg3_get_regs_len,
10752 .get_regs = tg3_get_regs,
10753 .get_wol = tg3_get_wol,
10754 .set_wol = tg3_set_wol,
10755 .get_msglevel = tg3_get_msglevel,
10756 .set_msglevel = tg3_set_msglevel,
10757 .nway_reset = tg3_nway_reset,
10758 .get_link = ethtool_op_get_link,
10759 .get_eeprom_len = tg3_get_eeprom_len,
10760 .get_eeprom = tg3_get_eeprom,
10761 .set_eeprom = tg3_set_eeprom,
10762 .get_ringparam = tg3_get_ringparam,
10763 .set_ringparam = tg3_set_ringparam,
10764 .get_pauseparam = tg3_get_pauseparam,
10765 .set_pauseparam = tg3_set_pauseparam,
10766 .get_rx_csum = tg3_get_rx_csum,
10767 .set_rx_csum = tg3_set_rx_csum,
10768 .set_tx_csum = tg3_set_tx_csum,
10769 .set_sg = ethtool_op_set_sg,
10770 .set_tso = tg3_set_tso,
10771 .self_test = tg3_self_test,
10772 .get_strings = tg3_get_strings,
10773 .phys_id = tg3_phys_id,
10774 .get_ethtool_stats = tg3_get_ethtool_stats,
10775 .get_coalesce = tg3_get_coalesce,
10776 .set_coalesce = tg3_set_coalesce,
10777 .get_sset_count = tg3_get_sset_count,
10780 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10782 u32 cursize, val, magic;
10784 tp->nvram_size = EEPROM_CHIP_SIZE;
10786 if (tg3_nvram_read(tp, 0, &magic) != 0)
10789 if ((magic != TG3_EEPROM_MAGIC) &&
10790 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10791 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10795 * Size the chip by reading offsets at increasing powers of two.
10796 * When we encounter our validation signature, we know the addressing
10797 * has wrapped around, and thus have our chip size.
10801 while (cursize < tp->nvram_size) {
10802 if (tg3_nvram_read(tp, cursize, &val) != 0)
10811 tp->nvram_size = cursize;
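/* Illustrative example of the wrap-around detection above (assuming the
 * elided loop body doubles cursize and compares each word read against the
 * validation signature): on a 4 KB part, the read at offset 0x1000 aliases
 * back to offset 0 and returns the signature again, so the loop stops with
 * cursize == 0x1000.
 */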
10814 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10818 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10819 tg3_nvram_read(tp, 0, &val) != 0)
10822 /* Selfboot format */
10823 if (val != TG3_EEPROM_MAGIC) {
10824 tg3_get_eeprom_size(tp);
10828 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10830 /* This is confusing. We want to operate on the
10831 * 16-bit value at offset 0xf2. The tg3_nvram_read()
10832 * call will read from NVRAM and byteswap the data
10833 * according to the byteswapping settings for all
10834 * other register accesses. This ensures the data we
10835 * want will always reside in the lower 16-bits.
10836 * However, the data in NVRAM is in LE format, which
10837 * means the data from the NVRAM read will always be
10838 * opposite the endianness of the CPU. The 16-bit
10839 * byteswap then brings the data to CPU endianness.
10841 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
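/* For example, a decoded 16-bit value of 128 here yields
 * tp->nvram_size = 128 * 1024 = 0x20000 bytes (128 KB).
 */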
10845 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10848 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10852 nvcfg1 = tr32(NVRAM_CFG1);
10853 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10854 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10856 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10857 tw32(NVRAM_CFG1, nvcfg1);
10860 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10861 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10862 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10863 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10864 tp->nvram_jedecnum = JEDEC_ATMEL;
10865 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10866 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10868 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10869 tp->nvram_jedecnum = JEDEC_ATMEL;
10870 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10872 case FLASH_VENDOR_ATMEL_EEPROM:
10873 tp->nvram_jedecnum = JEDEC_ATMEL;
10874 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10875 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10877 case FLASH_VENDOR_ST:
10878 tp->nvram_jedecnum = JEDEC_ST;
10879 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10880 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10882 case FLASH_VENDOR_SAIFUN:
10883 tp->nvram_jedecnum = JEDEC_SAIFUN;
10884 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10886 case FLASH_VENDOR_SST_SMALL:
10887 case FLASH_VENDOR_SST_LARGE:
10888 tp->nvram_jedecnum = JEDEC_SST;
10889 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10893 tp->nvram_jedecnum = JEDEC_ATMEL;
10894 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10895 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10899 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
10901 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10902 case FLASH_5752PAGE_SIZE_256:
10903 tp->nvram_pagesize = 256;
10905 case FLASH_5752PAGE_SIZE_512:
10906 tp->nvram_pagesize = 512;
10908 case FLASH_5752PAGE_SIZE_1K:
10909 tp->nvram_pagesize = 1024;
10911 case FLASH_5752PAGE_SIZE_2K:
10912 tp->nvram_pagesize = 2048;
10914 case FLASH_5752PAGE_SIZE_4K:
10915 tp->nvram_pagesize = 4096;
10917 case FLASH_5752PAGE_SIZE_264:
10918 tp->nvram_pagesize = 264;
10920 case FLASH_5752PAGE_SIZE_528:
10921 tp->nvram_pagesize = 528;
10926 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10930 nvcfg1 = tr32(NVRAM_CFG1);
10932 /* NVRAM protection for TPM */
10933 if (nvcfg1 & (1 << 27))
10934 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10936 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10937 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10938 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10939 tp->nvram_jedecnum = JEDEC_ATMEL;
10940 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10942 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10943 tp->nvram_jedecnum = JEDEC_ATMEL;
10944 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10945 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10947 case FLASH_5752VENDOR_ST_M45PE10:
10948 case FLASH_5752VENDOR_ST_M45PE20:
10949 case FLASH_5752VENDOR_ST_M45PE40:
10950 tp->nvram_jedecnum = JEDEC_ST;
10951 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10952 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10956 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10957 tg3_nvram_get_pagesize(tp, nvcfg1);
10959 /* For eeprom, set pagesize to maximum eeprom size */
10960 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10962 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10963 tw32(NVRAM_CFG1, nvcfg1);
10967 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10969 u32 nvcfg1, protect = 0;
10971 nvcfg1 = tr32(NVRAM_CFG1);
10973 /* NVRAM protection for TPM */
10974 if (nvcfg1 & (1 << 27)) {
10975 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10979 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10981 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10982 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10983 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10984 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10985 tp->nvram_jedecnum = JEDEC_ATMEL;
10986 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10987 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10988 tp->nvram_pagesize = 264;
10989 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10990 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10991 tp->nvram_size = (protect ? 0x3e200 :
10992 TG3_NVRAM_SIZE_512KB);
10993 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10994 tp->nvram_size = (protect ? 0x1f200 :
10995 TG3_NVRAM_SIZE_256KB);
10997 tp->nvram_size = (protect ? 0x1f200 :
10998 TG3_NVRAM_SIZE_128KB);
11000 case FLASH_5752VENDOR_ST_M45PE10:
11001 case FLASH_5752VENDOR_ST_M45PE20:
11002 case FLASH_5752VENDOR_ST_M45PE40:
11003 tp->nvram_jedecnum = JEDEC_ST;
11004 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11005 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11006 tp->nvram_pagesize = 256;
11007 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11008 tp->nvram_size = (protect ?
11009 TG3_NVRAM_SIZE_64KB :
11010 TG3_NVRAM_SIZE_128KB);
11011 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11012 tp->nvram_size = (protect ?
11013 TG3_NVRAM_SIZE_64KB :
11014 TG3_NVRAM_SIZE_256KB);
11016 tp->nvram_size = (protect ?
11017 TG3_NVRAM_SIZE_128KB :
11018 TG3_NVRAM_SIZE_512KB);
11023 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11027 nvcfg1 = tr32(NVRAM_CFG1);
11029 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11030 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11031 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11032 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11033 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11034 tp->nvram_jedecnum = JEDEC_ATMEL;
11035 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11036 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11038 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11039 tw32(NVRAM_CFG1, nvcfg1);
11041 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11042 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11043 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11044 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11045 tp->nvram_jedecnum = JEDEC_ATMEL;
11046 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11047 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11048 tp->nvram_pagesize = 264;
11050 case FLASH_5752VENDOR_ST_M45PE10:
11051 case FLASH_5752VENDOR_ST_M45PE20:
11052 case FLASH_5752VENDOR_ST_M45PE40:
11053 tp->nvram_jedecnum = JEDEC_ST;
11054 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11055 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11056 tp->nvram_pagesize = 256;
11061 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11063 u32 nvcfg1, protect = 0;
11065 nvcfg1 = tr32(NVRAM_CFG1);
11067 /* NVRAM protection for TPM */
11068 if (nvcfg1 & (1 << 27)) {
11069 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
11073 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11075 case FLASH_5761VENDOR_ATMEL_ADB021D:
11076 case FLASH_5761VENDOR_ATMEL_ADB041D:
11077 case FLASH_5761VENDOR_ATMEL_ADB081D:
11078 case FLASH_5761VENDOR_ATMEL_ADB161D:
11079 case FLASH_5761VENDOR_ATMEL_MDB021D:
11080 case FLASH_5761VENDOR_ATMEL_MDB041D:
11081 case FLASH_5761VENDOR_ATMEL_MDB081D:
11082 case FLASH_5761VENDOR_ATMEL_MDB161D:
11083 tp->nvram_jedecnum = JEDEC_ATMEL;
11084 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11085 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11086 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11087 tp->nvram_pagesize = 256;
11089 case FLASH_5761VENDOR_ST_A_M45PE20:
11090 case FLASH_5761VENDOR_ST_A_M45PE40:
11091 case FLASH_5761VENDOR_ST_A_M45PE80:
11092 case FLASH_5761VENDOR_ST_A_M45PE16:
11093 case FLASH_5761VENDOR_ST_M_M45PE20:
11094 case FLASH_5761VENDOR_ST_M_M45PE40:
11095 case FLASH_5761VENDOR_ST_M_M45PE80:
11096 case FLASH_5761VENDOR_ST_M_M45PE16:
11097 tp->nvram_jedecnum = JEDEC_ST;
11098 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11099 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11100 tp->nvram_pagesize = 256;
11105 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11108 case FLASH_5761VENDOR_ATMEL_ADB161D:
11109 case FLASH_5761VENDOR_ATMEL_MDB161D:
11110 case FLASH_5761VENDOR_ST_A_M45PE16:
11111 case FLASH_5761VENDOR_ST_M_M45PE16:
11112 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11114 case FLASH_5761VENDOR_ATMEL_ADB081D:
11115 case FLASH_5761VENDOR_ATMEL_MDB081D:
11116 case FLASH_5761VENDOR_ST_A_M45PE80:
11117 case FLASH_5761VENDOR_ST_M_M45PE80:
11118 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11120 case FLASH_5761VENDOR_ATMEL_ADB041D:
11121 case FLASH_5761VENDOR_ATMEL_MDB041D:
11122 case FLASH_5761VENDOR_ST_A_M45PE40:
11123 case FLASH_5761VENDOR_ST_M_M45PE40:
11124 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11126 case FLASH_5761VENDOR_ATMEL_ADB021D:
11127 case FLASH_5761VENDOR_ATMEL_MDB021D:
11128 case FLASH_5761VENDOR_ST_A_M45PE20:
11129 case FLASH_5761VENDOR_ST_M_M45PE20:
11130 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11136 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11138 tp->nvram_jedecnum = JEDEC_ATMEL;
11139 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11140 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11143 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11147 nvcfg1 = tr32(NVRAM_CFG1);
11149 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11150 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11151 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11152 tp->nvram_jedecnum = JEDEC_ATMEL;
11153 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11154 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11156 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11157 tw32(NVRAM_CFG1, nvcfg1);
11159 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11160 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11161 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11162 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11163 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11164 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11165 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11166 tp->nvram_jedecnum = JEDEC_ATMEL;
11167 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11168 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11170 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11171 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11172 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11173 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11174 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11176 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11177 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11178 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11180 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11181 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11182 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11186 case FLASH_5752VENDOR_ST_M45PE10:
11187 case FLASH_5752VENDOR_ST_M45PE20:
11188 case FLASH_5752VENDOR_ST_M45PE40:
11189 tp->nvram_jedecnum = JEDEC_ST;
11190 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11191 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11193 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11194 case FLASH_5752VENDOR_ST_M45PE10:
11195 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11197 case FLASH_5752VENDOR_ST_M45PE20:
11198 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11200 case FLASH_5752VENDOR_ST_M45PE40:
11201 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11206 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11210 tg3_nvram_get_pagesize(tp, nvcfg1);
11211 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11212 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
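/* Page sizes of 264 and 528 bytes correspond to Atmel DataFlash-style parts
 * whose pages are not a power of two; those presumably still need the NVRAM
 * address-translation logic, which is why TG3_FLG3_NO_NVRAM_ADDR_TRANS is
 * only set for the conventional power-of-two page sizes.
 */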
11216 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11220 nvcfg1 = tr32(NVRAM_CFG1);
11222 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11223 case FLASH_5717VENDOR_ATMEL_EEPROM:
11224 case FLASH_5717VENDOR_MICRO_EEPROM:
11225 tp->nvram_jedecnum = JEDEC_ATMEL;
11226 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11227 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11229 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11230 tw32(NVRAM_CFG1, nvcfg1);
11232 case FLASH_5717VENDOR_ATMEL_MDB011D:
11233 case FLASH_5717VENDOR_ATMEL_ADB011B:
11234 case FLASH_5717VENDOR_ATMEL_ADB011D:
11235 case FLASH_5717VENDOR_ATMEL_MDB021D:
11236 case FLASH_5717VENDOR_ATMEL_ADB021B:
11237 case FLASH_5717VENDOR_ATMEL_ADB021D:
11238 case FLASH_5717VENDOR_ATMEL_45USPT:
11239 tp->nvram_jedecnum = JEDEC_ATMEL;
11240 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11241 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11243 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11244 case FLASH_5717VENDOR_ATMEL_MDB021D:
11245 case FLASH_5717VENDOR_ATMEL_ADB021B:
11246 case FLASH_5717VENDOR_ATMEL_ADB021D:
11247 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11250 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11254 case FLASH_5717VENDOR_ST_M_M25PE10:
11255 case FLASH_5717VENDOR_ST_A_M25PE10:
11256 case FLASH_5717VENDOR_ST_M_M45PE10:
11257 case FLASH_5717VENDOR_ST_A_M45PE10:
11258 case FLASH_5717VENDOR_ST_M_M25PE20:
11259 case FLASH_5717VENDOR_ST_A_M25PE20:
11260 case FLASH_5717VENDOR_ST_M_M45PE20:
11261 case FLASH_5717VENDOR_ST_A_M45PE20:
11262 case FLASH_5717VENDOR_ST_25USPT:
11263 case FLASH_5717VENDOR_ST_45USPT:
11264 tp->nvram_jedecnum = JEDEC_ST;
11265 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11266 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11268 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11269 case FLASH_5717VENDOR_ST_M_M25PE20:
11270 case FLASH_5717VENDOR_ST_A_M25PE20:
11271 case FLASH_5717VENDOR_ST_M_M45PE20:
11272 case FLASH_5717VENDOR_ST_A_M45PE20:
11273 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11276 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11281 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11285 tg3_nvram_get_pagesize(tp, nvcfg1);
11286 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11287 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11290 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11291 static void __devinit tg3_nvram_init(struct tg3 *tp)
11293 tw32_f(GRC_EEPROM_ADDR,
11294 (EEPROM_ADDR_FSM_RESET |
11295 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11296 EEPROM_ADDR_CLKPERD_SHIFT)));
11300 /* Enable seeprom accesses. */
11301 tw32_f(GRC_LOCAL_CTRL,
11302 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11305 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11306 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11307 tp->tg3_flags |= TG3_FLAG_NVRAM;
11309 if (tg3_nvram_lock(tp)) {
11310 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
11311 "tg3_nvram_init failed.\n", tp->dev->name);
11314 tg3_enable_nvram_access(tp);
11316 tp->nvram_size = 0;
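/* Pick the chip-specific NVRAM layout decoder; chips not listed below
 * fall through to the generic tg3_get_nvram_info().
 */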
11318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11319 tg3_get_5752_nvram_info(tp);
11320 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11321 tg3_get_5755_nvram_info(tp);
11322 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11323 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11324 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11325 tg3_get_5787_nvram_info(tp);
11326 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11327 tg3_get_5761_nvram_info(tp);
11328 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11329 tg3_get_5906_nvram_info(tp);
11330 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11331 tg3_get_57780_nvram_info(tp);
11332 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
11333 tg3_get_5717_nvram_info(tp);
11335 tg3_get_nvram_info(tp);
11337 if (tp->nvram_size == 0)
11338 tg3_get_nvram_size(tp);
11340 tg3_disable_nvram_access(tp);
11341 tg3_nvram_unlock(tp);
11344 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11346 tg3_get_eeprom_size(tp);
11350 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11351 u32 offset, u32 len, u8 *buf)
11356 for (i = 0; i < len; i += 4) {
11362 memcpy(&data, buf + i, 4);
11365 * The SEEPROM interface expects the data to always be opposite
11366 * the native endian format. We accomplish this by reversing
11367 * all the operations that would have been performed on the
11368 * data from a call to tg3_nvram_read_be32().
11370 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11372 val = tr32(GRC_EEPROM_ADDR);
11373 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11375 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11377 tw32(GRC_EEPROM_ADDR, val |
11378 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11379 (addr & EEPROM_ADDR_ADDR_MASK) |
11380 EEPROM_ADDR_START |
11381 EEPROM_ADDR_WRITE);
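/* Poll EEPROM_ADDR until the controller reports the word write as
 * complete.
 */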
11383 for (j = 0; j < 1000; j++) {
11384 val = tr32(GRC_EEPROM_ADDR);
11386 if (val & EEPROM_ADDR_COMPLETE)
11390 if (!(val & EEPROM_ADDR_COMPLETE)) {
11399 /* offset and length are dword aligned */
11400 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11404 u32 pagesize = tp->nvram_pagesize;
11405 u32 pagemask = pagesize - 1;
11409 tmp = kmalloc(pagesize, GFP_KERNEL);
11415 u32 phy_addr, page_off, size;
11417 phy_addr = offset & ~pagemask;
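/* Read back the whole flash page containing this offset so the new
 * data can be merged in and the page rewritten in full.
 */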
11419 for (j = 0; j < pagesize; j += 4) {
11420 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11421 (__be32 *) (tmp + j));
11428 page_off = offset & pagemask;
11435 memcpy(tmp + page_off, buf, size);
11437 offset = offset + (pagesize - page_off);
11439 tg3_enable_nvram_access(tp);
11442 * Before we can erase the flash page, we need
11443 * to issue a special "write enable" command.
11445 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11447 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11450 /* Erase the target page */
11451 tw32(NVRAM_ADDR, phy_addr);
11453 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11454 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11456 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11459 /* Issue another write enable to start the write. */
11460 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11462 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11465 for (j = 0; j < pagesize; j += 4) {
11468 data = *((__be32 *) (tmp + j));
11470 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11472 tw32(NVRAM_ADDR, phy_addr + j);
11474 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11478 nvram_cmd |= NVRAM_CMD_FIRST;
11479 else if (j == (pagesize - 4))
11480 nvram_cmd |= NVRAM_CMD_LAST;
11482 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11489 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11490 tg3_nvram_exec_cmd(tp, nvram_cmd);
11497 /* offset and length are dword aligned */
11498 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11503 for (i = 0; i < len; i += 4, offset += 4) {
11504 u32 page_off, phy_addr, nvram_cmd;
11507 memcpy(&data, buf + i, 4);
11508 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11510 page_off = offset % tp->nvram_pagesize;
11512 phy_addr = tg3_nvram_phys_addr(tp, offset);
11514 tw32(NVRAM_ADDR, phy_addr);
11516 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
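/* Tag the first word of a page (or of the whole transfer) with
 * NVRAM_CMD_FIRST, and the last word of a page or of the transfer
 * with NVRAM_CMD_LAST.
 */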
11518 if ((page_off == 0) || (i == 0))
11519 nvram_cmd |= NVRAM_CMD_FIRST;
11520 if (page_off == (tp->nvram_pagesize - 4))
11521 nvram_cmd |= NVRAM_CMD_LAST;
11523 if (i == (len - 4))
11524 nvram_cmd |= NVRAM_CMD_LAST;
11526 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11527 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11528 (tp->nvram_jedecnum == JEDEC_ST) &&
11529 (nvram_cmd & NVRAM_CMD_FIRST)) {
11531 if ((ret = tg3_nvram_exec_cmd(tp,
11532 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11537 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11538 /* We always do complete word writes to eeprom. */
11539 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11542 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11548 /* offset and length are dword aligned */
11549 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
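/* If GPIO1 serves as the EEPROM write-protect line, de-assert it for
 * the duration of the write; it is restored at the end of the function.
 */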
11553 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11554 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11555 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11559 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11560 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11565 ret = tg3_nvram_lock(tp);
11569 tg3_enable_nvram_access(tp);
11570 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11571 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11572 tw32(NVRAM_WRITE1, 0x406);
11574 grc_mode = tr32(GRC_MODE);
11575 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11577 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11578 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11580 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11584 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11588 grc_mode = tr32(GRC_MODE);
11589 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11591 tg3_disable_nvram_access(tp);
11592 tg3_nvram_unlock(tp);
11595 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11596 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11603 struct subsys_tbl_ent {
11604 u16 subsys_vendor, subsys_devid;
11608 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11609 /* Broadcom boards. */
11610 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11611 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11612 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11613 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11614 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11615 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11616 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11617 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11618 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11619 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11620 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11623 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11624 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11625 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11626 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11627 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11630 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11631 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11632 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11633 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11635 /* Compaq boards. */
11636 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11637 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11638 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11639 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11640 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11643 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11646 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11650 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11651 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11652 tp->pdev->subsystem_vendor) &&
11653 (subsys_id_to_phy_id[i].subsys_devid ==
11654 tp->pdev->subsystem_device))
11655 return &subsys_id_to_phy_id[i];
11660 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11665 /* On some early chips the SRAM cannot be accessed in D3hot state,
11666 * so we need to make sure we're in D0.
11668 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11669 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11670 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11673 /* Make sure register accesses (indirect or otherwise)
11674 * will function correctly.
11676 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11677 tp->misc_host_ctrl);
11679 /* The memory arbiter has to be enabled in order for SRAM accesses
11680 * to succeed. Normally on powerup the tg3 chip firmware will make
11681 * sure it is enabled, but other entities such as system netboot
11682 * code might disable it.
11684 val = tr32(MEMARB_MODE);
11685 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11687 tp->phy_id = PHY_ID_INVALID;
11688 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11690 /* Assume an onboard, WOL-capable device by default. */
11691 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11694 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11695 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11696 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11698 val = tr32(VCPU_CFGSHDW);
11699 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11700 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11701 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11702 (val & VCPU_CFGSHDW_WOL_MAGPKT))
11703 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11707 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11708 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11709 u32 nic_cfg, led_cfg;
11710 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11711 int eeprom_phy_serdes = 0;
11713 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11714 tp->nic_sram_data_cfg = nic_cfg;
11716 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11717 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11718 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11719 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11720 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11721 (ver > 0) && (ver < 0x100))
11722 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11725 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11727 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11728 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11729 eeprom_phy_serdes = 1;
11731 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11732 if (nic_phy_id != 0) {
11733 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11734 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
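/* Recombine the two SRAM words into the driver's internal PHY ID
 * layout, the same format tg3_phy_probe() builds from MII_PHYSID1/2.
 */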
11736 eeprom_phy_id = (id1 >> 16) << 10;
11737 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11738 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11742 tp->phy_id = eeprom_phy_id;
11743 if (eeprom_phy_serdes) {
11744 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11745 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11747 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11750 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11751 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11752 SHASTA_EXT_LED_MODE_MASK);
11754 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11758 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11759 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11762 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11763 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11766 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11767 tp->led_ctrl = LED_CTRL_MODE_MAC;
11769 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11770 * read on some older 5700/5701 bootcode.
11772 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11774 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11776 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11780 case SHASTA_EXT_LED_SHARED:
11781 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11782 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11783 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11784 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11785 LED_CTRL_MODE_PHY_2);
11788 case SHASTA_EXT_LED_MAC:
11789 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11792 case SHASTA_EXT_LED_COMBO:
11793 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11794 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11795 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11796 LED_CTRL_MODE_PHY_2);
11801 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11802 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11803 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11804 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11806 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11807 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11809 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11810 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11811 if ((tp->pdev->subsystem_vendor ==
11812 PCI_VENDOR_ID_ARIMA) &&
11813 (tp->pdev->subsystem_device == 0x205a ||
11814 tp->pdev->subsystem_device == 0x2063))
11815 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11817 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11818 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11821 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11822 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11823 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11824 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11827 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11828 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11829 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11831 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11832 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11833 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11835 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11836 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11837 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11839 if (cfg2 & (1 << 17))
11840 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11842 /* SerDes signal pre-emphasis in register 0x590 is set by the */
11843 /* bootcode if bit 18 is set. */
11844 if (cfg2 & (1 << 18))
11845 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11847 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11848 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
11849 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11850 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11852 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11855 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11856 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11857 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11860 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11861 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11862 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11863 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11864 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11865 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11868 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11869 device_set_wakeup_enable(&tp->pdev->dev,
11870 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11873 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11878 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11879 tw32(OTP_CTRL, cmd);
11881 /* Wait for up to 1 ms for command to execute. */
11882 for (i = 0; i < 100; i++) {
11883 val = tr32(OTP_STATUS);
11884 if (val & OTP_STATUS_CMD_DONE)
11889 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11892 /* Read the gphy configuration from the OTP region of the chip. The gphy
11893 * configuration is a 32-bit value that straddles the alignment boundary.
11894 * We do two 32-bit reads and then shift and merge the results.
11896 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11898 u32 bhalf_otp, thalf_otp;
11900 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11902 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11905 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11907 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11910 thalf_otp = tr32(OTP_READ_DATA);
11912 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11914 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11917 bhalf_otp = tr32(OTP_READ_DATA);
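/* The gphy config straddles the two words read above: keep the low
 * half of the first read and the high half of the second.
 */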
11919 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11922 static int __devinit tg3_phy_probe(struct tg3 *tp)
11924 u32 hw_phy_id_1, hw_phy_id_2;
11925 u32 hw_phy_id, hw_phy_id_masked;
11928 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11929 return tg3_phy_init(tp);
11931 /* Reading the PHY ID register can conflict with ASF
11932 * firmware access to the PHY hardware.
11935 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11936 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11937 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11939 /* Now read the physical PHY_ID from the chip and verify
11940 * that it is sane. If it doesn't look good, we fall back
11941 * to either the hard-coded, table-based PHY_ID or, failing
11942 * that, the value found in the eeprom area.
11944 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11945 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11947 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11948 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11949 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11951 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11954 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11955 tp->phy_id = hw_phy_id;
11956 if (hw_phy_id_masked == PHY_ID_BCM8002)
11957 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11959 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11961 if (tp->phy_id != PHY_ID_INVALID) {
11962 /* Do nothing, phy ID already set up in
11963 * tg3_get_eeprom_hw_cfg().
11966 struct subsys_tbl_ent *p;
11968 /* No eeprom signature? Try the hardcoded
11969 * subsys device table.
11971 p = lookup_by_subsys(tp);
11975 tp->phy_id = p->phy_id;
11977 tp->phy_id == PHY_ID_BCM8002)
11978 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11982 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11983 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11984 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11985 u32 bmsr, adv_reg, tg3_ctrl, mask;
11987 tg3_readphy(tp, MII_BMSR, &bmsr);
11988 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11989 (bmsr & BMSR_LSTATUS))
11990 goto skip_phy_reset;
11992 err = tg3_phy_reset(tp);
11996 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11997 ADVERTISE_100HALF | ADVERTISE_100FULL |
11998 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12000 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
12001 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12002 MII_TG3_CTRL_ADV_1000_FULL);
12003 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12004 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12005 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12006 MII_TG3_CTRL_ENABLE_AS_MASTER);
12009 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12010 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12011 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12012 if (!tg3_copper_is_advertising_all(tp, mask)) {
12013 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12015 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12016 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12018 tg3_writephy(tp, MII_BMCR,
12019 BMCR_ANENABLE | BMCR_ANRESTART);
12021 tg3_phy_set_wirespeed(tp);
12023 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12024 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12025 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12029 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
12030 err = tg3_init_5401phy_dsp(tp);
12035 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
12036 err = tg3_init_5401phy_dsp(tp);
12039 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
12040 tp->link_config.advertising =
12041 (ADVERTISED_1000baseT_Half |
12042 ADVERTISED_1000baseT_Full |
12043 ADVERTISED_Autoneg |
12045 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
12046 tp->link_config.advertising &=
12047 ~(ADVERTISED_1000baseT_Half |
12048 ADVERTISED_1000baseT_Full);
12053 static void __devinit tg3_read_partno(struct tg3 *tp)
12055 unsigned char vpd_data[256]; /* in little-endian format */
12059 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12060 tg3_nvram_read(tp, 0x0, &magic))
12061 goto out_not_found;
12063 if (magic == TG3_EEPROM_MAGIC) {
12064 for (i = 0; i < 256; i += 4) {
12067 /* The data is in little-endian format in NVRAM.
12068 * Use the big-endian read routines to preserve
12069 * the byte order as it exists in NVRAM.
12071 if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
12072 goto out_not_found;
12074 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12079 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
12080 for (i = 0; i < 256; i += 4) {
12085 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
12087 while (j++ < 100) {
12088 pci_read_config_word(tp->pdev, vpd_cap +
12089 PCI_VPD_ADDR, &tmp16);
12090 if (tmp16 & 0x8000)
12094 if (!(tmp16 & 0x8000))
12095 goto out_not_found;
12097 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
12099 v = cpu_to_le32(tmp);
12100 memcpy(&vpd_data[i], &v, sizeof(v));
12104 /* Now parse and find the part number. */
12105 for (i = 0; i < 254; ) {
12106 unsigned char val = vpd_data[i];
12107 unsigned int block_end;
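/* Skip the PCI VPD identifier-string (0x82) and read/write (0x91)
 * resource blocks; the part number is carried as a 'PN' keyword in
 * the read-only resource.
 */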
12109 if (val == 0x82 || val == 0x91) {
12112 (vpd_data[i + 2] << 8)));
12117 goto out_not_found;
12119 block_end = (i + 3 +
12121 (vpd_data[i + 2] << 8)));
12124 if (block_end > 256)
12125 goto out_not_found;
12127 while (i < (block_end - 2)) {
12128 if (vpd_data[i + 0] == 'P' &&
12129 vpd_data[i + 1] == 'N') {
12130 int partno_len = vpd_data[i + 2];
12133 if (partno_len > 24 || (partno_len + i) > 256)
12134 goto out_not_found;
12136 memcpy(tp->board_part_number,
12137 &vpd_data[i], partno_len);
12142 i += 3 + vpd_data[i + 2];
12145 /* Part number not found. */
12146 goto out_not_found;
12150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12151 strcpy(tp->board_part_number, "BCM95906");
12152 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12153 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12154 strcpy(tp->board_part_number, "BCM57780");
12155 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12157 strcpy(tp->board_part_number, "BCM57760");
12158 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12159 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12160 strcpy(tp->board_part_number, "BCM57790");
12161 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12162 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12163 strcpy(tp->board_part_number, "BCM57788");
12165 strcpy(tp->board_part_number, "none");
12168 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12172 if (tg3_nvram_read(tp, offset, &val) ||
12173 (val & 0xfc000000) != 0x0c000000 ||
12174 tg3_nvram_read(tp, offset + 4, &val) ||
12181 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12183 u32 val, offset, start, ver_offset;
12185 bool newver = false;
12187 if (tg3_nvram_read(tp, 0xc, &offset) ||
12188 tg3_nvram_read(tp, 0x4, &start))
12191 offset = tg3_nvram_logical_addr(tp, offset);
12193 if (tg3_nvram_read(tp, offset, &val))
12196 if ((val & 0xfc000000) == 0x0c000000) {
12197 if (tg3_nvram_read(tp, offset + 4, &val))
12205 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
12208 offset = offset + ver_offset - start;
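/* Copy the 16-byte bootcode version string out of NVRAM a word at a
 * time.
 */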
12209 for (i = 0; i < 16; i += 4) {
12211 if (tg3_nvram_read_be32(tp, offset + i, &v))
12214 memcpy(tp->fw_ver + i, &v, sizeof(v));
12219 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12222 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12223 TG3_NVM_BCVER_MAJSFT;
12224 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12225 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
12229 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12231 u32 val, major, minor;
12233 /* Use native endian representation */
12234 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12237 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12238 TG3_NVM_HWSB_CFG1_MAJSFT;
12239 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12240 TG3_NVM_HWSB_CFG1_MINSFT;
12242 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12245 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12247 u32 offset, major, minor, build;
12249 tp->fw_ver[0] = 's';
12250 tp->fw_ver[1] = 'b';
12251 tp->fw_ver[2] = '\0';
12253 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12256 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12257 case TG3_EEPROM_SB_REVISION_0:
12258 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12260 case TG3_EEPROM_SB_REVISION_2:
12261 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12263 case TG3_EEPROM_SB_REVISION_3:
12264 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12270 if (tg3_nvram_read(tp, offset, &val))
12273 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12274 TG3_EEPROM_SB_EDH_BLD_SHFT;
12275 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12276 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12277 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12279 if (minor > 99 || build > 26)
12282 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
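/* A non-zero build number is reported as a trailing letter, 'a' for
 * build 1 and so on.
 */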
12285 tp->fw_ver[8] = 'a' + build - 1;
12286 tp->fw_ver[9] = '\0';
12290 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12292 u32 val, offset, start;
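/* Walk the NVRAM directory looking for the ASF initialization image
 * entry.
 */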
12295 for (offset = TG3_NVM_DIR_START;
12296 offset < TG3_NVM_DIR_END;
12297 offset += TG3_NVM_DIRENT_SIZE) {
12298 if (tg3_nvram_read(tp, offset, &val))
12301 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12305 if (offset == TG3_NVM_DIR_END)
12308 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12309 start = 0x08000000;
12310 else if (tg3_nvram_read(tp, offset - 4, &start))
12313 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12314 !tg3_fw_img_is_valid(tp, offset) ||
12315 tg3_nvram_read(tp, offset + 8, &val))
12318 offset += val - start;
12320 vlen = strlen(tp->fw_ver);
12322 tp->fw_ver[vlen++] = ',';
12323 tp->fw_ver[vlen++] = ' ';
12325 for (i = 0; i < 4; i++) {
12327 if (tg3_nvram_read_be32(tp, offset, &v))
12330 offset += sizeof(v);
12332 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12333 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12337 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12342 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12347 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12348 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12351 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12352 if (apedata != APE_SEG_SIG_MAGIC)
12355 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12356 if (!(apedata & APE_FW_STATUS_READY))
12359 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12361 vlen = strlen(tp->fw_ver);
12363 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12364 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12365 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12366 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12367 (apedata & APE_FW_VERSION_BLDMSK));
12370 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12374 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12375 tp->fw_ver[0] = 's';
12376 tp->fw_ver[1] = 'b';
12377 tp->fw_ver[2] = '\0';
12382 if (tg3_nvram_read(tp, 0, &val))
12385 if (val == TG3_EEPROM_MAGIC)
12386 tg3_read_bc_ver(tp);
12387 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12388 tg3_read_sb_ver(tp, val);
12389 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12390 tg3_read_hwsb_ver(tp);
12394 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12395 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
12398 tg3_read_mgmtfw_ver(tp);
12400 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12403 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12405 static int __devinit tg3_get_invariants(struct tg3 *tp)
12407 static struct pci_device_id write_reorder_chipsets[] = {
12408 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12409 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12410 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12411 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12412 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12413 PCI_DEVICE_ID_VIA_8385_0) },
12417 u32 pci_state_reg, grc_misc_cfg;
12422 /* Force memory write invalidate off. If we leave it on,
12423 * then on 5700_BX chips we have to enable a workaround.
12424 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12425 * to match the cacheline size. The Broadcom driver has this
12426 * workaround but turns MWI off all the time, so it never uses
12427 * it. This seems to suggest that the workaround is insufficient.
12429 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12430 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12431 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12433 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12434 * has the register indirect write enable bit set before
12435 * we try to access any of the MMIO registers. It is also
12436 * critical that the PCI-X hw workaround situation is decided
12437 * before that as well.
12439 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12442 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12443 MISC_HOST_CTRL_CHIPREV_SHIFT);
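/* Newer chips report a placeholder ASIC rev in MISC_HOST_CTRL and
 * expose the real value through a product ID register instead.
 */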
12444 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12445 u32 prod_id_asic_rev;
12447 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C ||
12448 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S ||
12449 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C ||
12450 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
12451 pci_read_config_dword(tp->pdev,
12452 TG3PCI_GEN2_PRODID_ASICREV,
12453 &prod_id_asic_rev);
12455 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12456 &prod_id_asic_rev);
12458 tp->pci_chip_rev_id = prod_id_asic_rev;
12461 /* Wrong chip ID in 5752 A0. This code can be removed later
12462 * as A0 is not in production.
12464 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12465 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12467 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12468 * we need to disable memory and use config. cycles
12469 * only to access all registers. The 5702/03 chips
12470 * can mistakenly decode the special cycles from the
12471 * ICH chipsets as memory write cycles, causing corruption
12472 * of register and memory space. Only certain ICH bridges
12473 * will drive special cycles with non-zero data during the
12474 * address phase which can fall within the 5703's address
12475 * range. This is not an ICH bug as the PCI spec allows
12476 * non-zero address during special cycles. However, only
12477 * these ICH bridges are known to drive non-zero addresses
12478 * during special cycles.
12480 * Since special cycles do not cross PCI bridges, we only
12481 * enable this workaround if the 5703 is on the secondary
12482 * bus of these ICH bridges.
12484 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12485 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12486 static struct tg3_dev_id {
12490 } ich_chipsets[] = {
12491 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12493 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12495 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12497 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12501 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12502 struct pci_dev *bridge = NULL;
12504 while (pci_id->vendor != 0) {
12505 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12511 if (pci_id->rev != PCI_ANY_ID) {
12512 if (bridge->revision > pci_id->rev)
12515 if (bridge->subordinate &&
12516 (bridge->subordinate->number ==
12517 tp->pdev->bus->number)) {
12519 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12520 pci_dev_put(bridge);
12526 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12527 static struct tg3_dev_id {
12530 } bridge_chipsets[] = {
12531 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12532 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12535 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12536 struct pci_dev *bridge = NULL;
12538 while (pci_id->vendor != 0) {
12539 bridge = pci_get_device(pci_id->vendor,
12546 if (bridge->subordinate &&
12547 (bridge->subordinate->number <=
12548 tp->pdev->bus->number) &&
12549 (bridge->subordinate->subordinate >=
12550 tp->pdev->bus->number)) {
12551 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12552 pci_dev_put(bridge);
12558 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12559 * DMA addresses > 40-bit. This bridge may have additional
12560 * 57xx devices behind it in some 4-port NIC designs, for example.
12561 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround. */
12564 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12565 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12566 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12567 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12568 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12571 struct pci_dev *bridge = NULL;
12574 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12575 PCI_DEVICE_ID_SERVERWORKS_EPB,
12577 if (bridge && bridge->subordinate &&
12578 (bridge->subordinate->number <=
12579 tp->pdev->bus->number) &&
12580 (bridge->subordinate->subordinate >=
12581 tp->pdev->bus->number)) {
12582 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12583 pci_dev_put(bridge);
12589 /* Initialize misc host control in PCI block. */
12590 tp->misc_host_ctrl |= (misc_ctrl_reg &
12591 MISC_HOST_CTRL_CHIPREV);
12592 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12593 tp->misc_host_ctrl);
12595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
12596 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
12597 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12598 tp->pdev_peer = tg3_find_peer(tp);
12600 /* Intentionally exclude ASIC_REV_5906 */
12601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12602 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12603 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12604 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12605 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12606 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12607 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12608 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12613 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12614 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12615 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12617 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12618 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12619 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12621 /* 5700 B0 chips do not support checksumming correctly due
12622 * to hardware bugs.
12624 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12625 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12627 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12628 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12629 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12630 tp->dev->features |= NETIF_F_IPV6_CSUM;
12633 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12634 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12635 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12636 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12637 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12638 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12639 tp->pdev_peer == tp->pdev))
12640 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12642 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12643 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12644 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12645 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12647 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12648 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12650 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12651 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12657 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12658 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12659 tp->irq_max = TG3_IRQ_MAX_VECS;
12662 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
12663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12664 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
12666 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
12667 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
12671 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12672 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
12673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12674 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
12676 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12679 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12680 if (tp->pcie_cap != 0) {
12683 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12685 pcie_set_readrq(tp->pdev, 4096);
12687 pci_read_config_word(tp->pdev,
12688 tp->pcie_cap + PCI_EXP_LNKCTL,
12690 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12692 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12694 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12695 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
12696 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
12697 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12699 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12700 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12701 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12702 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12703 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12704 if (!tp->pcix_cap) {
12705 printk(KERN_ERR PFX "Cannot find PCI-X "
12706 "capability, aborting.\n");
12710 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12711 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12714 /* If we have an AMD 762 or VIA K8T800 chipset, write
12715 * reordering to the mailbox registers done by the host
12716 * controller can cause major trouble. We read back after
12717 * every mailbox register write to force the writes to be
12718 * posted to the chip in order.
12720 if (pci_dev_present(write_reorder_chipsets) &&
12721 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12722 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12724 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12725 &tp->pci_cacheline_sz);
12726 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12727 &tp->pci_lat_timer);
12728 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12729 tp->pci_lat_timer < 64) {
12730 tp->pci_lat_timer = 64;
12731 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12732 tp->pci_lat_timer);
12735 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12736 /* 5700 BX chips need to have their TX producer index
12737 * mailboxes written twice to work around a bug.
12739 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12741 /* If we are in PCI-X mode, enable the register write workaround.
12743 * The workaround is to use indirect register accesses
12744 * for all chip writes except those to mailbox registers.
12746 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12749 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12751 /* The chip can have its power management PCI config
12752 * space registers clobbered due to this bug.
12753 * So explicitly force the chip into D0 here.
12755 pci_read_config_dword(tp->pdev,
12756 tp->pm_cap + PCI_PM_CTRL,
12758 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12759 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12760 pci_write_config_dword(tp->pdev,
12761 tp->pm_cap + PCI_PM_CTRL,
12764 /* Also, force SERR#/PERR# in PCI command. */
12765 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12766 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12767 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12771 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12772 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12773 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12774 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12776 /* Chip-specific fixup from Broadcom driver */
12777 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12778 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12779 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12780 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12783 /* Default fast path register access methods */
12784 tp->read32 = tg3_read32;
12785 tp->write32 = tg3_write32;
12786 tp->read32_mbox = tg3_read32;
12787 tp->write32_mbox = tg3_write32;
12788 tp->write32_tx_mbox = tg3_write32;
12789 tp->write32_rx_mbox = tg3_write32;
12791 /* Various workaround register access methods */
12792 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12793 tp->write32 = tg3_write_indirect_reg32;
12794 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12795 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12796 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12798 * Back-to-back register writes can cause problems on these
12799 * chips; the workaround is to read back all reg writes
12800 * except those to mailbox regs.
12802 * See tg3_write_indirect_reg32().
12804 tp->write32 = tg3_write_flush_reg32;
12807 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12808 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12809 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12810 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12811 tp->write32_rx_mbox = tg3_write_flush_reg32;
12814 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12815 tp->read32 = tg3_read_indirect_reg32;
12816 tp->write32 = tg3_write_indirect_reg32;
12817 tp->read32_mbox = tg3_read_indirect_mbox;
12818 tp->write32_mbox = tg3_write_indirect_mbox;
12819 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12820 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12825 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12826 pci_cmd &= ~PCI_COMMAND_MEMORY;
12827 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12829 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12830 tp->read32_mbox = tg3_read32_mbox_5906;
12831 tp->write32_mbox = tg3_write32_mbox_5906;
12832 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12833 tp->write32_rx_mbox = tg3_write32_mbox_5906;
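/* If plain register writes are already routed through config space, or
 * this is a PCI-X 5700/5701, access SRAM through the config-space
 * memory window as well.
 */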
12836 if (tp->write32 == tg3_write_indirect_reg32 ||
12837 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12838 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12840 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12842 /* Get eeprom hw config before calling tg3_set_power_state().
12843 * In particular, the TG3_FLG2_IS_NIC flag must be
12844 * determined before calling tg3_set_power_state() so that
12845 * we know whether or not to switch out of Vaux power.
12846 * When the flag is set, it means that GPIO1 is used for eeprom
12847 * write protect and also implies that it is a LOM where GPIOs
12848 * are not used to switch power.
12850 tg3_get_eeprom_hw_cfg(tp);
12852 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12853 /* Allow reads and writes to the
12854 * APE register and memory space.
12856 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12857 PCISTATE_ALLOW_APE_SHMEM_WR;
12858 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12862 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12863 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12864 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12865 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12866 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12867 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12869 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12870 * GPIO1 driven high will bring 5700's external PHY out of reset.
12871 * It is also used as eeprom write protect on LOMs.
12873 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12874 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12875 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12876 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12877 GRC_LCLCTRL_GPIO_OUTPUT1);
12878 /* Unused GPIO3 must be driven as output on 5752 because there
12879 * are no pull-up resistors on unused GPIO pins.
12881 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12882 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12884 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12885 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12886 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12888 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
12889 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
12890 /* Turn off the debug UART. */
12891 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12892 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12893 /* Keep VMain power. */
12894 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12895 GRC_LCLCTRL_GPIO_OUTPUT0;
12898 /* Force the chip into D0. */
12899 err = tg3_set_power_state(tp, PCI_D0);
12901 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12902 pci_name(tp->pdev));
12906 /* Derive initial jumbo mode from MTU assigned in
12907 * ether_setup() via the alloc_etherdev() call
12909 if (tp->dev->mtu > ETH_DATA_LEN &&
12910 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12911 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12913 /* Determine WakeOnLan speed to use. */
12914 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12915 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12916 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12917 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12918 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12920 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12924 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
12926 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
12927 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12928 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12929 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12930 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12931 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
12932 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12933 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12935 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12936 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12937 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12938 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12939 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12941 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12942 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
12943 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12944 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
12945 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
12946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12950 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12951 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12952 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12953 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12954 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12956 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12960 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12961 tp->phy_otp = tg3_read_otp_phycfg(tp);
12962 if (tp->phy_otp == 0)
12963 tp->phy_otp = TG3_OTP_DEFAULT;
12966 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12967 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12969 tp->mi_mode = MAC_MI_MODE_BASE;
12971 tp->coalesce_mode = 0;
12972 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12973 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12974 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12978 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12980 err = tg3_mdio_init(tp);
12984 /* Initialize data/descriptor byte/word swapping. */
12985 val = tr32(GRC_MODE);
12986 val &= GRC_MODE_HOST_STACKUP;
12987 tw32(GRC_MODE, val | tp->grc_mode);
12989 tg3_switch_clocks(tp);
12991 /* Clear this out for sanity. */
12992 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12994 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12996 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12997 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12998 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13000 if (chiprevid == CHIPREV_ID_5701_A0 ||
13001 chiprevid == CHIPREV_ID_5701_B0 ||
13002 chiprevid == CHIPREV_ID_5701_B2 ||
13003 chiprevid == CHIPREV_ID_5701_B5) {
13004 void __iomem *sram_base;
13006 /* Write some dummy words into the SRAM status block
13007 * area and see if it reads back correctly. If the return
13008 * value is bad, force enable the PCIX workaround.
13010 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13012 writel(0x00000000, sram_base);
13013 writel(0x00000000, sram_base + 4);
13014 writel(0xffffffff, sram_base + 4);
13015 if (readl(sram_base) != 0x00000000)
13016 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13021 tg3_nvram_init(tp);
13023 grc_misc_cfg = tr32(GRC_MISC_CFG);
13024 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13026 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13027 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13028 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13029 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13031 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13032 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13033 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13034 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13035 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13036 HOSTCC_MODE_CLRTICK_TXBD);
13038 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13039 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13040 tp->misc_host_ctrl);
13043 /* Preserve the APE MAC_MODE bits */
13044 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13045 tp->mac_mode = tr32(MAC_MODE) |
13046 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13048 tp->mac_mode = TG3_DEF_MAC_MODE;
13050 /* These devices are limited to 10/100 only */
13051 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13052 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13053 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13054 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13055 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13056 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13057 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13058 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13059 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13060 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13061 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13062 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13063 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13064 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13066 err = tg3_phy_probe(tp);
13068 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
13069 pci_name(tp->pdev), err);
13070 /* ... but do not return immediately ... */
13074 tg3_read_partno(tp);
13075 tg3_read_fw_ver(tp);
13077 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13078 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13081 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13083 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13086 /* 5700 {AX,BX} chips have a broken status block link
13087 * change bit implementation, so we must use the
13088 * status register in those cases.
13090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13091 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13093 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13095 /* The led_ctrl is set during tg3_phy_probe; here we might
13096 * have to force the link status polling mechanism based
13097 * upon subsystem IDs.
13099 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13101 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13102 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13103 TG3_FLAG_USE_LINKCHG_REG);
13106 /* For all SERDES we poll the MAC status register. */
13107 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13108 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13110 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13112 tp->rx_offset = NET_IP_ALIGN;
13113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13114 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
13117 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13119 /* Increment the rx prod index on the rx std ring by at most
13120 * 8 for these chips to work around hw errata.
13122 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13123 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13124 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13125 tp->rx_std_max_post = 8;
13127 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13128 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13129 PCIE_PWR_MGMT_L1_THRESH_MSK;
13134 #ifdef CONFIG_SPARC
13135 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13137 struct net_device *dev = tp->dev;
13138 struct pci_dev *pdev = tp->pdev;
13139 struct device_node *dp = pci_device_to_OF_node(pdev);
13140 const unsigned char *addr;
13143 addr = of_get_property(dp, "local-mac-address", &len);
13144 if (addr && len == 6) {
13145 memcpy(dev->dev_addr, addr, 6);
13146 memcpy(dev->perm_addr, dev->dev_addr, 6);
13152 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13154 struct net_device *dev = tp->dev;
13156 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13157 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13162 static int __devinit tg3_get_device_address(struct tg3 *tp)
13164 struct net_device *dev = tp->dev;
13165 u32 hi, lo, mac_offset;
13168 #ifdef CONFIG_SPARC
13169 if (!tg3_get_macaddr_sparc(tp))
13174 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13175 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13176 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13178 if (tg3_nvram_lock(tp))
13179 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13181 tg3_nvram_unlock(tp);
13182 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13183 if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
13185 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13188 /* First try to get it from MAC address mailbox. */
13189 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
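/* A value of 0x484b ('HK') in the upper half marks a valid entry in
 * the MAC address mailbox.
 */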
13190 if ((hi >> 16) == 0x484b) {
13191 dev->dev_addr[0] = (hi >> 8) & 0xff;
13192 dev->dev_addr[1] = (hi >> 0) & 0xff;
13194 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13195 dev->dev_addr[2] = (lo >> 24) & 0xff;
13196 dev->dev_addr[3] = (lo >> 16) & 0xff;
13197 dev->dev_addr[4] = (lo >> 8) & 0xff;
13198 dev->dev_addr[5] = (lo >> 0) & 0xff;
13200 /* Some old bootcode may report a 0 MAC address in SRAM */
13201 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13204 /* Next, try NVRAM. */
13205 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13206 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13207 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13208 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13209 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13211 /* Finally just fetch it out of the MAC control regs. */
13213 hi = tr32(MAC_ADDR_0_HIGH);
13214 lo = tr32(MAC_ADDR_0_LOW);
13216 dev->dev_addr[5] = lo & 0xff;
13217 dev->dev_addr[4] = (lo >> 8) & 0xff;
13218 dev->dev_addr[3] = (lo >> 16) & 0xff;
13219 dev->dev_addr[2] = (lo >> 24) & 0xff;
13220 dev->dev_addr[1] = hi & 0xff;
13221 dev->dev_addr[0] = (hi >> 8) & 0xff;
13225 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13226 #ifdef CONFIG_SPARC
13227 if (!tg3_get_default_macaddr_sparc(tp))
13232 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13236 #define BOUNDARY_SINGLE_CACHELINE 1
13237 #define BOUNDARY_MULTI_CACHELINE 2
13239 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13241 int cacheline_size;
13245 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13247 cacheline_size = 1024;
13249 cacheline_size = (int) byte * 4;
13251 /* On 5703 and later chips, the boundary bits have no effect. */
13254 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13255 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13256 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13259 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13260 goal = BOUNDARY_MULTI_CACHELINE;
13262 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13263 goal = BOUNDARY_SINGLE_CACHELINE;
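/*
 * Rough intent of the two goals (a descriptive sketch, not chip docs): with
 * BOUNDARY_SINGLE_CACHELINE the switches below choose read/write DMA
 * boundaries equal to one cache line, so a burst never crosses a cache-line
 * boundary; with BOUNDARY_MULTI_CACHELINE the code appears to fall through
 * to a larger boundary spanning several cache lines instead.
 */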
13272 /* PCI controllers on most RISC systems tend to disconnect
13273 * when a device tries to burst across a cache-line boundary.
13274 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13276 * Unfortunately, for PCI-E there are only limited
13277 * write-side controls for this, and thus for reads
13278 * we will still get the disconnects. We'll also waste
13279 * these PCI cycles for both read and write for chips
13280 * other than 5700 and 5701, which do not implement the boundary bits. */
13283 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13284 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13285 switch (cacheline_size) {
13290 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13291 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13292 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13294 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13295 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13300 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13301 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13305 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13306 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13309 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13310 switch (cacheline_size) {
13314 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13315 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13316 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13322 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13323 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13327 switch (cacheline_size) {
13329 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13330 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13331 DMA_RWCTRL_WRITE_BNDRY_16);
13336 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13337 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13338 DMA_RWCTRL_WRITE_BNDRY_32);
13343 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13344 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13345 DMA_RWCTRL_WRITE_BNDRY_64);
13350 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13351 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13352 DMA_RWCTRL_WRITE_BNDRY_128);
13357 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13358 DMA_RWCTRL_WRITE_BNDRY_256);
13361 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13362 DMA_RWCTRL_WRITE_BNDRY_512);
13366 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13367 DMA_RWCTRL_WRITE_BNDRY_1024);
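/*
 * Worked example (illustrative only): a PCI_CACHE_LINE_SIZE config byte of
 * 0x10 means 16 dwords, so cacheline_size = 0x10 * 4 = 64 bytes.  On a plain
 * PCI bus with goal == BOUNDARY_SINGLE_CACHELINE, the default switch above
 * then ORs in DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, so every
 * DMA burst stops at a 64-byte boundary.
 */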
13376 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13378 struct tg3_internal_buffer_desc test_desc;
13379 u32 sram_dma_descs;
13382 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13384 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13385 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13386 tw32(RDMAC_STATUS, 0);
13387 tw32(WDMAC_STATUS, 0);
13389 tw32(BUFMGR_MODE, 0);
13390 tw32(FTQ_RESET, 0);
13392 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13393 test_desc.addr_lo = buf_dma & 0xffffffff;
13394 test_desc.nic_mbuf = 0x00002100;
13395 test_desc.len = size;
13398 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13399 * the *second* time the tg3 driver was getting loaded after an
13402 * Broadcom tells me:
13403 * ...the DMA engine is connected to the GRC block and a DMA
13404 * reset may affect the GRC block in some unpredictable way...
13405 * The behavior of resets to individual blocks has not been tested.
13407 * Broadcom noted the GRC reset will also reset all sub-components.
13410 test_desc.cqid_sqid = (13 << 8) | 2;
13412 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13415 test_desc.cqid_sqid = (16 << 8) | 7;
13417 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13420 test_desc.flags = 0x00000005;
13422 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13425 val = *(((u32 *)&test_desc) + i);
13426 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13427 sram_dma_descs + (i * sizeof(u32)));
13428 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13430 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
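/*
 * Note (added, hedged): the loop above pokes the internal buffer descriptor
 * into NIC SRAM one 32-bit word at a time through the indirect register
 * window: TG3PCI_MEM_WIN_BASE_ADDR selects the SRAM address and
 * TG3PCI_MEM_WIN_DATA carries the data, with the window reset to 0 afterwards
 * so normal windowed accesses are presumably left undisturbed.
 */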
13433 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13435 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13439 for (i = 0; i < 40; i++) {
13443 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13445 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13446 if ((val & 0xffff) == sram_dma_descs) {
13457 #define TEST_BUFFER_SIZE 0x2000
13459 static int __devinit tg3_test_dma(struct tg3 *tp)
13461 dma_addr_t buf_dma;
13462 u32 *buf, saved_dma_rwctrl;
13465 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13471 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13472 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13474 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13476 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13477 /* DMA read watermark not used on PCIE */
13478 tp->dma_rwctrl |= 0x00180000;
13479 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13481 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13482 tp->dma_rwctrl |= 0x003f0000;
13484 tp->dma_rwctrl |= 0x003f000f;
13486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13488 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13489 u32 read_water = 0x7;
13491 /* If the 5704 is behind the EPB bridge, we can
13492 * do the less restrictive ONE_DMA workaround for
13493 * better performance.
13495 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13497 tp->dma_rwctrl |= 0x8000;
13498 else if (ccval == 0x6 || ccval == 0x7)
13499 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13503 /* Set bit 23 to enable PCIX hw bug fix */
13505 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13506 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13508 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13509 /* 5780 always in PCIX mode */
13510 tp->dma_rwctrl |= 0x00144000;
13511 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13512 /* 5714 always in PCIX mode */
13513 tp->dma_rwctrl |= 0x00148000;
13515 tp->dma_rwctrl |= 0x001b000f;
13519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13521 tp->dma_rwctrl &= 0xfffffff0;
13523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13525 /* Remove this if it causes problems for some boards. */
13526 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13528 /* On 5700/5701 chips, we need to set this bit.
13529 * Otherwise the chip will issue cacheline transactions
13530 * to streamable DMA memory without all of the byte
13531 * enables turned on. This is an error on several
13532 * RISC PCI controllers, in particular sparc64.
13534 * On 5703/5704 chips, this bit has been reassigned
13535 * a different meaning. In particular, it is used
13536 * on those chips to enable a PCI-X workaround.
13538 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13541 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13544 /* Unneeded, already done by tg3_get_invariants. */
13545 tg3_switch_clocks(tp);
13549 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13550 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13553 /* It is best to perform the DMA test with the maximum write burst size
13554 * to expose the 5700/5701 write DMA bug.
13556 saved_dma_rwctrl = tp->dma_rwctrl;
13557 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13558 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13563 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13566 /* Send the buffer to the chip. */
13567 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13569 printk(KERN_ERR "tg3_test_dma() Write of the buffer failed, err = %d\n", ret);
13574 /* Validate that the data reached card RAM correctly. */
13575 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13577 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13578 if (le32_to_cpu(val) != p[i]) {
13579 printk(KERN_ERR "tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
13580 /* ret = -ENODEV here? */
13585 /* Now read it back. */
13586 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13588 printk(KERN_ERR "tg3_test_dma() Read of the buffer failed, err = %d\n", ret);
13594 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13598 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13599 DMA_RWCTRL_WRITE_BNDRY_16) {
13600 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13601 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13602 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13605 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13611 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13617 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13618 DMA_RWCTRL_WRITE_BNDRY_16) {
13619 static struct pci_device_id dma_wait_state_chipsets[] = {
13620 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13621 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13625 /* The DMA test passed without adjusting the DMA boundary;
13626 * now look for chipsets that are known to expose the
13627 * DMA bug without failing the test.
13629 if (pci_dev_present(dma_wait_state_chipsets)) {
13630 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13631 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13634 /* Safe to use the calculated DMA boundary. */
13635 tp->dma_rwctrl = saved_dma_rwctrl;
13637 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13641 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
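/*
 * Summary of tg3_test_dma() (descriptive comment, added): allocate a coherent
 * TEST_BUFFER_SIZE buffer, derive dma_rwctrl from the chip type and
 * tg3_calc_dma_bndry(), DMA the test pattern into NIC SRAM at 0x2100, verify
 * it through tg3_read_mem(), DMA it back, and verify again.  Each 32-bit word
 * is assumed to hold its own index, since the read-back checks compare p[i]
 * against i.  Only on 5700/5701 is the boundary trial run; any corruption, or
 * a host bridge from the dma_wait_state_chipsets list above, forces the
 * conservative 16-byte write boundary.
 */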
13646 static void __devinit tg3_init_link_config(struct tg3 *tp)
13648 tp->link_config.advertising =
13649 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13650 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13651 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13652 ADVERTISED_Autoneg | ADVERTISED_MII);
13653 tp->link_config.speed = SPEED_INVALID;
13654 tp->link_config.duplex = DUPLEX_INVALID;
13655 tp->link_config.autoneg = AUTONEG_ENABLE;
13656 tp->link_config.active_speed = SPEED_INVALID;
13657 tp->link_config.active_duplex = DUPLEX_INVALID;
13658 tp->link_config.phy_is_low_power = 0;
13659 tp->link_config.orig_speed = SPEED_INVALID;
13660 tp->link_config.orig_duplex = DUPLEX_INVALID;
13661 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13664 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13666 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
13667 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
13668 tp->bufmgr_config.mbuf_read_dma_low_water =
13669 DEFAULT_MB_RDMA_LOW_WATER_5705;
13670 tp->bufmgr_config.mbuf_mac_rx_low_water =
13671 DEFAULT_MB_MACRX_LOW_WATER_5705;
13672 tp->bufmgr_config.mbuf_high_water =
13673 DEFAULT_MB_HIGH_WATER_5705;
13674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13675 tp->bufmgr_config.mbuf_mac_rx_low_water =
13676 DEFAULT_MB_MACRX_LOW_WATER_5906;
13677 tp->bufmgr_config.mbuf_high_water =
13678 DEFAULT_MB_HIGH_WATER_5906;
13681 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13682 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13683 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13684 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13685 tp->bufmgr_config.mbuf_high_water_jumbo =
13686 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13688 tp->bufmgr_config.mbuf_read_dma_low_water =
13689 DEFAULT_MB_RDMA_LOW_WATER;
13690 tp->bufmgr_config.mbuf_mac_rx_low_water =
13691 DEFAULT_MB_MACRX_LOW_WATER;
13692 tp->bufmgr_config.mbuf_high_water =
13693 DEFAULT_MB_HIGH_WATER;
13695 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13696 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13697 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13698 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13699 tp->bufmgr_config.mbuf_high_water_jumbo =
13700 DEFAULT_MB_HIGH_WATER_JUMBO;
13703 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13704 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13707 static char * __devinit tg3_phy_string(struct tg3 *tp)
13709 switch (tp->phy_id & PHY_ID_MASK) {
13710 case PHY_ID_BCM5400: return "5400";
13711 case PHY_ID_BCM5401: return "5401";
13712 case PHY_ID_BCM5411: return "5411";
13713 case PHY_ID_BCM5701: return "5701";
13714 case PHY_ID_BCM5703: return "5703";
13715 case PHY_ID_BCM5704: return "5704";
13716 case PHY_ID_BCM5705: return "5705";
13717 case PHY_ID_BCM5750: return "5750";
13718 case PHY_ID_BCM5752: return "5752";
13719 case PHY_ID_BCM5714: return "5714";
13720 case PHY_ID_BCM5780: return "5780";
13721 case PHY_ID_BCM5755: return "5755";
13722 case PHY_ID_BCM5787: return "5787";
13723 case PHY_ID_BCM5784: return "5784";
13724 case PHY_ID_BCM5756: return "5722/5756";
13725 case PHY_ID_BCM5906: return "5906";
13726 case PHY_ID_BCM5761: return "5761";
13727 case PHY_ID_BCM8002: return "8002/serdes";
13728 case 0: return "serdes";
13729 default: return "unknown";
13733 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13735 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13736 strcpy(str, "PCI Express");
13738 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13739 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13741 strcpy(str, "PCIX:");
13743 if ((clock_ctrl == 7) ||
13744 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13745 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13746 strcat(str, "133MHz");
13747 else if (clock_ctrl == 0)
13748 strcat(str, "33MHz");
13749 else if (clock_ctrl == 2)
13750 strcat(str, "50MHz");
13751 else if (clock_ctrl == 4)
13752 strcat(str, "66MHz");
13753 else if (clock_ctrl == 6)
13754 strcat(str, "100MHz");
13756 strcpy(str, "PCI:");
13757 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13758 strcat(str, "66MHz");
13760 strcat(str, "33MHz");
13762 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13763 strcat(str, ":32-bit");
13765 strcat(str, ":64-bit");
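/*
 * Example strings produced by tg3_bus_string() (illustrative): "PCI Express",
 * "PCIX:133MHz:64-bit", "PCI:66MHz:32-bit".  The caller is assumed to pass a
 * buffer large enough for the longest of these.
 */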
13769 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13771 struct pci_dev *peer;
13772 unsigned int func, devnr = tp->pdev->devfn & ~7;
13774 for (func = 0; func < 8; func++) {
13775 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13776 if (peer && peer != tp->pdev)
13780 /* 5704 can be configured in single-port mode; set peer to
13781 * tp->pdev in that case.
13789 * We don't need to keep the refcount elevated; there's no way
13790 * to remove one half of this device without removing the other
13797 static void __devinit tg3_init_coal(struct tg3 *tp)
13799 struct ethtool_coalesce *ec = &tp->coal;
13801 memset(ec, 0, sizeof(*ec));
13802 ec->cmd = ETHTOOL_GCOALESCE;
13803 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13804 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13805 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13806 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13807 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13808 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13809 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13810 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13811 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13813 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13814 HOSTCC_MODE_CLRTICK_TXBD)) {
13815 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13816 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13817 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13818 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13821 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13822 ec->rx_coalesce_usecs_irq = 0;
13823 ec->tx_coalesce_usecs_irq = 0;
13824 ec->stats_block_coalesce_usecs = 0;
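/*
 * Usage note (added, assumes standard ethtool behaviour): the defaults set up
 * here are what `ethtool -c ethX` reports after probe, and they can be
 * overridden at runtime with `ethtool -C ethX rx-usecs N ...`, which goes
 * through the driver's coalescing ethtool ops rather than this init path.
 */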
13828 static const struct net_device_ops tg3_netdev_ops = {
13829 .ndo_open = tg3_open,
13830 .ndo_stop = tg3_close,
13831 .ndo_start_xmit = tg3_start_xmit,
13832 .ndo_get_stats = tg3_get_stats,
13833 .ndo_validate_addr = eth_validate_addr,
13834 .ndo_set_multicast_list = tg3_set_rx_mode,
13835 .ndo_set_mac_address = tg3_set_mac_addr,
13836 .ndo_do_ioctl = tg3_ioctl,
13837 .ndo_tx_timeout = tg3_tx_timeout,
13838 .ndo_change_mtu = tg3_change_mtu,
13839 #if TG3_VLAN_TAG_USED
13840 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13842 #ifdef CONFIG_NET_POLL_CONTROLLER
13843 .ndo_poll_controller = tg3_poll_controller,
13847 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
13848 .ndo_open = tg3_open,
13849 .ndo_stop = tg3_close,
13850 .ndo_start_xmit = tg3_start_xmit_dma_bug,
13851 .ndo_get_stats = tg3_get_stats,
13852 .ndo_validate_addr = eth_validate_addr,
13853 .ndo_set_multicast_list = tg3_set_rx_mode,
13854 .ndo_set_mac_address = tg3_set_mac_addr,
13855 .ndo_do_ioctl = tg3_ioctl,
13856 .ndo_tx_timeout = tg3_tx_timeout,
13857 .ndo_change_mtu = tg3_change_mtu,
13858 #if TG3_VLAN_TAG_USED
13859 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13861 #ifdef CONFIG_NET_POLL_CONTROLLER
13862 .ndo_poll_controller = tg3_poll_controller,
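/*
 * Descriptive note (added): the two ops tables above differ only in
 * .ndo_start_xmit.  tg3_init_one() below selects tg3_netdev_ops for
 * 5755-and-newer parts and tg3_netdev_ops_dma_bug, which uses the workaround
 * transmit path, for everything else.
 */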
13866 static int __devinit tg3_init_one(struct pci_dev *pdev,
13867 const struct pci_device_id *ent)
13869 static int tg3_version_printed = 0;
13870 struct net_device *dev;
13872 int i, err, pm_cap;
13873 u32 sndmbx, rcvmbx, intmbx;
13875 u64 dma_mask, persist_dma_mask;
13877 if (tg3_version_printed++ == 0)
13878 printk(KERN_INFO "%s", version);
13880 err = pci_enable_device(pdev);
13882 printk(KERN_ERR PFX "Cannot enable PCI device, "
13887 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13889 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13891 goto err_out_disable_pdev;
13894 pci_set_master(pdev);
13896 /* Find power-management capability. */
13897 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13899 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13902 goto err_out_free_res;
13905 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
13907 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13909 goto err_out_free_res;
13912 SET_NETDEV_DEV(dev, &pdev->dev);
13914 #if TG3_VLAN_TAG_USED
13915 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13918 tp = netdev_priv(dev);
13921 tp->pm_cap = pm_cap;
13922 tp->rx_mode = TG3_DEF_RX_MODE;
13923 tp->tx_mode = TG3_DEF_TX_MODE;
13926 tp->msg_enable = tg3_debug;
13928 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13930 /* The word/byte swap controls here control register access byte
13931 * swapping. DMA data byte swapping is controlled in the GRC_MODE setting below. */
13934 tp->misc_host_ctrl =
13935 MISC_HOST_CTRL_MASK_PCI_INT |
13936 MISC_HOST_CTRL_WORD_SWAP |
13937 MISC_HOST_CTRL_INDIR_ACCESS |
13938 MISC_HOST_CTRL_PCISTATE_RW;
13940 /* The NONFRM (non-frame) byte/word swap controls take effect
13941 * on descriptor entries, i.e. anything that isn't packet data.
13943 * The StrongARM chips on the board (one for tx, one for rx)
13944 * are running in big-endian mode.
13946 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13947 GRC_MODE_WSWAP_NONFRM_DATA);
13948 #ifdef __BIG_ENDIAN
13949 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13951 spin_lock_init(&tp->lock);
13952 spin_lock_init(&tp->indirect_lock);
13953 INIT_WORK(&tp->reset_task, tg3_reset_task);
13955 tp->regs = pci_ioremap_bar(pdev, BAR_0);
13957 printk(KERN_ERR PFX "Cannot map device registers, "
13960 goto err_out_free_dev;
13963 tg3_init_link_config(tp);
13965 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13966 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13968 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
13969 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
13970 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
13971 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
13972 struct tg3_napi *tnapi = &tp->napi[i];
13975 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
13977 tnapi->int_mbox = intmbx;
13983 tnapi->consmbox = rcvmbx;
13984 tnapi->prodmbox = sndmbx;
13987 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
13989 tnapi->coal_now = HOSTCC_MODE_NOW;
13991 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
13995 * If we support MSIX, we'll be using RSS. If we're using
13996 * RSS, the first vector only handles link interrupts and the
13997 * remaining vectors handle rx and tx interrupts. Reuse the
13998 * mailbox values for the next iteration. The values we set up
13999 * above are still useful for the single vectored mode.
14012 netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
14013 dev->ethtool_ops = &tg3_ethtool_ops;
14014 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14015 dev->irq = pdev->irq;
14017 err = tg3_get_invariants(tp);
14019 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
14021 goto err_out_iounmap;
14024 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
14025 dev->netdev_ops = &tg3_netdev_ops;
14027 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14030 /* The EPB bridge inside 5714, 5715, and 5780 and any
14031 * device behind the EPB cannot support DMA addresses > 40-bit.
14032 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14033 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14034 * do DMA address check in tg3_start_xmit().
14036 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14037 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14038 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14039 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14040 #ifdef CONFIG_HIGHMEM
14041 dma_mask = DMA_BIT_MASK(64);
14044 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
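/*
 * Illustrative examples of the mask selection above: a 5788 is capped at
 * 32-bit DMA; a 5714/5715/5780, or anything behind its EPB bridge (flagged by
 * TG3_FLAG_40BIT_DMA_BUG), gets a 40-bit persistent mask, with the streaming
 * mask widened to 64-bit only under CONFIG_HIGHMEM so the per-packet address
 * check noted in the comment above can be done in tg3_start_xmit(); all other
 * chips use full 64-bit masks.
 */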
14046 /* Configure DMA attributes. */
14047 if (dma_mask > DMA_BIT_MASK(32)) {
14048 err = pci_set_dma_mask(pdev, dma_mask);
14050 dev->features |= NETIF_F_HIGHDMA;
14051 err = pci_set_consistent_dma_mask(pdev,
14054 printk(KERN_ERR PFX "Unable to obtain 64 bit "
14055 "DMA for consistent allocations\n");
14056 goto err_out_iounmap;
14060 if (err || dma_mask == DMA_BIT_MASK(32)) {
14061 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14063 printk(KERN_ERR PFX "No usable DMA configuration, "
14065 goto err_out_iounmap;
14069 tg3_init_bufmgr_config(tp);
14071 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14072 tp->fw_needed = FIRMWARE_TG3;
14074 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
14075 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14077 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14079 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
14080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14081 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
14082 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
14084 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
14085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14086 tp->fw_needed = FIRMWARE_TG3TSO5;
14088 tp->fw_needed = FIRMWARE_TG3TSO;
14091 /* TSO is on by default on chips that support hardware TSO.
14092 * Firmware TSO on older chips gives lower performance, so it
14093 * is off by default, but can be enabled using ethtool.
14095 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
14096 if (dev->features & NETIF_F_IP_CSUM)
14097 dev->features |= NETIF_F_TSO;
14098 if ((dev->features & NETIF_F_IPV6_CSUM) &&
14099 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
14100 dev->features |= NETIF_F_TSO6;
14101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14102 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14103 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
14107 dev->features |= NETIF_F_TSO_ECN;
14111 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14112 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14113 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14114 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14115 tp->rx_pending = 63;
14118 err = tg3_get_device_address(tp);
14120 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
14125 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14126 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14127 if (!tp->aperegs) {
14128 printk(KERN_ERR PFX "Cannot map APE registers, "
14134 tg3_ape_lock_init(tp);
14136 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14137 tg3_read_dash_ver(tp);
14141 * Reset the chip in case the UNDI or EFI driver did not shut it down.
14142 * The DMA self test will enable WDMAC and we'll see (spurious)
14143 * pending DMA on the PCI bus at that point.
14145 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14146 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14147 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14148 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14151 err = tg3_test_dma(tp);
14153 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
14154 goto err_out_apeunmap;
14157 /* flow control autonegotiation is default behavior */
14158 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14159 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14163 pci_set_drvdata(pdev, dev);
14165 err = register_netdev(dev);
14167 printk(KERN_ERR PFX "Cannot register net device, "
14169 goto err_out_apeunmap;
14172 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14174 tp->board_part_number,
14175 tp->pci_chip_rev_id,
14176 tg3_bus_string(tp, str),
14179 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14180 struct phy_device *phydev;
14181 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14183 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14184 tp->dev->name, phydev->drv->name,
14185 dev_name(&phydev->dev));
14188 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14189 tp->dev->name, tg3_phy_string(tp),
14190 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14191 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14192 "10/100/1000Base-T")),
14193 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14195 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14197 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14198 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14199 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14200 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14201 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14202 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14203 dev->name, tp->dma_rwctrl,
14204 (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
14205 (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
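/*
 * Example of the probe output above (illustrative placeholder values only):
 *
 *   eth0: Tigon3 [partno(BCM95xxx) rev xxxx] (PCI Express) MAC address 00:10:18:xx:xx:xx
 *   eth0: RXcsums[1] LinkChgREG[0] MIirq[0] ASF[0] TSOcap[1]
 *   eth0: dma_rwctrl[76180000] dma_mask[64-bit]
 */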
14211 iounmap(tp->aperegs);
14212 tp->aperegs = NULL;
14217 release_firmware(tp->fw);
14229 pci_release_regions(pdev);
14231 err_out_disable_pdev:
14232 pci_disable_device(pdev);
14233 pci_set_drvdata(pdev, NULL);
14237 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14239 struct net_device *dev = pci_get_drvdata(pdev);
14242 struct tg3 *tp = netdev_priv(dev);
14245 release_firmware(tp->fw);
14247 flush_scheduled_work();
14249 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14254 unregister_netdev(dev);
14256 iounmap(tp->aperegs);
14257 tp->aperegs = NULL;
14264 pci_release_regions(pdev);
14265 pci_disable_device(pdev);
14266 pci_set_drvdata(pdev, NULL);
14270 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14272 struct net_device *dev = pci_get_drvdata(pdev);
14273 struct tg3 *tp = netdev_priv(dev);
14274 pci_power_t target_state;
14277 /* PCI register 4 needs to be saved whether netif_running() or not.
14278 * MSI address and data need to be saved if using MSI and
14281 pci_save_state(pdev);
14283 if (!netif_running(dev))
14286 flush_scheduled_work();
14288 tg3_netif_stop(tp);
14290 del_timer_sync(&tp->timer);
14292 tg3_full_lock(tp, 1);
14293 tg3_disable_ints(tp);
14294 tg3_full_unlock(tp);
14296 netif_device_detach(dev);
14298 tg3_full_lock(tp, 0);
14299 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14300 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14301 tg3_full_unlock(tp);
14303 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14305 err = tg3_set_power_state(tp, target_state);
14309 tg3_full_lock(tp, 0);
14311 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14312 err2 = tg3_restart_hw(tp, 1);
14316 tp->timer.expires = jiffies + tp->timer_offset;
14317 add_timer(&tp->timer);
14319 netif_device_attach(dev);
14320 tg3_netif_start(tp);
14323 tg3_full_unlock(tp);
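/*
 * Flow of tg3_suspend() above, as a descriptive note (added): always save PCI
 * state; if the interface is running, stop the data path and timer, disable
 * interrupts, detach the device, halt the chip, and drop to the target power
 * state.  The code after the halt is the error path: if the power-state
 * change fails, the hardware is restarted and the interface reattached so the
 * device keeps working.
 */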
14332 static int tg3_resume(struct pci_dev *pdev)
14334 struct net_device *dev = pci_get_drvdata(pdev);
14335 struct tg3 *tp = netdev_priv(dev);
14338 pci_restore_state(tp->pdev);
14340 if (!netif_running(dev))
14343 err = tg3_set_power_state(tp, PCI_D0);
14347 netif_device_attach(dev);
14349 tg3_full_lock(tp, 0);
14351 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14352 err = tg3_restart_hw(tp, 1);
14356 tp->timer.expires = jiffies + tp->timer_offset;
14357 add_timer(&tp->timer);
14359 tg3_netif_start(tp);
14362 tg3_full_unlock(tp);
14370 static struct pci_driver tg3_driver = {
14371 .name = DRV_MODULE_NAME,
14372 .id_table = tg3_pci_tbl,
14373 .probe = tg3_init_one,
14374 .remove = __devexit_p(tg3_remove_one),
14375 .suspend = tg3_suspend,
14376 .resume = tg3_resume
14379 static int __init tg3_init(void)
14381 return pci_register_driver(&tg3_driver);
14384 static void __exit tg3_cleanup(void)
14386 pci_unregister_driver(&tg3_driver);
14389 module_init(tg3_init);
14390 module_exit(tg3_cleanup);