/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2009 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <asm/idprom.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.100"
#define DRV_MODULE_RELDATE	"August 25, 2009"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
90 #define TG3_TX_TIMEOUT (5 * HZ)
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
106 /* Do not place this n-ring entries value into the tp struct itself,
107 * we really want to expose these constants to GCC so that modulo et
108 * al. operations are done with shifts and masks instead of with
109 * hw multiply/modulo instructions. Another solution would be to
110 * replace things like '% foo' with '& (foo - 1)'.
112 #define TG3_RX_RCB_RING_SIZE(tp) \
113 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115 #define TG3_TX_RING_SIZE 512
116 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123 TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
134 #define TG3_RAW_IP_ALIGN 2
136 /* number of ETHTOOL_GSTATS u64's */
137 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
139 #define TG3_NUM_TEST 6
141 #define FIRMWARE_TG3 "tigon/tg3.bin"
142 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
143 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
145 static char version[] __devinitdata =
146 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
148 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
149 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
150 MODULE_LICENSE("GPL");
151 MODULE_VERSION(DRV_MODULE_VERSION);
152 MODULE_FIRMWARE(FIRMWARE_TG3);
153 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
154 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
157 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
158 module_param(tg3_debug, int, 0);
159 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
161 static struct pci_device_id tg3_pci_tbl[] = {
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
228 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
229 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
230 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
231 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
232 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
233 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
234 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
238 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
240 static const struct {
241 const char string[ETH_GSTRING_LEN];
242 } ethtool_stats_keys[TG3_NUM_STATS] = {
245 { "rx_ucast_packets" },
246 { "rx_mcast_packets" },
247 { "rx_bcast_packets" },
249 { "rx_align_errors" },
250 { "rx_xon_pause_rcvd" },
251 { "rx_xoff_pause_rcvd" },
252 { "rx_mac_ctrl_rcvd" },
253 { "rx_xoff_entered" },
254 { "rx_frame_too_long_errors" },
256 { "rx_undersize_packets" },
257 { "rx_in_length_errors" },
258 { "rx_out_length_errors" },
259 { "rx_64_or_less_octet_packets" },
260 { "rx_65_to_127_octet_packets" },
261 { "rx_128_to_255_octet_packets" },
262 { "rx_256_to_511_octet_packets" },
263 { "rx_512_to_1023_octet_packets" },
264 { "rx_1024_to_1522_octet_packets" },
265 { "rx_1523_to_2047_octet_packets" },
266 { "rx_2048_to_4095_octet_packets" },
267 { "rx_4096_to_8191_octet_packets" },
268 { "rx_8192_to_9022_octet_packets" },
275 { "tx_flow_control" },
277 { "tx_single_collisions" },
278 { "tx_mult_collisions" },
280 { "tx_excessive_collisions" },
281 { "tx_late_collisions" },
282 { "tx_collide_2times" },
283 { "tx_collide_3times" },
284 { "tx_collide_4times" },
285 { "tx_collide_5times" },
286 { "tx_collide_6times" },
287 { "tx_collide_7times" },
288 { "tx_collide_8times" },
289 { "tx_collide_9times" },
290 { "tx_collide_10times" },
291 { "tx_collide_11times" },
292 { "tx_collide_12times" },
293 { "tx_collide_13times" },
294 { "tx_collide_14times" },
295 { "tx_collide_15times" },
296 { "tx_ucast_packets" },
297 { "tx_mcast_packets" },
298 { "tx_bcast_packets" },
299 { "tx_carrier_sense_errors" },
303 { "dma_writeq_full" },
304 { "dma_write_prioq_full" },
308 { "rx_threshold_hit" },
310 { "dma_readq_full" },
311 { "dma_read_prioq_full" },
312 { "tx_comp_queue_full" },
314 { "ring_set_send_prod_index" },
315 { "ring_status_update" },
317 { "nic_avoided_irqs" },
318 { "nic_tx_threshold_hit" }
321 static const struct {
322 const char string[ETH_GSTRING_LEN];
323 } ethtool_test_keys[TG3_NUM_TEST] = {
324 { "nvram test (online) " },
325 { "link test (online) " },
326 { "register test (offline)" },
327 { "memory test (offline)" },
328 { "loopback test (offline)" },
329 { "interrupt test (offline)" },
332 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
334 writel(val, tp->regs + off);
337 static u32 tg3_read32(struct tg3 *tp, u32 off)
339 return (readl(tp->regs + off));
342 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
344 writel(val, tp->aperegs + off);
347 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
349 return (readl(tp->aperegs + off));
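
/* Indirect register access: the target offset is written to the
 * TG3PCI_REG_BASE_ADDR config-space window and the data then moves
 * through TG3PCI_REG_DATA.  indirect_lock serializes the two config
 * cycles so concurrent callers cannot interleave address/data pairs.
 */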
352 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
356 spin_lock_irqsave(&tp->indirect_lock, flags);
357 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
358 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
359 spin_unlock_irqrestore(&tp->indirect_lock, flags);
362 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
364 writel(val, tp->regs + off);
365 readl(tp->regs + off);
368 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
373 spin_lock_irqsave(&tp->indirect_lock, flags);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
375 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
376 spin_unlock_irqrestore(&tp->indirect_lock, flags);
380 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
384 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
385 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
386 TG3_64BIT_REG_LOW, val);
389 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
390 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
391 TG3_64BIT_REG_LOW, val);
395 spin_lock_irqsave(&tp->indirect_lock, flags);
396 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
397 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
398 spin_unlock_irqrestore(&tp->indirect_lock, flags);
400 /* In indirect mode when disabling interrupts, we also need
401 * to clear the interrupt bit in the GRC local ctrl register.
403 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
405 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
406 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
410 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
415 spin_lock_irqsave(&tp->indirect_lock, flags);
416 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
417 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
418 spin_unlock_irqrestore(&tp->indirect_lock, flags);
422 /* usec_wait specifies the wait time in usec when writing to certain registers
423 * where it is unsafe to read back the register without some delay.
424 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
425 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
427 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
429 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
430 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
431 /* Non-posted methods */
432 tp->write32(tp, off, val);
435 tg3_write32(tp, off, val);
440 /* Wait again after the read for the posted method to guarantee that
441 * the wait time is met.
447 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
449 tp->write32_mbox(tp, off, val);
450 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
451 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
452 tp->read32_mbox(tp, off);
455 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
457 void __iomem *mbox = tp->regs + off;
459 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
461 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
465 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
467 return (readl(tp->regs + off + GRCMBOX_BASE));
470 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
472 writel(val, tp->regs + off + GRCMBOX_BASE);
475 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
476 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
477 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
478 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
479 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
481 #define tw32(reg,val) tp->write32(tp, reg, val)
482 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
483 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
484 #define tr32(reg) tp->read32(tp, reg)
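
/* Register access convention used throughout the driver (a summary,
 * not new behavior):
 *
 *	tw32(reg, val)            plain (possibly posted) write
 *	tw32_f(reg, val)          write flushed by reading it back
 *	tw32_wait_f(reg, val, us) flushed write plus a usec delay
 *	tr32(reg)                 read
 *
 * For example, GPIO toggles that need settling time are issued as
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | bit, 100);
 * (illustrative sketch only -- see tg3_frob_aux_power() for real users).
 */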
486 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
490 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
491 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
494 spin_lock_irqsave(&tp->indirect_lock, flags);
495 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
496 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
497 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
499 /* Always leave this as zero. */
500 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
502 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
503 tw32_f(TG3PCI_MEM_WIN_DATA, val);
505 /* Always leave this as zero. */
506 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
508 spin_unlock_irqrestore(&tp->indirect_lock, flags);
511 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
515 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
516 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
521 spin_lock_irqsave(&tp->indirect_lock, flags);
522 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
523 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
524 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
526 /* Always leave this as zero. */
527 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
529 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
530 *val = tr32(TG3PCI_MEM_WIN_DATA);
532 /* Always leave this as zero. */
533 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
535 spin_unlock_irqrestore(&tp->indirect_lock, flags);
538 static void tg3_ape_lock_init(struct tg3 *tp)
542 /* Make sure the driver hasn't any stale locks. */
543 for (i = 0; i < 8; i++)
544 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
545 APE_LOCK_GRANT_DRIVER);
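
/* Request the given APE hardware lock on behalf of the driver: post a
 * request to TG3_APE_LOCK_REQ and poll TG3_APE_LOCK_GRANT for roughly a
 * millisecond.  If the grant never shows up, the request is revoked and
 * an error is returned.
 */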
548 static int tg3_ape_lock(struct tg3 *tp, int locknum)
554 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
558 case TG3_APE_LOCK_GRC:
559 case TG3_APE_LOCK_MEM:
567 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
569 /* Wait for up to 1 millisecond to acquire lock. */
570 for (i = 0; i < 100; i++) {
571 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
572 if (status == APE_LOCK_GRANT_DRIVER)
577 if (status != APE_LOCK_GRANT_DRIVER) {
578 /* Revoke the lock request. */
579 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
580 APE_LOCK_GRANT_DRIVER);
588 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
592 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
596 case TG3_APE_LOCK_GRC:
597 case TG3_APE_LOCK_MEM:
604 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
607 static void tg3_disable_ints(struct tg3 *tp)
609 tw32(TG3PCI_MISC_HOST_CTRL,
610 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
611 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
614 static inline void tg3_cond_int(struct tg3 *tp)
616 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
617 (tp->hw_status->status & SD_STATUS_UPDATED))
618 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
620 tw32(HOSTCC_MODE, tp->coalesce_mode |
621 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
624 static void tg3_enable_ints(struct tg3 *tp)
629 tw32(TG3PCI_MISC_HOST_CTRL,
630 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
631 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
632 (tp->last_tag << 24));
633 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
634 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
635 (tp->last_tag << 24));
639 static inline unsigned int tg3_has_work(struct tg3 *tp)
641 struct tg3_hw_status *sblk = tp->hw_status;
642 unsigned int work_exists = 0;
644 /* check for phy events */
645 if (!(tp->tg3_flags &
646 (TG3_FLAG_USE_LINKCHG_REG |
647 TG3_FLAG_POLL_SERDES))) {
648 if (sblk->status & SD_STATUS_LINK_CHG)
651 /* check for RX/TX work to do */
652 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
653 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
660 * similar to tg3_enable_ints, but it accurately determines whether there
661 * is new work pending and can return without flushing the PIO write
662 * which reenables interrupts
664 static void tg3_restart_ints(struct tg3 *tp)
666 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
670 /* When doing tagged status, this work check is unnecessary.
671 * The last_tag we write above tells the chip which piece of
672 * work we've completed.
674 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
676 tw32(HOSTCC_MODE, tp->coalesce_mode |
677 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
680 static inline void tg3_netif_stop(struct tg3 *tp)
682 tp->dev->trans_start = jiffies; /* prevent tx timeout */
683 napi_disable(&tp->napi);
684 netif_tx_disable(tp->dev);
687 static inline void tg3_netif_start(struct tg3 *tp)
689 netif_wake_queue(tp->dev);
690 /* NOTE: unconditional netif_wake_queue is only appropriate
691 * so long as all callers are assured to have free tx slots
692 * (such as after tg3_init_hw)
694 napi_enable(&tp->napi);
695 tp->hw_status->status |= SD_STATUS_UPDATED;
699 static void tg3_switch_clocks(struct tg3 *tp)
701 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
704 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
705 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
708 orig_clock_ctrl = clock_ctrl;
709 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
710 CLOCK_CTRL_CLKRUN_OENABLE |
712 tp->pci_clock_ctrl = clock_ctrl;
714 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
715 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
716 tw32_wait_f(TG3PCI_CLOCK_CTRL,
717 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
719 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
720 tw32_wait_f(TG3PCI_CLOCK_CTRL,
722 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
724 tw32_wait_f(TG3PCI_CLOCK_CTRL,
725 clock_ctrl | (CLOCK_CTRL_ALTCLK),
728 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
731 #define PHY_BUSY_LOOPS 5000
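
/* Read a PHY register over the MDIO (MI) interface: temporarily disable
 * autopolling, issue a read frame through MAC_MI_COM, and poll the busy
 * bit for up to PHY_BUSY_LOOPS iterations before giving up.
 */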
733 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
739 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
741 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
747 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
748 MI_COM_PHY_ADDR_MASK);
749 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
750 MI_COM_REG_ADDR_MASK);
751 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
753 tw32_f(MAC_MI_COM, frame_val);
755 loops = PHY_BUSY_LOOPS;
758 frame_val = tr32(MAC_MI_COM);
760 if ((frame_val & MI_COM_BUSY) == 0) {
762 frame_val = tr32(MAC_MI_COM);
770 *val = frame_val & MI_COM_DATA_MASK;
774 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
775 tw32_f(MAC_MI_MODE, tp->mi_mode);
782 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
788 if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
789 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
792 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
794 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
798 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
799 MI_COM_PHY_ADDR_MASK);
800 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
801 MI_COM_REG_ADDR_MASK);
802 frame_val |= (val & MI_COM_DATA_MASK);
803 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
805 tw32_f(MAC_MI_COM, frame_val);
807 loops = PHY_BUSY_LOOPS;
810 frame_val = tr32(MAC_MI_COM);
811 if ((frame_val & MI_COM_BUSY) == 0) {
813 frame_val = tr32(MAC_MI_COM);
823 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
824 tw32_f(MAC_MI_MODE, tp->mi_mode);
831 static int tg3_bmcr_reset(struct tg3 *tp)
836 /* OK, reset it, and poll the BMCR_RESET bit until it
837 * clears or we time out.
839 phy_control = BMCR_RESET;
840 err = tg3_writephy(tp, MII_BMCR, phy_control);
846 err = tg3_readphy(tp, MII_BMCR, &phy_control);
850 if ((phy_control & BMCR_RESET) == 0) {
862 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
864 struct tg3 *tp = bp->priv;
867 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
870 if (tg3_readphy(tp, reg, &val))
876 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
878 struct tg3 *tp = bp->priv;
880 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
883 if (tg3_writephy(tp, reg, val))
889 static int tg3_mdio_reset(struct mii_bus *bp)
894 static void tg3_mdio_config_5785(struct tg3 *tp)
897 struct phy_device *phydev;
899 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
900 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
901 case TG3_PHY_ID_BCM50610:
902 val = MAC_PHYCFG2_50610_LED_MODES;
904 case TG3_PHY_ID_BCMAC131:
905 val = MAC_PHYCFG2_AC131_LED_MODES;
907 case TG3_PHY_ID_RTL8211C:
908 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
910 case TG3_PHY_ID_RTL8201E:
911 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
917 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
918 tw32(MAC_PHYCFG2, val);
920 val = tr32(MAC_PHYCFG1);
921 val &= ~(MAC_PHYCFG1_RGMII_INT |
922 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
923 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
924 tw32(MAC_PHYCFG1, val);
929 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
930 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
931 MAC_PHYCFG2_FMODE_MASK_MASK |
932 MAC_PHYCFG2_GMODE_MASK_MASK |
933 MAC_PHYCFG2_ACT_MASK_MASK |
934 MAC_PHYCFG2_QUAL_MASK_MASK |
935 MAC_PHYCFG2_INBAND_ENABLE;
937 tw32(MAC_PHYCFG2, val);
939 val = tr32(MAC_PHYCFG1);
940 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
941 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
942 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
943 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
944 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
945 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
946 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
948 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
949 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
950 tw32(MAC_PHYCFG1, val);
952 val = tr32(MAC_EXT_RGMII_MODE);
953 val &= ~(MAC_RGMII_MODE_RX_INT_B |
954 MAC_RGMII_MODE_RX_QUALITY |
955 MAC_RGMII_MODE_RX_ACTIVITY |
956 MAC_RGMII_MODE_RX_ENG_DET |
957 MAC_RGMII_MODE_TX_ENABLE |
958 MAC_RGMII_MODE_TX_LOWPWR |
959 MAC_RGMII_MODE_TX_RESET);
960 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
961 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
962 val |= MAC_RGMII_MODE_RX_INT_B |
963 MAC_RGMII_MODE_RX_QUALITY |
964 MAC_RGMII_MODE_RX_ACTIVITY |
965 MAC_RGMII_MODE_RX_ENG_DET;
966 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
967 val |= MAC_RGMII_MODE_TX_ENABLE |
968 MAC_RGMII_MODE_TX_LOWPWR |
969 MAC_RGMII_MODE_TX_RESET;
971 tw32(MAC_EXT_RGMII_MODE, val);
974 static void tg3_mdio_start(struct tg3 *tp)
976 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
977 mutex_lock(&tp->mdio_bus->mdio_lock);
978 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
979 mutex_unlock(&tp->mdio_bus->mdio_lock);
982 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
983 tw32_f(MAC_MI_MODE, tp->mi_mode);
986 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
988 tg3_mdio_config_5785(tp);
991 static void tg3_mdio_stop(struct tg3 *tp)
993 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
994 mutex_lock(&tp->mdio_bus->mdio_lock);
995 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
996 mutex_unlock(&tp->mdio_bus->mdio_lock);
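
/* Allocate and register the MDIO bus used when the PHY is managed by
 * phylib (TG3_FLG3_USE_PHYLIB).  The attached PHY is identified by its
 * ID and its interface mode and RGMII in-band flags are set accordingly.
 */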
1000 static int tg3_mdio_init(struct tg3 *tp)
1004 struct phy_device *phydev;
1008 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1009 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1012 tp->mdio_bus = mdiobus_alloc();
1013 if (tp->mdio_bus == NULL)
1016 tp->mdio_bus->name = "tg3 mdio bus";
1017 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1018 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1019 tp->mdio_bus->priv = tp;
1020 tp->mdio_bus->parent = &tp->pdev->dev;
1021 tp->mdio_bus->read = &tg3_mdio_read;
1022 tp->mdio_bus->write = &tg3_mdio_write;
1023 tp->mdio_bus->reset = &tg3_mdio_reset;
1024 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
1025 tp->mdio_bus->irq = &tp->mdio_irq[0];
1027 for (i = 0; i < PHY_MAX_ADDR; i++)
1028 tp->mdio_bus->irq[i] = PHY_POLL;
1030 /* The bus registration will look for all the PHYs on the mdio bus.
1031 * Unfortunately, it does not ensure the PHY is powered up before
1032 * accessing the PHY ID registers. A chip reset is the
1033 * quickest way to bring the device back to an operational state..
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1038 i = mdiobus_register(tp->mdio_bus);
1040 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1042 mdiobus_free(tp->mdio_bus);
1046 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1048 if (!phydev || !phydev->drv) {
1049 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1050 mdiobus_unregister(tp->mdio_bus);
1051 mdiobus_free(tp->mdio_bus);
1055 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1056 case TG3_PHY_ID_BCM57780:
1057 phydev->interface = PHY_INTERFACE_MODE_GMII;
1059 case TG3_PHY_ID_BCM50610:
1060 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1061 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1062 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1063 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1064 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1065 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1067 case TG3_PHY_ID_RTL8211C:
1068 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1070 case TG3_PHY_ID_RTL8201E:
1071 case TG3_PHY_ID_BCMAC131:
1072 phydev->interface = PHY_INTERFACE_MODE_MII;
1073 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
1077 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1080 tg3_mdio_config_5785(tp);
1085 static void tg3_mdio_fini(struct tg3 *tp)
1087 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1088 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1089 mdiobus_unregister(tp->mdio_bus);
1090 mdiobus_free(tp->mdio_bus);
1091 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1095 /* tp->lock is held. */
1096 static inline void tg3_generate_fw_event(struct tg3 *tp)
1100 val = tr32(GRC_RX_CPU_EVENT);
1101 val |= GRC_RX_CPU_DRIVER_EVENT;
1102 tw32_f(GRC_RX_CPU_EVENT, val);
1104 tp->last_event_jiffies = jiffies;
1107 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1109 /* tp->lock is held. */
1110 static void tg3_wait_for_event_ack(struct tg3 *tp)
1113 unsigned int delay_cnt;
1116 /* If enough time has passed, no wait is necessary. */
1117 time_remain = (long)(tp->last_event_jiffies + 1 +
1118 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1120 if (time_remain < 0)
1123 /* Check if we can shorten the wait time. */
1124 delay_cnt = jiffies_to_usecs(time_remain);
1125 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1126 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1127 delay_cnt = (delay_cnt >> 3) + 1;
1129 for (i = 0; i < delay_cnt; i++) {
1130 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1136 /* tp->lock is held. */
1137 static void tg3_ump_link_report(struct tg3 *tp)
1142 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1143 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1146 tg3_wait_for_event_ack(tp);
1148 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1150 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	if (!tg3_readphy(tp, MII_BMCR, &reg))
	if (!tg3_readphy(tp, MII_BMSR, &reg))
1156 val |= (reg & 0xffff);
1157 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
	if (!tg3_readphy(tp, MII_LPA, &reg))
1163 val |= (reg & 0xffff);
1164 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1167 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1171 val |= (reg & 0xffff);
1173 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1179 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1181 tg3_generate_fw_event(tp);
1184 static void tg3_link_report(struct tg3 *tp)
1186 if (!netif_carrier_ok(tp->dev)) {
1187 if (netif_msg_link(tp))
1188 printk(KERN_INFO PFX "%s: Link is down.\n",
1190 tg3_ump_link_report(tp);
1191 } else if (netif_msg_link(tp)) {
1192 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1194 (tp->link_config.active_speed == SPEED_1000 ?
1196 (tp->link_config.active_speed == SPEED_100 ?
1198 (tp->link_config.active_duplex == DUPLEX_FULL ?
1201 printk(KERN_INFO PFX
1202 "%s: Flow control is %s for TX and %s for RX.\n",
1204 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1206 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1208 tg3_ump_link_report(tp);
1212 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1216 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1217 miireg = ADVERTISE_PAUSE_CAP;
1218 else if (flow_ctrl & FLOW_CTRL_TX)
1219 miireg = ADVERTISE_PAUSE_ASYM;
1220 else if (flow_ctrl & FLOW_CTRL_RX)
1221 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1228 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1232 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1233 miireg = ADVERTISE_1000XPAUSE;
1234 else if (flow_ctrl & FLOW_CTRL_TX)
1235 miireg = ADVERTISE_1000XPSE_ASYM;
1236 else if (flow_ctrl & FLOW_CTRL_RX)
1237 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
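
/* Resolve 1000BASE-X flow control from the local and link-partner pause
 * advertisements using the standard autoneg pause resolution: symmetric
 * pause on both sides enables TX and RX pause, asymmetric combinations
 * yield TX-only or RX-only.
 */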
1244 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1248 if (lcladv & ADVERTISE_1000XPAUSE) {
1249 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1250 if (rmtadv & LPA_1000XPAUSE)
1251 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1252 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1255 if (rmtadv & LPA_1000XPAUSE)
1256 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1258 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1259 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
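
/* Program the MAC_RX_MODE/MAC_TX_MODE flow control enables.  When pause
 * autonegotiation is in effect, the result is resolved from the local
 * and remote advertisements (serdes and copper use different encodings);
 * otherwise the statically configured link_config.flowctrl is applied.
 */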
1266 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1270 u32 old_rx_mode = tp->rx_mode;
1271 u32 old_tx_mode = tp->tx_mode;
1273 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1274 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
1276 autoneg = tp->link_config.autoneg;
1278 if (autoneg == AUTONEG_ENABLE &&
1279 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1280 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1281 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1283 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1285 flowctrl = tp->link_config.flowctrl;
1287 tp->link_config.active_flowctrl = flowctrl;
1289 if (flowctrl & FLOW_CTRL_RX)
1290 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1292 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1294 if (old_rx_mode != tp->rx_mode)
1295 tw32_f(MAC_RX_MODE, tp->rx_mode);
1297 if (flowctrl & FLOW_CTRL_TX)
1298 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1300 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1302 if (old_tx_mode != tp->tx_mode)
1303 tw32_f(MAC_TX_MODE, tp->tx_mode);
1306 static void tg3_adjust_link(struct net_device *dev)
1308 u8 oldflowctrl, linkmesg = 0;
1309 u32 mac_mode, lcl_adv, rmt_adv;
1310 struct tg3 *tp = netdev_priv(dev);
1311 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1313 spin_lock(&tp->lock);
1315 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1316 MAC_MODE_HALF_DUPLEX);
1318 oldflowctrl = tp->link_config.active_flowctrl;
1324 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1325 mac_mode |= MAC_MODE_PORT_MODE_MII;
1327 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1329 if (phydev->duplex == DUPLEX_HALF)
1330 mac_mode |= MAC_MODE_HALF_DUPLEX;
1332 lcl_adv = tg3_advert_flowctrl_1000T(
1333 tp->link_config.flowctrl);
1336 rmt_adv = LPA_PAUSE_CAP;
1337 if (phydev->asym_pause)
1338 rmt_adv |= LPA_PAUSE_ASYM;
1341 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1343 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1345 if (mac_mode != tp->mac_mode) {
1346 tp->mac_mode = mac_mode;
1347 tw32_f(MAC_MODE, tp->mac_mode);
1351 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1352 if (phydev->speed == SPEED_10)
1354 MAC_MI_STAT_10MBPS_MODE |
1355 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1357 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1360 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1361 tw32(MAC_TX_LENGTHS,
1362 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1363 (6 << TX_LENGTHS_IPG_SHIFT) |
1364 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1366 tw32(MAC_TX_LENGTHS,
1367 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1368 (6 << TX_LENGTHS_IPG_SHIFT) |
1369 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1371 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1372 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1373 phydev->speed != tp->link_config.active_speed ||
1374 phydev->duplex != tp->link_config.active_duplex ||
1375 oldflowctrl != tp->link_config.active_flowctrl)
1378 tp->link_config.active_speed = phydev->speed;
1379 tp->link_config.active_duplex = phydev->duplex;
1381 spin_unlock(&tp->lock);
1384 tg3_link_report(tp);
1387 static int tg3_phy_init(struct tg3 *tp)
1389 struct phy_device *phydev;
1391 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1394 /* Bring the PHY back to a known state. */
1397 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1399 /* Attach the MAC to the PHY. */
1400 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1401 phydev->dev_flags, phydev->interface);
1402 if (IS_ERR(phydev)) {
1403 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1404 return PTR_ERR(phydev);
1407 /* Mask with MAC supported features. */
1408 switch (phydev->interface) {
1409 case PHY_INTERFACE_MODE_GMII:
1410 case PHY_INTERFACE_MODE_RGMII:
1411 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1412 phydev->supported &= (PHY_GBIT_FEATURES |
1414 SUPPORTED_Asym_Pause);
1418 case PHY_INTERFACE_MODE_MII:
1419 phydev->supported &= (PHY_BASIC_FEATURES |
1421 SUPPORTED_Asym_Pause);
1424 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1428 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1430 phydev->advertising = phydev->supported;
1435 static void tg3_phy_start(struct tg3 *tp)
1437 struct phy_device *phydev;
1439 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1442 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1444 if (tp->link_config.phy_is_low_power) {
1445 tp->link_config.phy_is_low_power = 0;
1446 phydev->speed = tp->link_config.orig_speed;
1447 phydev->duplex = tp->link_config.orig_duplex;
1448 phydev->autoneg = tp->link_config.orig_autoneg;
1449 phydev->advertising = tp->link_config.orig_advertising;
1454 phy_start_aneg(phydev);
1457 static void tg3_phy_stop(struct tg3 *tp)
1459 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1462 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1465 static void tg3_phy_fini(struct tg3 *tp)
1467 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1468 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1469 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1473 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1475 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1479 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1483 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1486 tg3_writephy(tp, MII_TG3_FET_TEST,
1487 phytest | MII_TG3_FET_SHADOW_EN);
1488 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1490 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1492 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1493 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1495 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1499 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1503 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
1506 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1507 tg3_phy_fet_toggle_apd(tp, enable);
1511 reg = MII_TG3_MISC_SHDW_WREN |
1512 MII_TG3_MISC_SHDW_SCR5_SEL |
1513 MII_TG3_MISC_SHDW_SCR5_LPED |
1514 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1515 MII_TG3_MISC_SHDW_SCR5_SDTL |
1516 MII_TG3_MISC_SHDW_SCR5_C125OE;
1517 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1518 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1520 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1523 reg = MII_TG3_MISC_SHDW_WREN |
1524 MII_TG3_MISC_SHDW_APD_SEL |
1525 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1527 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1529 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1532 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1536 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1537 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1540 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1543 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1544 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1546 tg3_writephy(tp, MII_TG3_FET_TEST,
1547 ephy | MII_TG3_FET_SHADOW_EN);
1548 if (!tg3_readphy(tp, reg, &phy)) {
1550 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1552 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1553 tg3_writephy(tp, reg, phy);
1555 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1558 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1559 MII_TG3_AUXCTL_SHDWSEL_MISC;
1560 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1561 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1563 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1565 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1566 phy |= MII_TG3_AUXCTL_MISC_WREN;
1567 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1572 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1576 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1579 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1580 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1581 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1582 (val | (1 << 15) | (1 << 4)));
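
/* Apply factory OTP (one-time programmable) trim values to the PHY DSP
 * registers.  The SM_DSP clock is enabled around the writes and turned
 * back off when done.
 */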
1585 static void tg3_phy_apply_otp(struct tg3 *tp)
1594 /* Enable SM_DSP clock and tx 6dB coding. */
1595 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1596 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1597 MII_TG3_AUXCTL_ACTL_TX_6DB;
1598 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1600 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1601 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1602 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1604 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1605 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1606 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1608 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1609 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1610 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1612 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1613 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1615 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1616 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1618 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1619 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1620 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1622 /* Turn off SM_DSP clock. */
1623 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1624 MII_TG3_AUXCTL_ACTL_TX_6DB;
1625 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1628 static int tg3_wait_macro_done(struct tg3 *tp)
1635 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1636 if ((tmp32 & 0x1000) == 0)
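
/* Write a known test pattern into each of the four PHY DSP channels and
 * read it back.  A mismatch is reported through *resetp so the caller
 * can reset the PHY and retry.
 */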
1646 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1648 static const u32 test_pat[4][6] = {
1649 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1650 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1651 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1652 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1656 for (chan = 0; chan < 4; chan++) {
1659 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1660 (chan * 0x2000) | 0x0200);
1661 tg3_writephy(tp, 0x16, 0x0002);
1663 for (i = 0; i < 6; i++)
1664 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1667 tg3_writephy(tp, 0x16, 0x0202);
1668 if (tg3_wait_macro_done(tp)) {
1673 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1674 (chan * 0x2000) | 0x0200);
1675 tg3_writephy(tp, 0x16, 0x0082);
1676 if (tg3_wait_macro_done(tp)) {
1681 tg3_writephy(tp, 0x16, 0x0802);
1682 if (tg3_wait_macro_done(tp)) {
1687 for (i = 0; i < 6; i += 2) {
1690 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1691 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1692 tg3_wait_macro_done(tp)) {
1698 if (low != test_pat[chan][i] ||
1699 high != test_pat[chan][i+1]) {
1700 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1701 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1702 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1712 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1716 for (chan = 0; chan < 4; chan++) {
1719 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1720 (chan * 0x2000) | 0x0200);
1721 tg3_writephy(tp, 0x16, 0x0002);
1722 for (i = 0; i < 6; i++)
1723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1724 tg3_writephy(tp, 0x16, 0x0202);
1725 if (tg3_wait_macro_done(tp))
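
/* PHY workaround for 5703/5704/5705: force 1000/full master mode, write
 * and verify the DSP channel test patterns (retrying with a fresh BMCR
 * reset when the check fails), then restore the original MII_TG3_CTRL
 * and extended control settings.
 */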
1732 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1734 u32 reg32, phy9_orig;
1735 int retries, do_phy_reset, err;
1741 err = tg3_bmcr_reset(tp);
1747 /* Disable transmitter and interrupt. */
	if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1752 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1754 /* Set full-duplex, 1000 mbps. */
1755 tg3_writephy(tp, MII_BMCR,
1756 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1758 /* Set to master mode. */
1759 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1762 tg3_writephy(tp, MII_TG3_CTRL,
1763 (MII_TG3_CTRL_AS_MASTER |
1764 MII_TG3_CTRL_ENABLE_AS_MASTER));
1766 /* Enable SM_DSP_CLOCK and 6dB. */
1767 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1769 /* Block the PHY control access. */
1770 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1771 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1773 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1776 } while (--retries);
1778 err = tg3_phy_reset_chanpat(tp);
1782 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1783 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1785 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1786 tg3_writephy(tp, 0x16, 0x0000);
1788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1789 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1790 /* Set Extended packet length bit for jumbo frames */
1791 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1794 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1797 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1801 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1808 /* This will reset the tigon3 PHY if there is no valid
1809 * link unless the FORCE argument is non-zero.
1811 static int tg3_phy_reset(struct tg3 *tp)
1817 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1820 val = tr32(GRC_MISC_CFG);
1821 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1824 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1825 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1829 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1830 netif_carrier_off(tp->dev);
1831 tg3_link_report(tp);
1834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1835 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1836 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1837 err = tg3_phy_reset_5703_4_5(tp);
1844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1845 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1846 cpmuctrl = tr32(TG3_CPMU_CTRL);
1847 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1849 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1852 err = tg3_bmcr_reset(tp);
1856 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1859 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1860 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1862 tw32(TG3_CPMU_CTRL, cpmuctrl);
1865 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1866 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1869 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1870 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1871 CPMU_LSPD_1000MB_MACCLK_12_5) {
1872 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1874 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1878 tg3_phy_apply_otp(tp);
1880 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1881 tg3_phy_toggle_apd(tp, true);
1883 tg3_phy_toggle_apd(tp, false);
1886 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1887 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1888 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1889 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1890 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1891 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1892 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1894 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1895 tg3_writephy(tp, 0x1c, 0x8d68);
1896 tg3_writephy(tp, 0x1c, 0x8d68);
1898 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1899 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1900 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1901 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1902 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1903 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1904 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1905 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1906 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1908 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1909 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1910 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1911 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1912 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1913 tg3_writephy(tp, MII_TG3_TEST1,
1914 MII_TG3_TEST1_TRIM_EN | 0x4);
1916 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1917 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1919 /* Set Extended packet length bit (bit 14) on all chips that */
1920 /* support jumbo frames */
1921 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1922 /* Cannot do read-modify-write on 5401 */
1923 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1924 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1927 /* Set bit 14 with read-modify-write to preserve other bits */
1928 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1929 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1930 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1933 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1934 * jumbo frames transmission.
1936 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1939 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1940 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1941 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1944 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1945 /* adjust output voltage */
1946 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
1949 tg3_phy_toggle_automdix(tp, 1);
1950 tg3_phy_set_wirespeed(tp);
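
/* Configure the GPIO-controlled auxiliary power source.  When WOL or ASF
 * keeps part of the chip powered, the GRC local control GPIOs are toggled
 * so the board's aux power switch is driven appropriately; on dual-port
 * 5704/5714 devices the peer port's state is consulted first so the two
 * ports do not fight over the shared GPIOs.
 */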
1954 static void tg3_frob_aux_power(struct tg3 *tp)
1956 struct tg3 *tp_peer = tp;
1958 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1961 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1962 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1963 struct net_device *dev_peer;
1965 dev_peer = pci_get_drvdata(tp->pdev_peer);
1966 /* remove_one() may have been run on the peer. */
1970 tp_peer = netdev_priv(dev_peer);
1973 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1974 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1975 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1976 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1977 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1979 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1980 (GRC_LCLCTRL_GPIO_OE0 |
1981 GRC_LCLCTRL_GPIO_OE1 |
1982 GRC_LCLCTRL_GPIO_OE2 |
1983 GRC_LCLCTRL_GPIO_OUTPUT0 |
1984 GRC_LCLCTRL_GPIO_OUTPUT1),
1986 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
1987 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
1988 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1989 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1990 GRC_LCLCTRL_GPIO_OE1 |
1991 GRC_LCLCTRL_GPIO_OE2 |
1992 GRC_LCLCTRL_GPIO_OUTPUT0 |
1993 GRC_LCLCTRL_GPIO_OUTPUT1 |
1995 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1997 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1998 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2000 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2001 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2004 u32 grc_local_ctrl = 0;
2006 if (tp_peer != tp &&
2007 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2010 /* Workaround to prevent overdrawing Amps. */
2011 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2013 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2014 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2015 grc_local_ctrl, 100);
2018 /* On 5753 and variants, GPIO2 cannot be used. */
2019 no_gpio2 = tp->nic_sram_data_cfg &
2020 NIC_SRAM_DATA_CFG_NO_GPIO2;
2022 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2023 GRC_LCLCTRL_GPIO_OE1 |
2024 GRC_LCLCTRL_GPIO_OE2 |
2025 GRC_LCLCTRL_GPIO_OUTPUT1 |
2026 GRC_LCLCTRL_GPIO_OUTPUT2;
2028 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2029 GRC_LCLCTRL_GPIO_OUTPUT2);
2031 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2032 grc_local_ctrl, 100);
2034 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2036 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2037 grc_local_ctrl, 100);
2040 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2041 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2042 grc_local_ctrl, 100);
2046 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2047 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2048 if (tp_peer != tp &&
2049 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2052 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2053 (GRC_LCLCTRL_GPIO_OE1 |
2054 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2056 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2057 GRC_LCLCTRL_GPIO_OE1, 100);
2059 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2060 (GRC_LCLCTRL_GPIO_OE1 |
2061 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2066 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2068 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2070 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2071 if (speed != SPEED_10)
2073 } else if (speed == SPEED_10)
2079 static int tg3_setup_phy(struct tg3 *, int);
2081 #define RESET_KIND_SHUTDOWN 0
2082 #define RESET_KIND_INIT 1
2083 #define RESET_KIND_SUSPEND 2
2085 static void tg3_write_sig_post_reset(struct tg3 *, int);
2086 static int tg3_halt_cpu(struct tg3 *, u32);
2088 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2092 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2094 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2095 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2098 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2099 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2100 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2107 val = tr32(GRC_MISC_CFG);
2108 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2111 } else if (do_low_power) {
2112 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2113 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2115 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2116 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2117 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2118 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2119 MII_TG3_AUXCTL_PCTL_VREG_11V);
2122 /* The PHY should not be powered down on some chips because
2125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2127 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2128 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2131 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2132 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2133 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2134 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2135 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2136 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2139 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2142 /* tp->lock is held. */
2143 static int tg3_nvram_lock(struct tg3 *tp)
2145 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2148 if (tp->nvram_lock_cnt == 0) {
2149 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2150 for (i = 0; i < 8000; i++) {
2151 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2156 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2160 tp->nvram_lock_cnt++;
2165 /* tp->lock is held. */
2166 static void tg3_nvram_unlock(struct tg3 *tp)
2168 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2169 if (tp->nvram_lock_cnt > 0)
2170 tp->nvram_lock_cnt--;
2171 if (tp->nvram_lock_cnt == 0)
2172 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2176 /* tp->lock is held. */
2177 static void tg3_enable_nvram_access(struct tg3 *tp)
2179 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2180 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
2181 u32 nvaccess = tr32(NVRAM_ACCESS);
2183 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2187 /* tp->lock is held. */
2188 static void tg3_disable_nvram_access(struct tg3 *tp)
2190 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2191 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
2192 u32 nvaccess = tr32(NVRAM_ACCESS);
2194 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
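
/* Fallback read path for parts without the NVRAM interface: drive the
 * serial EEPROM through GRC_EEPROM_ADDR/GRC_EEPROM_DATA and poll for
 * EEPROM_ADDR_COMPLETE.
 */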
2198 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2199 u32 offset, u32 *val)
2204 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2207 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2208 EEPROM_ADDR_DEVID_MASK |
2210 tw32(GRC_EEPROM_ADDR,
2212 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2213 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2214 EEPROM_ADDR_ADDR_MASK) |
2215 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2217 for (i = 0; i < 1000; i++) {
2218 tmp = tr32(GRC_EEPROM_ADDR);
2220 if (tmp & EEPROM_ADDR_COMPLETE)
2224 if (!(tmp & EEPROM_ADDR_COMPLETE))
2227 tmp = tr32(GRC_EEPROM_DATA);
2230 * The data will always be opposite the native endian
2231 * format. Perform a blind byteswap to compensate.
2238 #define NVRAM_CMD_TIMEOUT 10000
2240 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2244 tw32(NVRAM_CMD, nvram_cmd);
2245 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2247 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2253 if (i == NVRAM_CMD_TIMEOUT)
2259 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2261 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2262 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2263 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2264 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2265 (tp->nvram_jedecnum == JEDEC_ATMEL))
2267 addr = ((addr / tp->nvram_pagesize) <<
2268 ATMEL_AT45DB0X1B_PAGE_POS) +
2269 (addr % tp->nvram_pagesize);
2274 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2276 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2277 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2278 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2279 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2280 (tp->nvram_jedecnum == JEDEC_ATMEL))
2282 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2283 tp->nvram_pagesize) +
2284 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
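/* Illustrative sketch of the translation above.  The AT45DB0x1B parts are
 * addressed as page:offset, with the byte offset in the low
 * ATMEL_AT45DB0X1B_PAGE_POS bits of the physical address.  Because the page
 * size (e.g. 264 bytes) is not a power of two, a linear offset has to be
 * split with divide/modulo rather than a simple mask.  The helpers below are
 * examples only, not part of the driver.
 */
static u32 example_atmel_linear_to_phys(u32 linear, u32 pagesize, u32 page_pos)
{
	/* e.g. linear 1000, pagesize 264, page_pos 9:
	 * page 3, offset 208  ->  (3 << 9) + 208 = 0x6d0
	 */
	return ((linear / pagesize) << page_pos) + (linear % pagesize);
}

static u32 example_atmel_phys_to_linear(u32 phys, u32 pagesize, u32 page_pos)
{
	return ((phys >> page_pos) * pagesize) +
	       (phys & ((1 << page_pos) - 1));
}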
2289 /* NOTE: Data read in from NVRAM is byteswapped according to
2290 * the byteswapping settings for all other register accesses.
2291 * tg3 devices are BE devices, so on a BE machine, the data
2292 * returned will be exactly as it is seen in NVRAM. On a LE
2293 * machine, the 32-bit value will be byteswapped.
2295 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2299 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2300 return tg3_nvram_read_using_eeprom(tp, offset, val);
2302 offset = tg3_nvram_phys_addr(tp, offset);
2304 if (offset > NVRAM_ADDR_MSK)
2307 ret = tg3_nvram_lock(tp);
2311 tg3_enable_nvram_access(tp);
2313 tw32(NVRAM_ADDR, offset);
2314 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2315 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2318 *val = tr32(NVRAM_RDDATA);
2320 tg3_disable_nvram_access(tp);
2322 tg3_nvram_unlock(tp);
2327 /* Ensures NVRAM data is in bytestream format. */
2328 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2331 int res = tg3_nvram_read(tp, offset, &v);
2333 *val = cpu_to_be32(v);
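/* Illustrative only: because tg3_nvram_read_be32() hands back a __be32,
 * callers can assemble a byte buffer that matches the flash contents on both
 * little- and big-endian hosts.  The helper below is a hypothetical example
 * of that pattern and assumes len is a multiple of four.
 */
static int example_nvram_read_bytes(struct tg3 *tp, u32 offset, u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i += 4) {
		__be32 v;
		int err = tg3_nvram_read_be32(tp, offset + i, &v);

		if (err)
			return err;
		/* byte order in 'v' already matches the flash contents */
		memcpy(buf + i, &v, sizeof(v));
	}
	return 0;
}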
2337 /* tp->lock is held. */
2338 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2340 u32 addr_high, addr_low;
2343 addr_high = ((tp->dev->dev_addr[0] << 8) |
2344 tp->dev->dev_addr[1]);
2345 addr_low = ((tp->dev->dev_addr[2] << 24) |
2346 (tp->dev->dev_addr[3] << 16) |
2347 (tp->dev->dev_addr[4] << 8) |
2348 (tp->dev->dev_addr[5] << 0));
2349 for (i = 0; i < 4; i++) {
2350 if (i == 1 && skip_mac_1)
2352 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2353 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2357 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2358 for (i = 0; i < 12; i++) {
2359 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2360 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2364 addr_high = (tp->dev->dev_addr[0] +
2365 tp->dev->dev_addr[1] +
2366 tp->dev->dev_addr[2] +
2367 tp->dev->dev_addr[3] +
2368 tp->dev->dev_addr[4] +
2369 tp->dev->dev_addr[5]) &
2370 TX_BACKOFF_SEED_MASK;
2371 tw32(MAC_TX_BACKOFF_SEED, addr_high);
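/* Sketch of the MAC_ADDR_x_{HIGH,LOW} packing performed above: the first two
 * octets of the station address land in the high register and the remaining
 * four in the low register.  For a hypothetical address 00:10:18:aa:bb:cc
 * this gives addr_high = 0x00000010 and addr_low = 0x18aabbcc; the backoff
 * seed is simply the byte sum masked with TX_BACKOFF_SEED_MASK.  Example
 * helper only.
 */
static void example_pack_mac(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = ((u32)mac[0] << 8) | mac[1];
	*lo = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
	      ((u32)mac[4] << 8)  |  (u32)mac[5];
}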
2374 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2377 bool device_should_wake, do_low_power;
2379 /* Make sure register accesses (indirect or otherwise)
2380 * will function correctly.
2382 pci_write_config_dword(tp->pdev,
2383 TG3PCI_MISC_HOST_CTRL,
2384 tp->misc_host_ctrl);
2388 pci_enable_wake(tp->pdev, state, false);
2389 pci_set_power_state(tp->pdev, PCI_D0);
2391 /* Switch out of Vaux if it is a NIC */
2392 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2393 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2403 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2404 tp->dev->name, state);
2408 /* Restore the CLKREQ setting. */
2409 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2412 pci_read_config_word(tp->pdev,
2413 tp->pcie_cap + PCI_EXP_LNKCTL,
2415 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2416 pci_write_config_word(tp->pdev,
2417 tp->pcie_cap + PCI_EXP_LNKCTL,
2421 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2422 tw32(TG3PCI_MISC_HOST_CTRL,
2423 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2425 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2426 device_may_wakeup(&tp->pdev->dev) &&
2427 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2429 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2430 do_low_power = false;
2431 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2432 !tp->link_config.phy_is_low_power) {
2433 struct phy_device *phydev;
2434 u32 phyid, advertising;
2436 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2438 tp->link_config.phy_is_low_power = 1;
2440 tp->link_config.orig_speed = phydev->speed;
2441 tp->link_config.orig_duplex = phydev->duplex;
2442 tp->link_config.orig_autoneg = phydev->autoneg;
2443 tp->link_config.orig_advertising = phydev->advertising;
2445 advertising = ADVERTISED_TP |
2447 ADVERTISED_Autoneg |
2448 ADVERTISED_10baseT_Half;
2450 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2451 device_should_wake) {
2452 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2454 ADVERTISED_100baseT_Half |
2455 ADVERTISED_100baseT_Full |
2456 ADVERTISED_10baseT_Full;
2458 advertising |= ADVERTISED_10baseT_Full;
2461 phydev->advertising = advertising;
2463 phy_start_aneg(phydev);
2465 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2466 if (phyid != TG3_PHY_ID_BCMAC131) {
2467 phyid &= TG3_PHY_OUI_MASK;
2468 if (phyid == TG3_PHY_OUI_1 ||
2469 phyid == TG3_PHY_OUI_2 ||
2470 phyid == TG3_PHY_OUI_3)
2471 do_low_power = true;
2475 do_low_power = true;
2477 if (tp->link_config.phy_is_low_power == 0) {
2478 tp->link_config.phy_is_low_power = 1;
2479 tp->link_config.orig_speed = tp->link_config.speed;
2480 tp->link_config.orig_duplex = tp->link_config.duplex;
2481 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2484 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2485 tp->link_config.speed = SPEED_10;
2486 tp->link_config.duplex = DUPLEX_HALF;
2487 tp->link_config.autoneg = AUTONEG_ENABLE;
2488 tg3_setup_phy(tp, 0);
2492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2495 val = tr32(GRC_VCPU_EXT_CTRL);
2496 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2497 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2501 for (i = 0; i < 200; i++) {
2502 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2503 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2508 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2509 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2510 WOL_DRV_STATE_SHUTDOWN |
2514 if (device_should_wake) {
2517 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2519 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2523 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2524 mac_mode = MAC_MODE_PORT_MODE_GMII;
2526 mac_mode = MAC_MODE_PORT_MODE_MII;
2528 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2529 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2531 u32 speed = (tp->tg3_flags &
2532 TG3_FLAG_WOL_SPEED_100MB) ?
2533 SPEED_100 : SPEED_10;
2534 if (tg3_5700_link_polarity(tp, speed))
2535 mac_mode |= MAC_MODE_LINK_POLARITY;
2537 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2540 mac_mode = MAC_MODE_PORT_MODE_TBI;
2543 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2544 tw32(MAC_LED_CTRL, tp->led_ctrl);
2546 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2547 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2548 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2549 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2550 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2551 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2553 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2554 mac_mode |= tp->mac_mode &
2555 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2556 if (mac_mode & MAC_MODE_APE_TX_EN)
2557 mac_mode |= MAC_MODE_TDE_ENABLE;
2560 tw32_f(MAC_MODE, mac_mode);
2563 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2567 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2568 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2572 base_val = tp->pci_clock_ctrl;
2573 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2574 CLOCK_CTRL_TXCLK_DISABLE);
2576 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2577 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2578 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2579 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2580 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2582 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2583 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2584 u32 newbits1, newbits2;
2586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2588 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2589 CLOCK_CTRL_TXCLK_DISABLE |
2591 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2592 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2593 newbits1 = CLOCK_CTRL_625_CORE;
2594 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2596 newbits1 = CLOCK_CTRL_ALTCLK;
2597 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2600 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2603 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2606 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2610 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2611 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2612 CLOCK_CTRL_TXCLK_DISABLE |
2613 CLOCK_CTRL_44MHZ_CORE);
2615 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2618 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2619 tp->pci_clock_ctrl | newbits3, 40);
2623 if (!(device_should_wake) &&
2624 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2625 tg3_power_down_phy(tp, do_low_power);
2627 tg3_frob_aux_power(tp);
2629 /* Workaround for unstable PLL clock */
2630 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2631 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2632 u32 val = tr32(0x7d00);
2634 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2636 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2639 err = tg3_nvram_lock(tp);
2640 tg3_halt_cpu(tp, RX_CPU_BASE);
2642 tg3_nvram_unlock(tp);
2646 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2648 if (device_should_wake)
2649 pci_enable_wake(tp->pdev, state, true);
2651 /* Finally, set the new power state. */
2652 pci_set_power_state(tp->pdev, state);
2657 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2659 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2660 case MII_TG3_AUX_STAT_10HALF:
2662 *duplex = DUPLEX_HALF;
2665 case MII_TG3_AUX_STAT_10FULL:
2667 *duplex = DUPLEX_FULL;
2670 case MII_TG3_AUX_STAT_100HALF:
2672 *duplex = DUPLEX_HALF;
2675 case MII_TG3_AUX_STAT_100FULL:
2677 *duplex = DUPLEX_FULL;
2680 case MII_TG3_AUX_STAT_1000HALF:
2681 *speed = SPEED_1000;
2682 *duplex = DUPLEX_HALF;
2685 case MII_TG3_AUX_STAT_1000FULL:
2686 *speed = SPEED_1000;
2687 *duplex = DUPLEX_FULL;
2691 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2692 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2694 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2698 *speed = SPEED_INVALID;
2699 *duplex = DUPLEX_INVALID;
2704 static void tg3_phy_copper_begin(struct tg3 *tp)
2709 if (tp->link_config.phy_is_low_power) {
2710 /* Entering low power mode. Disable gigabit and
2711 * 100baseT advertisements.
2713 tg3_writephy(tp, MII_TG3_CTRL, 0);
2715 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2716 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2717 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2718 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2720 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2721 } else if (tp->link_config.speed == SPEED_INVALID) {
2722 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2723 tp->link_config.advertising &=
2724 ~(ADVERTISED_1000baseT_Half |
2725 ADVERTISED_1000baseT_Full);
2727 new_adv = ADVERTISE_CSMA;
2728 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2729 new_adv |= ADVERTISE_10HALF;
2730 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2731 new_adv |= ADVERTISE_10FULL;
2732 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2733 new_adv |= ADVERTISE_100HALF;
2734 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2735 new_adv |= ADVERTISE_100FULL;
2737 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2739 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2741 if (tp->link_config.advertising &
2742 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2744 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2745 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2746 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2747 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2748 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2749 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2750 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2751 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2752 MII_TG3_CTRL_ENABLE_AS_MASTER);
2753 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2755 tg3_writephy(tp, MII_TG3_CTRL, 0);
2758 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2759 new_adv |= ADVERTISE_CSMA;
2761 /* Asking for a specific link mode. */
2762 if (tp->link_config.speed == SPEED_1000) {
2763 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2765 if (tp->link_config.duplex == DUPLEX_FULL)
2766 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2768 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2769 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2770 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2771 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2772 MII_TG3_CTRL_ENABLE_AS_MASTER);
2774 if (tp->link_config.speed == SPEED_100) {
2775 if (tp->link_config.duplex == DUPLEX_FULL)
2776 new_adv |= ADVERTISE_100FULL;
2778 new_adv |= ADVERTISE_100HALF;
2780 if (tp->link_config.duplex == DUPLEX_FULL)
2781 new_adv |= ADVERTISE_10FULL;
2783 new_adv |= ADVERTISE_10HALF;
2785 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2790 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2793 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2794 tp->link_config.speed != SPEED_INVALID) {
2795 u32 bmcr, orig_bmcr;
2797 tp->link_config.active_speed = tp->link_config.speed;
2798 tp->link_config.active_duplex = tp->link_config.duplex;
2801 switch (tp->link_config.speed) {
2807 bmcr |= BMCR_SPEED100;
2811 bmcr |= TG3_BMCR_SPEED1000;
2815 if (tp->link_config.duplex == DUPLEX_FULL)
2816 bmcr |= BMCR_FULLDPLX;
2818 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2819 (bmcr != orig_bmcr)) {
2820 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2821 for (i = 0; i < 1500; i++) {
2825 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2826 tg3_readphy(tp, MII_BMSR, &tmp))
2828 if (!(tmp & BMSR_LSTATUS)) {
2833 tg3_writephy(tp, MII_BMCR, bmcr);
2837 tg3_writephy(tp, MII_BMCR,
2838 BMCR_ANENABLE | BMCR_ANRESTART);
2842 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2846 /* Turn off tap power management. */
2847 /* Set Extended packet length bit */
2848 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2850 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2851 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2853 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2854 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2856 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2857 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2859 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2860 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2862 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2863 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
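/* The register writes above follow the Broadcom DSP indirect-access pattern:
 * MII_TG3_DSP_ADDRESS selects an internal DSP register and
 * MII_TG3_DSP_RW_PORT carries the data for it.  A minimal wrapper for that
 * pattern might look like the sketch below (the helper name is hypothetical;
 * this driver writes the pairs out inline).
 */
static int example_tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
	return err;
}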
2870 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2872 u32 adv_reg, all_mask = 0;
2874 if (mask & ADVERTISED_10baseT_Half)
2875 all_mask |= ADVERTISE_10HALF;
2876 if (mask & ADVERTISED_10baseT_Full)
2877 all_mask |= ADVERTISE_10FULL;
2878 if (mask & ADVERTISED_100baseT_Half)
2879 all_mask |= ADVERTISE_100HALF;
2880 if (mask & ADVERTISED_100baseT_Full)
2881 all_mask |= ADVERTISE_100FULL;
2883 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2886 if ((adv_reg & all_mask) != all_mask)
2888 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2892 if (mask & ADVERTISED_1000baseT_Half)
2893 all_mask |= ADVERTISE_1000HALF;
2894 if (mask & ADVERTISED_1000baseT_Full)
2895 all_mask |= ADVERTISE_1000FULL;
2897 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2900 if ((tg3_ctrl & all_mask) != all_mask)
2906 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2910 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2913 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2914 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2916 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2917 if (curadv != reqadv)
2920 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2921 tg3_readphy(tp, MII_LPA, rmtadv);
2923 /* Reprogram the advertisement register, even if it
2924 * does not affect the current link. If the link
2925 * gets renegotiated in the future, we can save an
2926 * additional renegotiation cycle by advertising
2927 * it correctly in the first place.
2929 if (curadv != reqadv) {
2930 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2931 ADVERTISE_PAUSE_ASYM);
2932 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
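/* For reference, the standard IEEE 802.3 pause resolution applied by
 * tg3_setup_flow_control() on full-duplex links combines the local and
 * remote ADVERTISE_PAUSE_CAP/ADVERTISE_PAUSE_ASYM bits roughly as sketched
 * below.  This mirrors the generic mii_resolve_flowctrl_fdx() helper
 * (FLOW_CTRL_TX/FLOW_CTRL_RX come from <linux/mii.h>) and is shown for
 * illustration, not as this driver's exact code.
 */
static u8 example_resolve_pause(u32 lcladv, u32 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
		/* both ends advertise symmetric pause */
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
		if (lcladv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;
		else if (rmtadv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_TX;
	}
	return cap;
}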
2939 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2941 int current_link_up;
2943 u32 lcl_adv, rmt_adv;
2951 (MAC_STATUS_SYNC_CHANGED |
2952 MAC_STATUS_CFG_CHANGED |
2953 MAC_STATUS_MI_COMPLETION |
2954 MAC_STATUS_LNKSTATE_CHANGED));
2957 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2959 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2963 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2965 /* Some third-party PHYs need to be reset on link going down. */
2968 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2971 netif_carrier_ok(tp->dev)) {
2972 tg3_readphy(tp, MII_BMSR, &bmsr);
2973 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2974 !(bmsr & BMSR_LSTATUS))
2980 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2981 tg3_readphy(tp, MII_BMSR, &bmsr);
2982 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2983 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2986 if (!(bmsr & BMSR_LSTATUS)) {
2987 err = tg3_init_5401phy_dsp(tp);
2991 tg3_readphy(tp, MII_BMSR, &bmsr);
2992 for (i = 0; i < 1000; i++) {
2994 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2995 (bmsr & BMSR_LSTATUS)) {
3001 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3002 !(bmsr & BMSR_LSTATUS) &&
3003 tp->link_config.active_speed == SPEED_1000) {
3004 err = tg3_phy_reset(tp);
3006 err = tg3_init_5401phy_dsp(tp);
3011 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3012 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3013 /* 5701 {A0,B0} CRC bug workaround */
3014 tg3_writephy(tp, 0x15, 0x0a75);
3015 tg3_writephy(tp, 0x1c, 0x8c68);
3016 tg3_writephy(tp, 0x1c, 0x8d68);
3017 tg3_writephy(tp, 0x1c, 0x8c68);
3020 /* Clear pending interrupts... */
3021 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3022 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3024 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3025 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3026 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3027 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3031 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3032 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3033 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3035 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3038 current_link_up = 0;
3039 current_speed = SPEED_INVALID;
3040 current_duplex = DUPLEX_INVALID;
3042 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3045 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3046 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3047 if (!(val & (1 << 10))) {
3049 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3055 for (i = 0; i < 100; i++) {
3056 tg3_readphy(tp, MII_BMSR, &bmsr);
3057 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3058 (bmsr & BMSR_LSTATUS))
3063 if (bmsr & BMSR_LSTATUS) {
3066 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3067 for (i = 0; i < 2000; i++) {
3069 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3074 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3079 for (i = 0; i < 200; i++) {
3080 tg3_readphy(tp, MII_BMCR, &bmcr);
3081 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3083 if (bmcr && bmcr != 0x7fff)
3091 tp->link_config.active_speed = current_speed;
3092 tp->link_config.active_duplex = current_duplex;
3094 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3095 if ((bmcr & BMCR_ANENABLE) &&
3096 tg3_copper_is_advertising_all(tp,
3097 tp->link_config.advertising)) {
3098 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3100 current_link_up = 1;
3103 if (!(bmcr & BMCR_ANENABLE) &&
3104 tp->link_config.speed == current_speed &&
3105 tp->link_config.duplex == current_duplex &&
3106 tp->link_config.flowctrl ==
3107 tp->link_config.active_flowctrl) {
3108 current_link_up = 1;
3112 if (current_link_up == 1 &&
3113 tp->link_config.active_duplex == DUPLEX_FULL)
3114 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3118 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3121 tg3_phy_copper_begin(tp);
3123 tg3_readphy(tp, MII_BMSR, &tmp);
3124 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3125 (tmp & BMSR_LSTATUS))
3126 current_link_up = 1;
3129 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3130 if (current_link_up == 1) {
3131 if (tp->link_config.active_speed == SPEED_100 ||
3132 tp->link_config.active_speed == SPEED_10)
3133 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3135 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3136 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3137 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3139 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3141 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3142 if (tp->link_config.active_duplex == DUPLEX_HALF)
3143 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3146 if (current_link_up == 1 &&
3147 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3148 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3150 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3153 /* ??? Without this setting Netgear GA302T PHY does not
3154 * ??? send/receive packets...
3156 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3157 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3158 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3159 tw32_f(MAC_MI_MODE, tp->mi_mode);
3163 tw32_f(MAC_MODE, tp->mac_mode);
3166 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3167 /* Polled via timer. */
3168 tw32_f(MAC_EVENT, 0);
3170 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3175 current_link_up == 1 &&
3176 tp->link_config.active_speed == SPEED_1000 &&
3177 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3178 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3181 (MAC_STATUS_SYNC_CHANGED |
3182 MAC_STATUS_CFG_CHANGED));
3185 NIC_SRAM_FIRMWARE_MBOX,
3186 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3189 /* Prevent send BD corruption. */
3190 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3191 u16 oldlnkctl, newlnkctl;
3193 pci_read_config_word(tp->pdev,
3194 tp->pcie_cap + PCI_EXP_LNKCTL,
3196 if (tp->link_config.active_speed == SPEED_100 ||
3197 tp->link_config.active_speed == SPEED_10)
3198 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3200 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3201 if (newlnkctl != oldlnkctl)
3202 pci_write_config_word(tp->pdev,
3203 tp->pcie_cap + PCI_EXP_LNKCTL,
3205 } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
3206 u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
3207 if (tp->link_config.active_speed == SPEED_100 ||
3208 tp->link_config.active_speed == SPEED_10)
3209 newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3211 newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3212 if (newreg != oldreg)
3213 tw32(TG3_PCIE_LNKCTL, newreg);
3216 if (current_link_up != netif_carrier_ok(tp->dev)) {
3217 if (current_link_up)
3218 netif_carrier_on(tp->dev);
3220 netif_carrier_off(tp->dev);
3221 tg3_link_report(tp);
3227 struct tg3_fiber_aneginfo {
3229 #define ANEG_STATE_UNKNOWN 0
3230 #define ANEG_STATE_AN_ENABLE 1
3231 #define ANEG_STATE_RESTART_INIT 2
3232 #define ANEG_STATE_RESTART 3
3233 #define ANEG_STATE_DISABLE_LINK_OK 4
3234 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3235 #define ANEG_STATE_ABILITY_DETECT 6
3236 #define ANEG_STATE_ACK_DETECT_INIT 7
3237 #define ANEG_STATE_ACK_DETECT 8
3238 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3239 #define ANEG_STATE_COMPLETE_ACK 10
3240 #define ANEG_STATE_IDLE_DETECT_INIT 11
3241 #define ANEG_STATE_IDLE_DETECT 12
3242 #define ANEG_STATE_LINK_OK 13
3243 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3244 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3247 #define MR_AN_ENABLE 0x00000001
3248 #define MR_RESTART_AN 0x00000002
3249 #define MR_AN_COMPLETE 0x00000004
3250 #define MR_PAGE_RX 0x00000008
3251 #define MR_NP_LOADED 0x00000010
3252 #define MR_TOGGLE_TX 0x00000020
3253 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3254 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3255 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3256 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3257 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3258 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3259 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3260 #define MR_TOGGLE_RX 0x00002000
3261 #define MR_NP_RX 0x00004000
3263 #define MR_LINK_OK 0x80000000
3265 unsigned long link_time, cur_time;
3267 u32 ability_match_cfg;
3268 int ability_match_count;
3270 char ability_match, idle_match, ack_match;
3272 u32 txconfig, rxconfig;
3273 #define ANEG_CFG_NP 0x00000080
3274 #define ANEG_CFG_ACK 0x00000040
3275 #define ANEG_CFG_RF2 0x00000020
3276 #define ANEG_CFG_RF1 0x00000010
3277 #define ANEG_CFG_PS2 0x00000001
3278 #define ANEG_CFG_PS1 0x00008000
3279 #define ANEG_CFG_HD 0x00004000
3280 #define ANEG_CFG_FD 0x00002000
3281 #define ANEG_CFG_INVAL 0x00001f06
3286 #define ANEG_TIMER_ENAB 2
3287 #define ANEG_FAILED -1
3289 #define ANEG_STATE_SETTLE_TIME 10000
3291 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3292 struct tg3_fiber_aneginfo *ap)
3295 unsigned long delta;
3299 if (ap->state == ANEG_STATE_UNKNOWN) {
3303 ap->ability_match_cfg = 0;
3304 ap->ability_match_count = 0;
3305 ap->ability_match = 0;
3311 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3312 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3314 if (rx_cfg_reg != ap->ability_match_cfg) {
3315 ap->ability_match_cfg = rx_cfg_reg;
3316 ap->ability_match = 0;
3317 ap->ability_match_count = 0;
3319 if (++ap->ability_match_count > 1) {
3320 ap->ability_match = 1;
3321 ap->ability_match_cfg = rx_cfg_reg;
3324 if (rx_cfg_reg & ANEG_CFG_ACK)
3332 ap->ability_match_cfg = 0;
3333 ap->ability_match_count = 0;
3334 ap->ability_match = 0;
3340 ap->rxconfig = rx_cfg_reg;
3344 case ANEG_STATE_UNKNOWN:
3345 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3346 ap->state = ANEG_STATE_AN_ENABLE;
3349 case ANEG_STATE_AN_ENABLE:
3350 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3351 if (ap->flags & MR_AN_ENABLE) {
3354 ap->ability_match_cfg = 0;
3355 ap->ability_match_count = 0;
3356 ap->ability_match = 0;
3360 ap->state = ANEG_STATE_RESTART_INIT;
3362 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3366 case ANEG_STATE_RESTART_INIT:
3367 ap->link_time = ap->cur_time;
3368 ap->flags &= ~(MR_NP_LOADED);
3370 tw32(MAC_TX_AUTO_NEG, 0);
3371 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3372 tw32_f(MAC_MODE, tp->mac_mode);
3375 ret = ANEG_TIMER_ENAB;
3376 ap->state = ANEG_STATE_RESTART;
3379 case ANEG_STATE_RESTART:
3380 delta = ap->cur_time - ap->link_time;
3381 if (delta > ANEG_STATE_SETTLE_TIME) {
3382 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3384 ret = ANEG_TIMER_ENAB;
3388 case ANEG_STATE_DISABLE_LINK_OK:
3392 case ANEG_STATE_ABILITY_DETECT_INIT:
3393 ap->flags &= ~(MR_TOGGLE_TX);
3394 ap->txconfig = ANEG_CFG_FD;
3395 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3396 if (flowctrl & ADVERTISE_1000XPAUSE)
3397 ap->txconfig |= ANEG_CFG_PS1;
3398 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3399 ap->txconfig |= ANEG_CFG_PS2;
3400 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3401 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3402 tw32_f(MAC_MODE, tp->mac_mode);
3405 ap->state = ANEG_STATE_ABILITY_DETECT;
3408 case ANEG_STATE_ABILITY_DETECT:
3409 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3410 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3414 case ANEG_STATE_ACK_DETECT_INIT:
3415 ap->txconfig |= ANEG_CFG_ACK;
3416 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3417 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3418 tw32_f(MAC_MODE, tp->mac_mode);
3421 ap->state = ANEG_STATE_ACK_DETECT;
3424 case ANEG_STATE_ACK_DETECT:
3425 if (ap->ack_match != 0) {
3426 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3427 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3428 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3430 ap->state = ANEG_STATE_AN_ENABLE;
3432 } else if (ap->ability_match != 0 &&
3433 ap->rxconfig == 0) {
3434 ap->state = ANEG_STATE_AN_ENABLE;
3438 case ANEG_STATE_COMPLETE_ACK_INIT:
3439 if (ap->rxconfig & ANEG_CFG_INVAL) {
3443 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3444 MR_LP_ADV_HALF_DUPLEX |
3445 MR_LP_ADV_SYM_PAUSE |
3446 MR_LP_ADV_ASYM_PAUSE |
3447 MR_LP_ADV_REMOTE_FAULT1 |
3448 MR_LP_ADV_REMOTE_FAULT2 |
3449 MR_LP_ADV_NEXT_PAGE |
3452 if (ap->rxconfig & ANEG_CFG_FD)
3453 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3454 if (ap->rxconfig & ANEG_CFG_HD)
3455 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3456 if (ap->rxconfig & ANEG_CFG_PS1)
3457 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3458 if (ap->rxconfig & ANEG_CFG_PS2)
3459 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3460 if (ap->rxconfig & ANEG_CFG_RF1)
3461 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3462 if (ap->rxconfig & ANEG_CFG_RF2)
3463 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3464 if (ap->rxconfig & ANEG_CFG_NP)
3465 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3467 ap->link_time = ap->cur_time;
3469 ap->flags ^= (MR_TOGGLE_TX);
3470 if (ap->rxconfig & 0x0008)
3471 ap->flags |= MR_TOGGLE_RX;
3472 if (ap->rxconfig & ANEG_CFG_NP)
3473 ap->flags |= MR_NP_RX;
3474 ap->flags |= MR_PAGE_RX;
3476 ap->state = ANEG_STATE_COMPLETE_ACK;
3477 ret = ANEG_TIMER_ENAB;
3480 case ANEG_STATE_COMPLETE_ACK:
3481 if (ap->ability_match != 0 &&
3482 ap->rxconfig == 0) {
3483 ap->state = ANEG_STATE_AN_ENABLE;
3486 delta = ap->cur_time - ap->link_time;
3487 if (delta > ANEG_STATE_SETTLE_TIME) {
3488 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3489 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3491 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3492 !(ap->flags & MR_NP_RX)) {
3493 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3501 case ANEG_STATE_IDLE_DETECT_INIT:
3502 ap->link_time = ap->cur_time;
3503 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3504 tw32_f(MAC_MODE, tp->mac_mode);
3507 ap->state = ANEG_STATE_IDLE_DETECT;
3508 ret = ANEG_TIMER_ENAB;
3511 case ANEG_STATE_IDLE_DETECT:
3512 if (ap->ability_match != 0 &&
3513 ap->rxconfig == 0) {
3514 ap->state = ANEG_STATE_AN_ENABLE;
3517 delta = ap->cur_time - ap->link_time;
3518 if (delta > ANEG_STATE_SETTLE_TIME) {
3519 /* XXX another gem from the Broadcom driver :( */
3520 ap->state = ANEG_STATE_LINK_OK;
3524 case ANEG_STATE_LINK_OK:
3525 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3529 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3530 /* ??? unimplemented */
3533 case ANEG_STATE_NEXT_PAGE_WAIT:
3534 /* ??? unimplemented */
3545 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3548 struct tg3_fiber_aneginfo aninfo;
3549 int status = ANEG_FAILED;
3553 tw32_f(MAC_TX_AUTO_NEG, 0);
3555 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3556 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3559 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3562 memset(&aninfo, 0, sizeof(aninfo));
3563 aninfo.flags |= MR_AN_ENABLE;
3564 aninfo.state = ANEG_STATE_UNKNOWN;
3565 aninfo.cur_time = 0;
3567 while (++tick < 195000) {
3568 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3569 if (status == ANEG_DONE || status == ANEG_FAILED)
3575 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3576 tw32_f(MAC_MODE, tp->mac_mode);
3579 *txflags = aninfo.txconfig;
3580 *rxflags = aninfo.flags;
3582 if (status == ANEG_DONE &&
3583 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3584 MR_LP_ADV_FULL_DUPLEX)))
3590 static void tg3_init_bcm8002(struct tg3 *tp)
3592 u32 mac_status = tr32(MAC_STATUS);
3595 /* Reset when initializing for the first time or when we have a link. */
3596 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3597 !(mac_status & MAC_STATUS_PCS_SYNCED))
3600 /* Set PLL lock range. */
3601 tg3_writephy(tp, 0x16, 0x8007);
3604 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3606 /* Wait for reset to complete. */
3607 /* XXX schedule_timeout() ... */
3608 for (i = 0; i < 500; i++)
3611 /* Config mode; select PMA/Ch 1 regs. */
3612 tg3_writephy(tp, 0x10, 0x8411);
3614 /* Enable auto-lock and comdet, select txclk for tx. */
3615 tg3_writephy(tp, 0x11, 0x0a10);
3617 tg3_writephy(tp, 0x18, 0x00a0);
3618 tg3_writephy(tp, 0x16, 0x41ff);
3620 /* Assert and deassert POR. */
3621 tg3_writephy(tp, 0x13, 0x0400);
3623 tg3_writephy(tp, 0x13, 0x0000);
3625 tg3_writephy(tp, 0x11, 0x0a50);
3627 tg3_writephy(tp, 0x11, 0x0a10);
3629 /* Wait for signal to stabilize */
3630 /* XXX schedule_timeout() ... */
3631 for (i = 0; i < 15000; i++)
3634 /* Deselect the channel register so we can read the PHYID later. */
3637 tg3_writephy(tp, 0x10, 0x8011);
3640 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3643 u32 sg_dig_ctrl, sg_dig_status;
3644 u32 serdes_cfg, expected_sg_dig_ctrl;
3645 int workaround, port_a;
3646 int current_link_up;
3649 expected_sg_dig_ctrl = 0;
3652 current_link_up = 0;
3654 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3655 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3657 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3660 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3661 /* preserve bits 20-23 for voltage regulator */
3662 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3665 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3667 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3668 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3670 u32 val = serdes_cfg;
3676 tw32_f(MAC_SERDES_CFG, val);
3679 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3681 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3682 tg3_setup_flow_control(tp, 0, 0);
3683 current_link_up = 1;
3688 /* Want auto-negotiation. */
3689 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3691 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3692 if (flowctrl & ADVERTISE_1000XPAUSE)
3693 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3694 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3695 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3697 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3698 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3699 tp->serdes_counter &&
3700 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3701 MAC_STATUS_RCVD_CFG)) ==
3702 MAC_STATUS_PCS_SYNCED)) {
3703 tp->serdes_counter--;
3704 current_link_up = 1;
3709 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3710 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3712 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3714 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3715 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3716 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3717 MAC_STATUS_SIGNAL_DET)) {
3718 sg_dig_status = tr32(SG_DIG_STATUS);
3719 mac_status = tr32(MAC_STATUS);
3721 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3722 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3723 u32 local_adv = 0, remote_adv = 0;
3725 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3726 local_adv |= ADVERTISE_1000XPAUSE;
3727 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3728 local_adv |= ADVERTISE_1000XPSE_ASYM;
3730 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3731 remote_adv |= LPA_1000XPAUSE;
3732 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3733 remote_adv |= LPA_1000XPAUSE_ASYM;
3735 tg3_setup_flow_control(tp, local_adv, remote_adv);
3736 current_link_up = 1;
3737 tp->serdes_counter = 0;
3738 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3739 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3740 if (tp->serdes_counter)
3741 tp->serdes_counter--;
3744 u32 val = serdes_cfg;
3751 tw32_f(MAC_SERDES_CFG, val);
3754 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3757 /* Parallel detection: the link is up only if we have
3758  * PCS_SYNC and are not receiving config code words.
3759  */
3760 mac_status = tr32(MAC_STATUS);
3761 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3762 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3763 tg3_setup_flow_control(tp, 0, 0);
3764 current_link_up = 1;
3766 TG3_FLG2_PARALLEL_DETECT;
3767 tp->serdes_counter =
3768 SERDES_PARALLEL_DET_TIMEOUT;
3770 goto restart_autoneg;
3774 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3775 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3779 return current_link_up;
3782 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3784 int current_link_up = 0;
3786 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3789 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3790 u32 txflags, rxflags;
3793 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3794 u32 local_adv = 0, remote_adv = 0;
3796 if (txflags & ANEG_CFG_PS1)
3797 local_adv |= ADVERTISE_1000XPAUSE;
3798 if (txflags & ANEG_CFG_PS2)
3799 local_adv |= ADVERTISE_1000XPSE_ASYM;
3801 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3802 remote_adv |= LPA_1000XPAUSE;
3803 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3804 remote_adv |= LPA_1000XPAUSE_ASYM;
3806 tg3_setup_flow_control(tp, local_adv, remote_adv);
3808 current_link_up = 1;
3810 for (i = 0; i < 30; i++) {
3813 (MAC_STATUS_SYNC_CHANGED |
3814 MAC_STATUS_CFG_CHANGED));
3816 if ((tr32(MAC_STATUS) &
3817 (MAC_STATUS_SYNC_CHANGED |
3818 MAC_STATUS_CFG_CHANGED)) == 0)
3822 mac_status = tr32(MAC_STATUS);
3823 if (current_link_up == 0 &&
3824 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3825 !(mac_status & MAC_STATUS_RCVD_CFG))
3826 current_link_up = 1;
3828 tg3_setup_flow_control(tp, 0, 0);
3830 /* Forcing 1000FD link up. */
3831 current_link_up = 1;
3833 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3836 tw32_f(MAC_MODE, tp->mac_mode);
3841 return current_link_up;
3844 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3847 u16 orig_active_speed;
3848 u8 orig_active_duplex;
3850 int current_link_up;
3853 orig_pause_cfg = tp->link_config.active_flowctrl;
3854 orig_active_speed = tp->link_config.active_speed;
3855 orig_active_duplex = tp->link_config.active_duplex;
3857 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3858 netif_carrier_ok(tp->dev) &&
3859 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3860 mac_status = tr32(MAC_STATUS);
3861 mac_status &= (MAC_STATUS_PCS_SYNCED |
3862 MAC_STATUS_SIGNAL_DET |
3863 MAC_STATUS_CFG_CHANGED |
3864 MAC_STATUS_RCVD_CFG);
3865 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3866 MAC_STATUS_SIGNAL_DET)) {
3867 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3868 MAC_STATUS_CFG_CHANGED));
3873 tw32_f(MAC_TX_AUTO_NEG, 0);
3875 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3876 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3877 tw32_f(MAC_MODE, tp->mac_mode);
3880 if (tp->phy_id == PHY_ID_BCM8002)
3881 tg3_init_bcm8002(tp);
3883 /* Enable link change event even when serdes polling. */
3884 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3887 current_link_up = 0;
3888 mac_status = tr32(MAC_STATUS);
3890 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3891 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3893 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3895 tp->hw_status->status =
3896 (SD_STATUS_UPDATED |
3897 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3899 for (i = 0; i < 100; i++) {
3900 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3901 MAC_STATUS_CFG_CHANGED));
3903 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3904 MAC_STATUS_CFG_CHANGED |
3905 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3909 mac_status = tr32(MAC_STATUS);
3910 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3911 current_link_up = 0;
3912 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3913 tp->serdes_counter == 0) {
3914 tw32_f(MAC_MODE, (tp->mac_mode |
3915 MAC_MODE_SEND_CONFIGS));
3917 tw32_f(MAC_MODE, tp->mac_mode);
3921 if (current_link_up == 1) {
3922 tp->link_config.active_speed = SPEED_1000;
3923 tp->link_config.active_duplex = DUPLEX_FULL;
3924 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3925 LED_CTRL_LNKLED_OVERRIDE |
3926 LED_CTRL_1000MBPS_ON));
3928 tp->link_config.active_speed = SPEED_INVALID;
3929 tp->link_config.active_duplex = DUPLEX_INVALID;
3930 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3931 LED_CTRL_LNKLED_OVERRIDE |
3932 LED_CTRL_TRAFFIC_OVERRIDE));
3935 if (current_link_up != netif_carrier_ok(tp->dev)) {
3936 if (current_link_up)
3937 netif_carrier_on(tp->dev);
3939 netif_carrier_off(tp->dev);
3940 tg3_link_report(tp);
3942 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3943 if (orig_pause_cfg != now_pause_cfg ||
3944 orig_active_speed != tp->link_config.active_speed ||
3945 orig_active_duplex != tp->link_config.active_duplex)
3946 tg3_link_report(tp);
3952 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3954 int current_link_up, err = 0;
3958 u32 local_adv, remote_adv;
3960 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3961 tw32_f(MAC_MODE, tp->mac_mode);
3967 (MAC_STATUS_SYNC_CHANGED |
3968 MAC_STATUS_CFG_CHANGED |
3969 MAC_STATUS_MI_COMPLETION |
3970 MAC_STATUS_LNKSTATE_CHANGED));
3976 current_link_up = 0;
3977 current_speed = SPEED_INVALID;
3978 current_duplex = DUPLEX_INVALID;
3980 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3981 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3983 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3984 bmsr |= BMSR_LSTATUS;
3986 bmsr &= ~BMSR_LSTATUS;
3989 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3991 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3992 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3993 /* do nothing, just check for link up at the end */
3994 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3997 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3998 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3999 ADVERTISE_1000XPAUSE |
4000 ADVERTISE_1000XPSE_ASYM |
4003 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4005 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4006 new_adv |= ADVERTISE_1000XHALF;
4007 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4008 new_adv |= ADVERTISE_1000XFULL;
4010 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4011 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4012 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4013 tg3_writephy(tp, MII_BMCR, bmcr);
4015 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4016 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4017 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4024 bmcr &= ~BMCR_SPEED1000;
4025 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4027 if (tp->link_config.duplex == DUPLEX_FULL)
4028 new_bmcr |= BMCR_FULLDPLX;
4030 if (new_bmcr != bmcr) {
4031 /* BMCR_SPEED1000 is a reserved bit that needs
4032 * to be set on write.
4034 new_bmcr |= BMCR_SPEED1000;
4036 /* Force a linkdown */
4037 if (netif_carrier_ok(tp->dev)) {
4040 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4041 adv &= ~(ADVERTISE_1000XFULL |
4042 ADVERTISE_1000XHALF |
4044 tg3_writephy(tp, MII_ADVERTISE, adv);
4045 tg3_writephy(tp, MII_BMCR, bmcr |
4049 netif_carrier_off(tp->dev);
4051 tg3_writephy(tp, MII_BMCR, new_bmcr);
4053 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4054 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4055 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4057 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4058 bmsr |= BMSR_LSTATUS;
4060 bmsr &= ~BMSR_LSTATUS;
4062 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4066 if (bmsr & BMSR_LSTATUS) {
4067 current_speed = SPEED_1000;
4068 current_link_up = 1;
4069 if (bmcr & BMCR_FULLDPLX)
4070 current_duplex = DUPLEX_FULL;
4072 current_duplex = DUPLEX_HALF;
4077 if (bmcr & BMCR_ANENABLE) {
4080 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4081 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4082 common = local_adv & remote_adv;
4083 if (common & (ADVERTISE_1000XHALF |
4084 ADVERTISE_1000XFULL)) {
4085 if (common & ADVERTISE_1000XFULL)
4086 current_duplex = DUPLEX_FULL;
4088 current_duplex = DUPLEX_HALF;
4091 current_link_up = 0;
4095 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4096 tg3_setup_flow_control(tp, local_adv, remote_adv);
4098 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4099 if (tp->link_config.active_duplex == DUPLEX_HALF)
4100 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4102 tw32_f(MAC_MODE, tp->mac_mode);
4105 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4107 tp->link_config.active_speed = current_speed;
4108 tp->link_config.active_duplex = current_duplex;
4110 if (current_link_up != netif_carrier_ok(tp->dev)) {
4111 if (current_link_up)
4112 netif_carrier_on(tp->dev);
4114 netif_carrier_off(tp->dev);
4115 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4117 tg3_link_report(tp);
4122 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4124 if (tp->serdes_counter) {
4125 /* Give autoneg time to complete. */
4126 tp->serdes_counter--;
4129 if (!netif_carrier_ok(tp->dev) &&
4130 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4133 tg3_readphy(tp, MII_BMCR, &bmcr);
4134 if (bmcr & BMCR_ANENABLE) {
4137 /* Select shadow register 0x1f */
4138 tg3_writephy(tp, 0x1c, 0x7c00);
4139 tg3_readphy(tp, 0x1c, &phy1);
4141 /* Select expansion interrupt status register */
4142 tg3_writephy(tp, 0x17, 0x0f01);
4143 tg3_readphy(tp, 0x15, &phy2);
4144 tg3_readphy(tp, 0x15, &phy2);
4146 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4147 /* We have signal detect and not receiving
4148  * config code words; link is up by parallel detection. */
4152 bmcr &= ~BMCR_ANENABLE;
4153 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4154 tg3_writephy(tp, MII_BMCR, bmcr);
4155 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4159 else if (netif_carrier_ok(tp->dev) &&
4160 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4161 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4164 /* Select expansion interrupt status register */
4165 tg3_writephy(tp, 0x17, 0x0f01);
4166 tg3_readphy(tp, 0x15, &phy2);
4170 /* Config code words received, turn on autoneg. */
4171 tg3_readphy(tp, MII_BMCR, &bmcr);
4172 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4174 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4180 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4184 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4185 err = tg3_setup_fiber_phy(tp, force_reset);
4186 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4187 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4189 err = tg3_setup_copper_phy(tp, force_reset);
4192 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4195 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4196 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4198 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4203 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4204 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4205 tw32(GRC_MISC_CFG, val);
4208 if (tp->link_config.active_speed == SPEED_1000 &&
4209 tp->link_config.active_duplex == DUPLEX_HALF)
4210 tw32(MAC_TX_LENGTHS,
4211 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4212 (6 << TX_LENGTHS_IPG_SHIFT) |
4213 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4215 tw32(MAC_TX_LENGTHS,
4216 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4217 (6 << TX_LENGTHS_IPG_SHIFT) |
4218 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4220 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4221 if (netif_carrier_ok(tp->dev)) {
4222 tw32(HOSTCC_STAT_COAL_TICKS,
4223 tp->coal.stats_block_coalesce_usecs);
4225 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4229 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4230 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4231 if (!netif_carrier_ok(tp->dev))
4232 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4235 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4236 tw32(PCIE_PWR_MGMT_THRESH, val);
4242 /* This is called whenever we suspect that the system chipset is re-
4243 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4244 * is bogus tx completions. We try to recover by setting the
4245 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4248 static void tg3_tx_recover(struct tg3 *tp)
4250 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4251 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4253 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4254 "mapped I/O cycles to the network device, attempting to "
4255 "recover. Please report the problem to the driver maintainer "
4256 "and include system chipset information.\n", tp->dev->name);
4258 spin_lock(&tp->lock);
4259 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4260 spin_unlock(&tp->lock);
4263 static inline u32 tg3_tx_avail(struct tg3 *tp)
4266 return (tp->tx_pending -
4267 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
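/* The computation above relies on TG3_TX_RING_SIZE being a power of two: the
 * unsigned subtraction wraps correctly even after tx_prod has rolled over,
 * and the mask keeps the result inside the ring.  Illustrative helper: with a
 * 512-entry ring, prod = 5 and cons = 510 gives (5 - 510) & 511 = 7
 * descriptors still in flight.
 */
static inline u32 example_ring_in_flight(u32 prod, u32 cons, u32 ring_size)
{
	/* ring_size must be a power of two, e.g. TG3_TX_RING_SIZE */
	return (prod - cons) & (ring_size - 1);
}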
4270 /* Tigon3 never reports partial packet sends. So we do not
4271 * need special logic to handle SKBs that have not had all
4272 * of their frags sent yet, like SunGEM does.
4274 static void tg3_tx(struct tg3 *tp)
4276 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
4277 u32 sw_idx = tp->tx_cons;
4279 while (sw_idx != hw_idx) {
4280 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
4281 struct sk_buff *skb = ri->skb;
4284 if (unlikely(skb == NULL)) {
4289 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4293 sw_idx = NEXT_TX(sw_idx);
4295 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4296 ri = &tp->tx_buffers[sw_idx];
4297 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4299 sw_idx = NEXT_TX(sw_idx);
4304 if (unlikely(tx_bug)) {
4310 tp->tx_cons = sw_idx;
4312 /* Need to make the tx_cons update visible to tg3_start_xmit()
4313 * before checking for netif_queue_stopped(). Without the
4314 * memory barrier, there is a small possibility that tg3_start_xmit()
4315 * will miss it and cause the queue to be stopped forever.
4319 if (unlikely(netif_queue_stopped(tp->dev) &&
4320 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
4321 netif_tx_lock(tp->dev);
4322 if (netif_queue_stopped(tp->dev) &&
4323 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
4324 netif_wake_queue(tp->dev);
4325 netif_tx_unlock(tp->dev);
4329 /* Returns size of skb allocated or < 0 on error.
4331 * We only need to fill in the address because the other members
4332 * of the RX descriptor are invariant, see tg3_init_rings.
4334 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4335 * posting buffers we only dirty the first cache line of the RX
4336 * descriptor (containing the address). Whereas for the RX status
4337 * buffers the cpu only reads the last cacheline of the RX descriptor
4338 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4340 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4341 int src_idx, u32 dest_idx_unmasked)
4343 struct tg3_rx_buffer_desc *desc;
4344 struct ring_info *map, *src_map;
4345 struct sk_buff *skb;
4347 int skb_size, dest_idx;
4350 switch (opaque_key) {
4351 case RXD_OPAQUE_RING_STD:
4352 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4353 desc = &tp->rx_std[dest_idx];
4354 map = &tp->rx_std_buffers[dest_idx];
4356 src_map = &tp->rx_std_buffers[src_idx];
4357 skb_size = tp->rx_pkt_buf_sz;
4360 case RXD_OPAQUE_RING_JUMBO:
4361 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4362 desc = &tp->rx_jumbo[dest_idx];
4363 map = &tp->rx_jumbo_buffers[dest_idx];
4365 src_map = &tp->rx_jumbo_buffers[src_idx];
4366 skb_size = RX_JUMBO_PKT_BUF_SZ;
4373 /* Do not overwrite any of the map or rp information
4374 * until we are sure we can commit to a new buffer.
4376 * Callers depend upon this behavior and assume that
4377 * we leave everything unchanged if we fail.
4379 skb = netdev_alloc_skb(tp->dev, skb_size);
4383 skb_reserve(skb, tp->rx_offset);
4385 mapping = pci_map_single(tp->pdev, skb->data,
4386 skb_size - tp->rx_offset,
4387 PCI_DMA_FROMDEVICE);
4390 pci_unmap_addr_set(map, mapping, mapping);
4392 if (src_map != NULL)
4393 src_map->skb = NULL;
4395 desc->addr_hi = ((u64)mapping >> 32);
4396 desc->addr_lo = ((u64)mapping & 0xffffffff);
4401 /* We only need to copy over the address because the other
4402 * members of the RX descriptor are invariant. See notes above
4403 * tg3_alloc_rx_skb for full details.
4405 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4406 int src_idx, u32 dest_idx_unmasked)
4408 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4409 struct ring_info *src_map, *dest_map;
4412 switch (opaque_key) {
4413 case RXD_OPAQUE_RING_STD:
4414 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4415 dest_desc = &tp->rx_std[dest_idx];
4416 dest_map = &tp->rx_std_buffers[dest_idx];
4417 src_desc = &tp->rx_std[src_idx];
4418 src_map = &tp->rx_std_buffers[src_idx];
4421 case RXD_OPAQUE_RING_JUMBO:
4422 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4423 dest_desc = &tp->rx_jumbo[dest_idx];
4424 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4425 src_desc = &tp->rx_jumbo[src_idx];
4426 src_map = &tp->rx_jumbo_buffers[src_idx];
4433 dest_map->skb = src_map->skb;
4434 pci_unmap_addr_set(dest_map, mapping,
4435 pci_unmap_addr(src_map, mapping));
4436 dest_desc->addr_hi = src_desc->addr_hi;
4437 dest_desc->addr_lo = src_desc->addr_lo;
4439 src_map->skb = NULL;
4442 #if TG3_VLAN_TAG_USED
4443 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4445 return vlan_gro_receive(&tp->napi, tp->vlgrp, vlan_tag, skb);
4449 /* The RX ring scheme is composed of multiple rings which post fresh
4450 * buffers to the chip, and one special ring the chip uses to report
4451 * status back to the host.
4453 * The special ring reports the status of received packets to the
4454 * host. The chip does not write into the original descriptor the
4455 * RX buffer was obtained from. The chip simply takes the original
4456 * descriptor as provided by the host, updates the status and length
4457 * field, then writes this into the next status ring entry.
4459 * Each ring the host uses to post buffers to the chip is described
4460 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4461 * it is first placed into the on-chip ram. When the packet's length
4462 * is known, it walks down the TG3_BDINFO entries to select the ring.
4463 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4464 * which is within the range of the new packet's length is chosen.
4466 * The "separate ring for rx status" scheme may sound queer, but it makes
4467 * sense from a cache coherency perspective. If only the host writes
4468 * to the buffer post rings, and only the chip writes to the rx status
4469 * rings, then cache lines never move beyond shared-modified state.
4470 * If both the host and chip were to write into the same ring, cache line
4471 * eviction could occur since both entities want it in an exclusive state.
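/* A rough model of the MAXLEN-based ring selection described above; the
 * decision is made by the chip, so the struct and helper below are purely
 * hypothetical and exist only to illustrate the rule.
 */
struct example_bdinfo {
	u32 maxlen;	/* MAXLEN field of a TG3_BDINFO entry */
};

static int example_pick_rx_ring(const struct example_bdinfo *rings,
				int nrings, u32 frame_len)
{
	int i;

	for (i = 0; i < nrings; i++)
		if (frame_len <= rings[i].maxlen)
			return i;	/* e.g. 0 = standard ring, 1 = jumbo ring */
	return -1;			/* no posted ring can hold this frame */
}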
4473 static int tg3_rx(struct tg3 *tp, int budget)
4475 u32 work_mask, rx_std_posted = 0;
4476 u32 sw_idx = tp->rx_rcb_ptr;
4480 hw_idx = tp->hw_status->idx[0].rx_producer;
4482 * We need to order the read of hw_idx and the read of
4483 * the opaque cookie.
4488 while (sw_idx != hw_idx && budget > 0) {
4489 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4491 struct sk_buff *skb;
4492 dma_addr_t dma_addr;
4493 u32 opaque_key, desc_idx, *post_ptr;
4495 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4496 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4497 if (opaque_key == RXD_OPAQUE_RING_STD) {
4498 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4500 skb = tp->rx_std_buffers[desc_idx].skb;
4501 post_ptr = &tp->rx_std_ptr;
4503 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4504 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4506 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4507 post_ptr = &tp->rx_jumbo_ptr;
4510 goto next_pkt_nopost;
4513 work_mask |= opaque_key;
4515 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4516 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4518 tg3_recycle_rx(tp, opaque_key,
4519 desc_idx, *post_ptr);
4521 /* Other statistics kept track of by card. */
4522 tp->net_stats.rx_dropped++;
4526 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4529 if (len > RX_COPY_THRESHOLD
4530 && tp->rx_offset == NET_IP_ALIGN
4531 /* rx_offset will likely not equal NET_IP_ALIGN
4532 * if this is a 5701 card running in PCI-X mode
4533 * [see tg3_get_invariants()]
4538 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4539 desc_idx, *post_ptr);
4543 pci_unmap_single(tp->pdev, dma_addr,
4544 skb_size - tp->rx_offset,
4545 PCI_DMA_FROMDEVICE);
4549 struct sk_buff *copy_skb;
4551 tg3_recycle_rx(tp, opaque_key,
4552 desc_idx, *post_ptr);
4554 copy_skb = netdev_alloc_skb(tp->dev,
4555 len + TG3_RAW_IP_ALIGN);
4556 if (copy_skb == NULL)
4557 goto drop_it_no_recycle;
4559 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4560 skb_put(copy_skb, len);
4561 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4562 skb_copy_from_linear_data(skb, copy_skb->data, len);
4563 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4565 /* We'll reuse the original ring buffer. */
4569 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4570 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4571 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4572 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4573 skb->ip_summed = CHECKSUM_UNNECESSARY;
4575 skb->ip_summed = CHECKSUM_NONE;
4577 skb->protocol = eth_type_trans(skb, tp->dev);
4579 if (len > (tp->dev->mtu + ETH_HLEN) &&
4580 skb->protocol != htons(ETH_P_8021Q)) {
4585 #if TG3_VLAN_TAG_USED
4586 if (tp->vlgrp != NULL &&
4587 desc->type_flags & RXD_FLAG_VLAN) {
4588 tg3_vlan_rx(tp, skb,
4589 desc->err_vlan & RXD_VLAN_MASK);
4592 napi_gro_receive(&tp->napi, skb);
4600 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4601 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4603 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4604 TG3_64BIT_REG_LOW, idx);
4605 work_mask &= ~RXD_OPAQUE_RING_STD;
4610 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4612 /* Refresh hw_idx to see if there is new work */
4613 if (sw_idx == hw_idx) {
4614 hw_idx = tp->hw_status->idx[0].rx_producer;
4619 /* ACK the status ring. */
4620 tp->rx_rcb_ptr = sw_idx;
4621 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4623 /* Refill RX ring(s). */
4624 if (work_mask & RXD_OPAQUE_RING_STD) {
4625 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4626 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4629 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4630 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4631 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4639 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4641 struct tg3_hw_status *sblk = tp->hw_status;
4643 /* handle link change and other phy events */
4644 if (!(tp->tg3_flags &
4645 (TG3_FLAG_USE_LINKCHG_REG |
4646 TG3_FLAG_POLL_SERDES))) {
4647 if (sblk->status & SD_STATUS_LINK_CHG) {
4648 sblk->status = SD_STATUS_UPDATED |
4649 (sblk->status & ~SD_STATUS_LINK_CHG);
4650 spin_lock(&tp->lock);
4651 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4653 (MAC_STATUS_SYNC_CHANGED |
4654 MAC_STATUS_CFG_CHANGED |
4655 MAC_STATUS_MI_COMPLETION |
4656 MAC_STATUS_LNKSTATE_CHANGED));
4659 tg3_setup_phy(tp, 0);
4660 spin_unlock(&tp->lock);
4664 /* run TX completion thread */
4665 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4667 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4671 /* run RX thread, within the bounds set by NAPI.
4672 * All RX "locking" is done by ensuring outside
4673 * code synchronizes with tg3->napi.poll()
4675 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4676 work_done += tg3_rx(tp, budget - work_done);
4681 static int tg3_poll(struct napi_struct *napi, int budget)
4683 struct tg3 *tp = container_of(napi, struct tg3, napi);
4685 struct tg3_hw_status *sblk = tp->hw_status;
4688 work_done = tg3_poll_work(tp, work_done, budget);
4690 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4693 if (unlikely(work_done >= budget))
4696 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4697 /* tp->last_tag is used in tg3_restart_ints() below
4698 * to tell the hw how much work has been processed,
4699 * so we must read it before checking for more work.
4701 tp->last_tag = sblk->status_tag;
4702 tp->last_irq_tag = tp->last_tag;
4705 sblk->status &= ~SD_STATUS_UPDATED;
4707 if (likely(!tg3_has_work(tp))) {
4708 napi_complete(napi);
4709 tg3_restart_ints(tp);
4717 /* work_done is guaranteed to be less than budget. */
4718 napi_complete(napi);
4719 schedule_work(&tp->reset_task);
4723 static void tg3_irq_quiesce(struct tg3 *tp)
4725 BUG_ON(tp->irq_sync);
4730 synchronize_irq(tp->pdev->irq);
4733 static inline int tg3_irq_sync(struct tg3 *tp)
4735 return tp->irq_sync;
4738 /* Fully shut down all tg3 driver activity elsewhere in the system.
4739 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4740 * with as well. Most of the time, this is not necessary except when
4741 * shutting down the device.
4743 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4745 spin_lock_bh(&tp->lock);
4747 tg3_irq_quiesce(tp);
4750 static inline void tg3_full_unlock(struct tg3 *tp)
4752 spin_unlock_bh(&tp->lock);
4755 /* One-shot MSI handler - Chip automatically disables interrupt
4756 * after sending MSI so the driver doesn't have to do it.
4758 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4760 struct net_device *dev = dev_id;
4761 struct tg3 *tp = netdev_priv(dev);
4763 prefetch(tp->hw_status);
4764 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4766 if (likely(!tg3_irq_sync(tp)))
4767 napi_schedule(&tp->napi);
4772 /* MSI ISR - No need to check for interrupt sharing and no need to
4773 * flush status block and interrupt mailbox. PCI ordering rules
4774 * guarantee that MSI will arrive after the status block.
4776 static irqreturn_t tg3_msi(int irq, void *dev_id)
4778 struct net_device *dev = dev_id;
4779 struct tg3 *tp = netdev_priv(dev);
4781 prefetch(tp->hw_status);
4782 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4784 * Writing any value to intr-mbox-0 clears PCI INTA# and
4785 * chip-internal interrupt pending events.
4786 * Writing non-zero to intr-mbox-0 additionally tells the
4787 * NIC to stop sending us irqs, engaging "in-intr-handler"
4790 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4791 if (likely(!tg3_irq_sync(tp)))
4792 napi_schedule(&tp->napi);
4794 return IRQ_RETVAL(1);
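/* Editorial note (not in the original source): the 0x00000001 written to
 * MAILBOX_INTERRUPT_0 above is the "in-intr-handler" value described in the
 * comment; it keeps further chip interrupts masked until the driver writes
 * the mailbox again once polling completes (see tg3_restart_ints()).
 */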
4797 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4799 struct net_device *dev = dev_id;
4800 struct tg3 *tp = netdev_priv(dev);
4801 struct tg3_hw_status *sblk = tp->hw_status;
4802 unsigned int handled = 1;
4804 /* In INTx mode, it is possible for the interrupt to arrive at
4805 * the CPU before the status block that was posted prior to the interrupt.
4806 * Reading the PCI State register will confirm whether the
4807 * interrupt is ours and will flush the status block.
4809 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4810 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4811 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4818 * Writing any value to intr-mbox-0 clears PCI INTA# and
4819 * chip-internal interrupt pending events.
4820 * Writing non-zero to intr-mbox-0 additionally tells the
4821 * NIC to stop sending us irqs, engaging "in-intr-handler"
4824 * Flush the mailbox to de-assert the IRQ immediately to prevent
4825 * spurious interrupts. The flush impacts performance but
4826 * excessive spurious interrupts can be worse in some cases.
4828 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4829 if (tg3_irq_sync(tp))
4831 sblk->status &= ~SD_STATUS_UPDATED;
4832 if (likely(tg3_has_work(tp))) {
4833 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4834 napi_schedule(&tp->napi);
4836 /* No work, shared interrupt perhaps? re-enable
4837 * interrupts, and flush that PCI write
4839 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4843 return IRQ_RETVAL(handled);
4846 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4848 struct net_device *dev = dev_id;
4849 struct tg3 *tp = netdev_priv(dev);
4850 struct tg3_hw_status *sblk = tp->hw_status;
4851 unsigned int handled = 1;
4853 /* In INTx mode, it is possible for the interrupt to arrive at
4854 * the CPU before the status block that was posted prior to the interrupt.
4855 * Reading the PCI State register will confirm whether the
4856 * interrupt is ours and will flush the status block.
4858 if (unlikely(sblk->status_tag == tp->last_irq_tag)) {
4859 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4860 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4867 * writing any value to intr-mbox-0 clears PCI INTA# and
4868 * chip-internal interrupt pending events.
4869 * writing non-zero to intr-mbox-0 additionally tells the
4870 * NIC to stop sending us irqs, engaging "in-intr-handler"
4873 * Flush the mailbox to de-assert the IRQ immediately to prevent
4874 * spurious interrupts. The flush impacts performance but
4875 * excessive spurious interrupts can be worse in some cases.
4877 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4880 * In a shared interrupt configuration, sometimes other devices'
4881 * interrupts will scream. We record the current status tag here
4882 * so that the above check can report that the screaming interrupts
4883 * are unhandled. Eventually they will be silenced.
4885 tp->last_irq_tag = sblk->status_tag;
4887 if (tg3_irq_sync(tp))
4890 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4892 napi_schedule(&tp->napi);
4895 return IRQ_RETVAL(handled);
4898 /* ISR for interrupt test */
4899 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4901 struct net_device *dev = dev_id;
4902 struct tg3 *tp = netdev_priv(dev);
4903 struct tg3_hw_status *sblk = tp->hw_status;
4905 if ((sblk->status & SD_STATUS_UPDATED) ||
4906 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4907 tg3_disable_ints(tp);
4908 return IRQ_RETVAL(1);
4910 return IRQ_RETVAL(0);
4913 static int tg3_init_hw(struct tg3 *, int);
4914 static int tg3_halt(struct tg3 *, int, int);
4916 /* Restart hardware after configuration changes, self-test, etc.
4917 * Invoked with tp->lock held.
4919 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4920 __releases(tp->lock)
4921 __acquires(tp->lock)
4925 err = tg3_init_hw(tp, reset_phy);
4927 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4928 "aborting.\n", tp->dev->name);
4929 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4930 tg3_full_unlock(tp);
4931 del_timer_sync(&tp->timer);
4933 napi_enable(&tp->napi);
4935 tg3_full_lock(tp, 0);
4940 #ifdef CONFIG_NET_POLL_CONTROLLER
4941 static void tg3_poll_controller(struct net_device *dev)
4943 struct tg3 *tp = netdev_priv(dev);
4945 tg3_interrupt(tp->pdev->irq, dev);
4949 static void tg3_reset_task(struct work_struct *work)
4951 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4953 unsigned int restart_timer;
4955 tg3_full_lock(tp, 0);
4957 if (!netif_running(tp->dev)) {
4958 tg3_full_unlock(tp);
4962 tg3_full_unlock(tp);
4968 tg3_full_lock(tp, 1);
4970 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4971 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4973 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4974 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4975 tp->write32_rx_mbox = tg3_write_flush_reg32;
4976 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4977 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4980 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4981 err = tg3_init_hw(tp, 1);
4985 tg3_netif_start(tp);
4988 mod_timer(&tp->timer, jiffies + 1);
4991 tg3_full_unlock(tp);
4997 static void tg3_dump_short_state(struct tg3 *tp)
4999 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5000 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5001 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5002 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5005 static void tg3_tx_timeout(struct net_device *dev)
5007 struct tg3 *tp = netdev_priv(dev);
5009 if (netif_msg_tx_err(tp)) {
5010 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5012 tg3_dump_short_state(tp);
5015 schedule_work(&tp->reset_task);
5018 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5019 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5021 u32 base = (u32) mapping & 0xffffffff;
5023 return ((base > 0xffffdcc0) &&
5024 (base + len + 8 < base));
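/* Editorial sketch (not part of the original driver): the test above flags a
 * mapping whose low 32 bits wrap around when the buffer length (plus a small
 * guard) is added, i.e. a buffer that straddles a 4GB DMA boundary.  This is
 * a simplified version that drops the 0xffffdcc0 early-out and the +8 guard;
 * the function name is hypothetical.
 */
static inline int tg3_example_crosses_4g(dma_addr_t mapping, u32 len)
{
	u32 base = (u32) mapping & 0xffffffff;

	/* If base + len wraps past 2^32, the buffer crosses a 4GB boundary. */
	return (base + len) < base;
}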
5027 /* Test for DMA addresses > 40-bit */
5028 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5031 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5032 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5033 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5040 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
5042 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5043 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5044 u32 last_plus_one, u32 *start,
5045 u32 base_flags, u32 mss)
5047 struct sk_buff *new_skb;
5048 dma_addr_t new_addr = 0;
5052 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5053 new_skb = skb_copy(skb, GFP_ATOMIC);
5055 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5057 new_skb = skb_copy_expand(skb,
5058 skb_headroom(skb) + more_headroom,
5059 skb_tailroom(skb), GFP_ATOMIC);
5065 /* New SKB is guaranteed to be linear. */
5067 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5068 new_addr = skb_shinfo(new_skb)->dma_head;
5070 /* Make sure new skb does not cross any 4G boundaries.
5071 * Drop the packet if it does.
5073 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
5075 skb_dma_unmap(&tp->pdev->dev, new_skb,
5078 dev_kfree_skb(new_skb);
5081 tg3_set_txd(tp, entry, new_addr, new_skb->len,
5082 base_flags, 1 | (mss << 1));
5083 *start = NEXT_TX(entry);
5087 /* Now clean up the sw ring entries. */
5089 while (entry != last_plus_one) {
5091 tp->tx_buffers[entry].skb = new_skb;
5093 tp->tx_buffers[entry].skb = NULL;
5095 entry = NEXT_TX(entry);
5099 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5105 static void tg3_set_txd(struct tg3 *tp, int entry,
5106 dma_addr_t mapping, int len, u32 flags,
5109 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
5110 int is_end = (mss_and_is_end & 0x1);
5111 u32 mss = (mss_and_is_end >> 1);
5115 flags |= TXD_FLAG_END;
5116 if (flags & TXD_FLAG_VLAN) {
5117 vlan_tag = flags >> 16;
5120 vlan_tag |= (mss << TXD_MSS_SHIFT);
5122 txd->addr_hi = ((u64) mapping >> 32);
5123 txd->addr_lo = ((u64) mapping & 0xffffffff);
5124 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5125 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
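/* Editorial sketch (not part of the original driver): callers of tg3_set_txd()
 * pack the TSO MSS and the "last descriptor" flag into the single
 * mss_and_is_end argument as (mss << 1) | is_end, which the function unpacks
 * above.  The helper name is hypothetical.
 */
static inline u32 tg3_example_pack_mss_end(u32 mss, int is_end)
{
	return (mss << 1) | (is_end ? 1 : 0);
}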
5128 /* hard_start_xmit for devices that don't have any bugs and
5129 * support TG3_FLG2_HW_TSO_2 only.
5131 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5133 struct tg3 *tp = netdev_priv(dev);
5134 u32 len, entry, base_flags, mss;
5135 struct skb_shared_info *sp;
5138 len = skb_headlen(skb);
5140 /* We are running in BH disabled context with netif_tx_lock
5141 * and TX reclaim runs via tp->napi.poll inside of a software
5142 * interrupt. Furthermore, IRQ processing runs lockless so we have
5143 * no IRQ context deadlocks to worry about either. Rejoice!
5145 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5146 if (!netif_queue_stopped(dev)) {
5147 netif_stop_queue(dev);
5149 /* This is a hard error, log it. */
5150 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5151 "queue awake!\n", dev->name);
5153 return NETDEV_TX_BUSY;
5156 entry = tp->tx_prod;
5159 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5160 int tcp_opt_len, ip_tcp_len;
5162 if (skb_header_cloned(skb) &&
5163 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5168 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5169 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
5171 struct iphdr *iph = ip_hdr(skb);
5173 tcp_opt_len = tcp_optlen(skb);
5174 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5177 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5178 mss |= (ip_tcp_len + tcp_opt_len) << 9;
5181 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5182 TXD_FLAG_CPU_POST_DMA);
5184 tcp_hdr(skb)->check = 0;
5187 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5188 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5189 #if TG3_VLAN_TAG_USED
5190 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5191 base_flags |= (TXD_FLAG_VLAN |
5192 (vlan_tx_tag_get(skb) << 16));
5195 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5200 sp = skb_shinfo(skb);
5202 mapping = sp->dma_head;
5204 tp->tx_buffers[entry].skb = skb;
5206 tg3_set_txd(tp, entry, mapping, len, base_flags,
5207 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5209 entry = NEXT_TX(entry);
5211 /* Now loop through additional data fragments, and queue them. */
5212 if (skb_shinfo(skb)->nr_frags > 0) {
5213 unsigned int i, last;
5215 last = skb_shinfo(skb)->nr_frags - 1;
5216 for (i = 0; i <= last; i++) {
5217 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5220 mapping = sp->dma_maps[i];
5221 tp->tx_buffers[entry].skb = NULL;
5223 tg3_set_txd(tp, entry, mapping, len,
5224 base_flags, (i == last) | (mss << 1));
5226 entry = NEXT_TX(entry);
5230 /* Packets are ready, update Tx producer idx local and on card. */
5231 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5233 tp->tx_prod = entry;
5234 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5235 netif_stop_queue(dev);
5236 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5237 netif_wake_queue(tp->dev);
5243 return NETDEV_TX_OK;
5246 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
5248 /* Use GSO to work around a rare TSO bug that may be triggered when the
5249 * TSO header is greater than 80 bytes.
5251 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5253 struct sk_buff *segs, *nskb;
5255 /* Estimate the number of fragments in the worst case */
5256 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
5257 netif_stop_queue(tp->dev);
5258 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
5259 return NETDEV_TX_BUSY;
5261 netif_wake_queue(tp->dev);
5264 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5266 goto tg3_tso_bug_end;
5272 tg3_start_xmit_dma_bug(nskb, tp->dev);
5278 return NETDEV_TX_OK;
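/* Editorial sketch (not part of the original driver): the elided portion of
 * tg3_tso_bug() above walks the singly linked list returned by
 * skb_gso_segment(), detaching each segment and handing it to the normal
 * DMA-bug transmit path, roughly as follows (assumed reconstruction, not
 * verbatim):
 *
 *	do {
 *		nskb = segs;
 *		segs = segs->next;
 *		nskb->next = NULL;
 *		tg3_start_xmit_dma_bug(nskb, tp->dev);
 *	} while (segs);
 *
 * followed by freeing the original GSO skb.
 */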
5281 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5282 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5284 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5286 struct tg3 *tp = netdev_priv(dev);
5287 u32 len, entry, base_flags, mss;
5288 struct skb_shared_info *sp;
5289 int would_hit_hwbug;
5292 len = skb_headlen(skb);
5294 /* We are running in BH disabled context with netif_tx_lock
5295 * and TX reclaim runs via tp->napi.poll inside of a software
5296 * interrupt. Furthermore, IRQ processing runs lockless so we have
5297 * no IRQ context deadlocks to worry about either. Rejoice!
5299 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5300 if (!netif_queue_stopped(dev)) {
5301 netif_stop_queue(dev);
5303 /* This is a hard error, log it. */
5304 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5305 "queue awake!\n", dev->name);
5307 return NETDEV_TX_BUSY;
5310 entry = tp->tx_prod;
5312 if (skb->ip_summed == CHECKSUM_PARTIAL)
5313 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5315 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5317 int tcp_opt_len, ip_tcp_len, hdr_len;
5319 if (skb_header_cloned(skb) &&
5320 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5325 tcp_opt_len = tcp_optlen(skb);
5326 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5328 hdr_len = ip_tcp_len + tcp_opt_len;
5329 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5330 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5331 return (tg3_tso_bug(tp, skb));
5333 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5334 TXD_FLAG_CPU_POST_DMA);
5338 iph->tot_len = htons(mss + hdr_len);
5339 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5340 tcp_hdr(skb)->check = 0;
5341 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5343 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5348 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5349 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5350 if (tcp_opt_len || iph->ihl > 5) {
5353 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5354 mss |= (tsflags << 11);
5357 if (tcp_opt_len || iph->ihl > 5) {
5360 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5361 base_flags |= tsflags << 12;
5365 #if TG3_VLAN_TAG_USED
5366 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5367 base_flags |= (TXD_FLAG_VLAN |
5368 (vlan_tx_tag_get(skb) << 16));
5371 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5376 sp = skb_shinfo(skb);
5378 mapping = sp->dma_head;
5380 tp->tx_buffers[entry].skb = skb;
5382 would_hit_hwbug = 0;
5384 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5385 would_hit_hwbug = 1;
5386 else if (tg3_4g_overflow_test(mapping, len))
5387 would_hit_hwbug = 1;
5389 tg3_set_txd(tp, entry, mapping, len, base_flags,
5390 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5392 entry = NEXT_TX(entry);
5394 /* Now loop through additional data fragments, and queue them. */
5395 if (skb_shinfo(skb)->nr_frags > 0) {
5396 unsigned int i, last;
5398 last = skb_shinfo(skb)->nr_frags - 1;
5399 for (i = 0; i <= last; i++) {
5400 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5403 mapping = sp->dma_maps[i];
5405 tp->tx_buffers[entry].skb = NULL;
5407 if (tg3_4g_overflow_test(mapping, len))
5408 would_hit_hwbug = 1;
5410 if (tg3_40bit_overflow_test(tp, mapping, len))
5411 would_hit_hwbug = 1;
5413 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5414 tg3_set_txd(tp, entry, mapping, len,
5415 base_flags, (i == last)|(mss << 1));
5417 tg3_set_txd(tp, entry, mapping, len,
5418 base_flags, (i == last));
5420 entry = NEXT_TX(entry);
5424 if (would_hit_hwbug) {
5425 u32 last_plus_one = entry;
5428 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5429 start &= (TG3_TX_RING_SIZE - 1);
5431 /* If the workaround fails due to memory/mapping
5432 * failure, silently drop this packet.
5434 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5435 &start, base_flags, mss))
5441 /* Packets are ready, update Tx producer idx local and on card. */
5442 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5444 tp->tx_prod = entry;
5445 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5446 netif_stop_queue(dev);
5447 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5448 netif_wake_queue(tp->dev);
5454 return NETDEV_TX_OK;
5457 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5462 if (new_mtu > ETH_DATA_LEN) {
5463 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5464 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5465 ethtool_op_set_tso(dev, 0);
5468 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5470 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5471 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5472 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5476 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5478 struct tg3 *tp = netdev_priv(dev);
5481 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5484 if (!netif_running(dev)) {
5485 /* We'll just catch it later when the device is brought up. */
5488 tg3_set_mtu(dev, tp, new_mtu);
5496 tg3_full_lock(tp, 1);
5498 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5500 tg3_set_mtu(dev, tp, new_mtu);
5502 err = tg3_restart_hw(tp, 0);
5505 tg3_netif_start(tp);
5507 tg3_full_unlock(tp);
5515 /* Free up pending packets in all rx/tx rings.
5517 * The chip has been shut down and the driver detached from
5518 * the networking stack, so no interrupts or new tx packets will
5519 * end up in the driver. tp->{tx,}lock is not held and we are not
5520 * in an interrupt context and thus may sleep.
5522 static void tg3_free_rings(struct tg3 *tp)
5524 struct ring_info *rxp;
5527 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5528 rxp = &tp->rx_std_buffers[i];
5530 if (rxp->skb == NULL)
5532 pci_unmap_single(tp->pdev,
5533 pci_unmap_addr(rxp, mapping),
5534 tp->rx_pkt_buf_sz - tp->rx_offset,
5535 PCI_DMA_FROMDEVICE);
5536 dev_kfree_skb_any(rxp->skb);
5540 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5541 rxp = &tp->rx_jumbo_buffers[i];
5543 if (rxp->skb == NULL)
5545 pci_unmap_single(tp->pdev,
5546 pci_unmap_addr(rxp, mapping),
5547 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5548 PCI_DMA_FROMDEVICE);
5549 dev_kfree_skb_any(rxp->skb);
5553 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5554 struct tx_ring_info *txp;
5555 struct sk_buff *skb;
5557 txp = &tp->tx_buffers[i];
5565 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5569 i += skb_shinfo(skb)->nr_frags + 1;
5571 dev_kfree_skb_any(skb);
5575 /* Initialize tx/rx rings for packet processing.
5577 * The chip has been shut down and the driver detached from
5578 * the networking stack, so no interrupts or new tx packets will
5579 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
5582 static int tg3_init_rings(struct tg3 *tp)
5586 /* Free up all the SKBs. */
5589 /* Zero out all descriptors. */
5590 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5591 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5592 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5593 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5595 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5596 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5597 (tp->dev->mtu > ETH_DATA_LEN))
5598 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5600 /* Initialize invariants of the rings; we only set this
5601 * stuff once. This works because the card does not
5602 * write into the rx buffer posting rings.
5604 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5605 struct tg3_rx_buffer_desc *rxd;
5607 rxd = &tp->rx_std[i];
5608 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5610 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5611 rxd->opaque = (RXD_OPAQUE_RING_STD |
5612 (i << RXD_OPAQUE_INDEX_SHIFT));
5615 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5616 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5617 struct tg3_rx_buffer_desc *rxd;
5619 rxd = &tp->rx_jumbo[i];
5620 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5622 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5624 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5625 (i << RXD_OPAQUE_INDEX_SHIFT));
5629 /* Now allocate fresh SKBs for each rx ring. */
5630 for (i = 0; i < tp->rx_pending; i++) {
5631 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5632 printk(KERN_WARNING PFX
5633 "%s: Using a smaller RX standard ring, "
5634 "only %d out of %d buffers were allocated "
5636 tp->dev->name, i, tp->rx_pending);
5644 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5645 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5646 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5648 printk(KERN_WARNING PFX
5649 "%s: Using a smaller RX jumbo ring, "
5650 "only %d out of %d buffers were "
5651 "allocated successfully.\n",
5652 tp->dev->name, i, tp->rx_jumbo_pending);
5657 tp->rx_jumbo_pending = i;
5666 * Must not be invoked with interrupt sources disabled and
5667 * the hardware shut down.
5669 static void tg3_free_consistent(struct tg3 *tp)
5671 kfree(tp->rx_std_buffers);
5672 tp->rx_std_buffers = NULL;
5674 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5675 tp->rx_std, tp->rx_std_mapping);
5679 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5680 tp->rx_jumbo, tp->rx_jumbo_mapping);
5681 tp->rx_jumbo = NULL;
5684 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5685 tp->rx_rcb, tp->rx_rcb_mapping);
5689 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5690 tp->tx_ring, tp->tx_desc_mapping);
5693 if (tp->hw_status) {
5694 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5695 tp->hw_status, tp->status_mapping);
5696 tp->hw_status = NULL;
5699 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5700 tp->hw_stats, tp->stats_mapping);
5701 tp->hw_stats = NULL;
5706 * Must not be invoked with interrupt sources disabled and
5707 * the hardware shut down. Can sleep.
5709 static int tg3_alloc_consistent(struct tg3 *tp)
5711 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5713 TG3_RX_JUMBO_RING_SIZE)) +
5714 (sizeof(struct tx_ring_info) *
5717 if (!tp->rx_std_buffers)
5720 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5721 tp->tx_buffers = (struct tx_ring_info *)
5722 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
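/* Editorial note (not in the original source): the single kzalloc() above
 * backs three consecutive tables, and the two pointer assignments carve them
 * out in order:
 *
 *	rx_std_buffers   -> struct ring_info    [TG3_RX_RING_SIZE]
 *	rx_jumbo_buffers -> struct ring_info    [TG3_RX_JUMBO_RING_SIZE]
 *	tx_buffers       -> struct tx_ring_info [TG3_TX_RING_SIZE]
 */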
5724 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5725 &tp->rx_std_mapping);
5729 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5730 &tp->rx_jumbo_mapping);
5735 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5736 &tp->rx_rcb_mapping);
5740 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5741 &tp->tx_desc_mapping);
5745 tp->hw_status = pci_alloc_consistent(tp->pdev,
5747 &tp->status_mapping);
5751 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5752 sizeof(struct tg3_hw_stats),
5753 &tp->stats_mapping);
5757 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5758 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5763 tg3_free_consistent(tp);
5767 #define MAX_WAIT_CNT 1000
5769 /* To stop a block, clear the enable bit and poll till it
5770 * clears. tp->lock is held.
5772 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5777 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5784 /* We can't enable/disable these bits of the
5785 * 5705/5750, just say success.
5798 for (i = 0; i < MAX_WAIT_CNT; i++) {
5801 if ((val & enable_bit) == 0)
5805 if (i == MAX_WAIT_CNT && !silent) {
5806 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5807 "ofs=%lx enable_bit=%x\n",
5815 /* tp->lock is held. */
5816 static int tg3_abort_hw(struct tg3 *tp, int silent)
5820 tg3_disable_ints(tp);
5822 tp->rx_mode &= ~RX_MODE_ENABLE;
5823 tw32_f(MAC_RX_MODE, tp->rx_mode);
5826 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5827 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5828 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5829 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5830 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5831 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5833 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5834 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5835 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5836 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5837 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5838 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5839 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5841 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5842 tw32_f(MAC_MODE, tp->mac_mode);
5845 tp->tx_mode &= ~TX_MODE_ENABLE;
5846 tw32_f(MAC_TX_MODE, tp->tx_mode);
5848 for (i = 0; i < MAX_WAIT_CNT; i++) {
5850 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5853 if (i >= MAX_WAIT_CNT) {
5854 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5855 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5856 tp->dev->name, tr32(MAC_TX_MODE));
5860 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5861 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5862 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5864 tw32(FTQ_RESET, 0xffffffff);
5865 tw32(FTQ_RESET, 0x00000000);
5867 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5868 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5871 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5873 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5878 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5883 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5884 if (apedata != APE_SEG_SIG_MAGIC)
5887 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5888 if (!(apedata & APE_FW_STATUS_READY))
5891 /* Wait for up to 1 millisecond for APE to service previous event. */
5892 for (i = 0; i < 10; i++) {
5893 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5896 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5898 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5899 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5900 event | APE_EVENT_STATUS_EVENT_PENDING);
5902 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5904 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5910 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5911 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5914 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5919 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5923 case RESET_KIND_INIT:
5924 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5925 APE_HOST_SEG_SIG_MAGIC);
5926 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5927 APE_HOST_SEG_LEN_MAGIC);
5928 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5929 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5930 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5931 APE_HOST_DRIVER_ID_MAGIC);
5932 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5933 APE_HOST_BEHAV_NO_PHYLOCK);
5935 event = APE_EVENT_STATUS_STATE_START;
5937 case RESET_KIND_SHUTDOWN:
5938 /* With the interface we are currently using,
5939 * APE does not track driver state. Wiping
5940 * out the HOST SEGMENT SIGNATURE forces
5941 * the APE to assume OS absent status.
5943 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5945 event = APE_EVENT_STATUS_STATE_UNLOAD;
5947 case RESET_KIND_SUSPEND:
5948 event = APE_EVENT_STATUS_STATE_SUSPEND;
5954 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5956 tg3_ape_send_event(tp, event);
5959 /* tp->lock is held. */
5960 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5962 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5963 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5965 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5967 case RESET_KIND_INIT:
5968 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5972 case RESET_KIND_SHUTDOWN:
5973 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5977 case RESET_KIND_SUSPEND:
5978 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5987 if (kind == RESET_KIND_INIT ||
5988 kind == RESET_KIND_SUSPEND)
5989 tg3_ape_driver_state_change(tp, kind);
5992 /* tp->lock is held. */
5993 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5995 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5997 case RESET_KIND_INIT:
5998 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5999 DRV_STATE_START_DONE);
6002 case RESET_KIND_SHUTDOWN:
6003 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6004 DRV_STATE_UNLOAD_DONE);
6012 if (kind == RESET_KIND_SHUTDOWN)
6013 tg3_ape_driver_state_change(tp, kind);
6016 /* tp->lock is held. */
6017 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6019 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6021 case RESET_KIND_INIT:
6022 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6026 case RESET_KIND_SHUTDOWN:
6027 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6031 case RESET_KIND_SUSPEND:
6032 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6042 static int tg3_poll_fw(struct tg3 *tp)
6047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6048 /* Wait up to 20ms for init done. */
6049 for (i = 0; i < 200; i++) {
6050 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6057 /* Wait for firmware initialization to complete. */
6058 for (i = 0; i < 100000; i++) {
6059 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6060 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6065 /* Chip might not be fitted with firmware. Some Sun onboard
6066 * parts are configured like that. So don't signal the timeout
6067 * of the above loop as an error, but do report the lack of
6068 * running firmware once.
6071 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6072 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6074 printk(KERN_INFO PFX "%s: No firmware running.\n",
6081 /* Save PCI command register before chip reset */
6082 static void tg3_save_pci_state(struct tg3 *tp)
6084 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6087 /* Restore PCI state after chip reset */
6088 static void tg3_restore_pci_state(struct tg3 *tp)
6092 /* Re-enable indirect register accesses. */
6093 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6094 tp->misc_host_ctrl);
6096 /* Set MAX PCI retry to zero. */
6097 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6098 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6099 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6100 val |= PCISTATE_RETRY_SAME_DMA;
6101 /* Allow reads and writes to the APE register and memory space. */
6102 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6103 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6104 PCISTATE_ALLOW_APE_SHMEM_WR;
6105 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6107 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6109 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6110 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6111 pcie_set_readrq(tp->pdev, 4096);
6113 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6114 tp->pci_cacheline_sz);
6115 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6120 /* Make sure PCI-X relaxed ordering bit is clear. */
6121 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6124 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6126 pcix_cmd &= ~PCI_X_CMD_ERO;
6127 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6131 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6133 /* Chip reset on 5780 will reset MSI enable bit,
6134 * so we need to restore it.
6136 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6139 pci_read_config_word(tp->pdev,
6140 tp->msi_cap + PCI_MSI_FLAGS,
6142 pci_write_config_word(tp->pdev,
6143 tp->msi_cap + PCI_MSI_FLAGS,
6144 ctrl | PCI_MSI_FLAGS_ENABLE);
6145 val = tr32(MSGINT_MODE);
6146 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6151 static void tg3_stop_fw(struct tg3 *);
6153 /* tp->lock is held. */
6154 static int tg3_chip_reset(struct tg3 *tp)
6157 void (*write_op)(struct tg3 *, u32, u32);
6164 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6166 /* No matching tg3_nvram_unlock() after this because
6167 * chip reset below will undo the nvram lock.
6169 tp->nvram_lock_cnt = 0;
6171 /* GRC_MISC_CFG core clock reset will clear the memory
6172 * enable bit in PCI register 4 and the MSI enable bit
6173 * on some chips, so we save relevant registers here.
6175 tg3_save_pci_state(tp);
6177 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6178 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6179 tw32(GRC_FASTBOOT_PC, 0);
6182 * We must avoid the readl() that normally takes place.
6183 * It locks machines, causes machine checks, and other
6184 * fun things. So, temporarily disable the 5701
6185 * hardware workaround, while we do the reset.
6187 write_op = tp->write32;
6188 if (write_op == tg3_write_flush_reg32)
6189 tp->write32 = tg3_write32;
6191 /* Prevent the irq handler from reading or writing PCI registers
6192 * during chip reset when the memory enable bit in the PCI command
6193 * register may be cleared. The chip does not generate interrupts
6194 * at this time, but the irq handler may still be called due to irq
6195 * sharing or irqpoll.
6197 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6198 if (tp->hw_status) {
6199 tp->hw_status->status = 0;
6200 tp->hw_status->status_tag = 0;
6203 tp->last_irq_tag = 0;
6205 synchronize_irq(tp->pdev->irq);
6207 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6208 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6209 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6213 val = GRC_MISC_CFG_CORECLK_RESET;
6215 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6216 if (tr32(0x7e2c) == 0x60) {
6219 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6220 tw32(GRC_MISC_CFG, (1 << 29));
6225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6226 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6227 tw32(GRC_VCPU_EXT_CTRL,
6228 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6231 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6232 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6233 tw32(GRC_MISC_CFG, val);
6235 /* restore 5701 hardware bug workaround write method */
6236 tp->write32 = write_op;
6238 /* Unfortunately, we have to delay before the PCI read back.
6239 * Some 575X chips will not even respond to a PCI cfg access
6240 * when the reset command is given to the chip.
6242 * How do these hardware designers expect things to work
6243 * properly if the PCI write is posted for a long period
6244 * of time? It is always necessary to have some method by
6245 * which a register read back can occur to push the write
6246 * out which does the reset.
6248 * For most tg3 variants the trick below works.
6253 /* Flush PCI posted writes. The normal MMIO registers
6254 * are inaccessible at this time so this is the only
6255 * way to do this reliably (actually, this is no longer
6256 * the case, see above). I tried to use indirect
6257 * register read/write but this upset some 5701 variants.
6259 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6263 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6266 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6270 /* Wait for link training to complete. */
6271 for (i = 0; i < 5000; i++)
6274 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6275 pci_write_config_dword(tp->pdev, 0xc4,
6276 cfg_val | (1 << 15));
6279 /* Clear the "no snoop" and "relaxed ordering" bits. */
6280 pci_read_config_word(tp->pdev,
6281 tp->pcie_cap + PCI_EXP_DEVCTL,
6283 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6284 PCI_EXP_DEVCTL_NOSNOOP_EN);
6286 * Older PCIe devices only support the 128 byte
6287 * MPS setting. Enforce the restriction.
6289 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6290 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6291 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6292 pci_write_config_word(tp->pdev,
6293 tp->pcie_cap + PCI_EXP_DEVCTL,
6296 pcie_set_readrq(tp->pdev, 4096);
6298 /* Clear error status */
6299 pci_write_config_word(tp->pdev,
6300 tp->pcie_cap + PCI_EXP_DEVSTA,
6301 PCI_EXP_DEVSTA_CED |
6302 PCI_EXP_DEVSTA_NFED |
6303 PCI_EXP_DEVSTA_FED |
6304 PCI_EXP_DEVSTA_URD);
6307 tg3_restore_pci_state(tp);
6309 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6312 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6313 val = tr32(MEMARB_MODE);
6314 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6316 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6318 tw32(0x5000, 0x400);
6321 tw32(GRC_MODE, tp->grc_mode);
6323 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6326 tw32(0xc4, val | (1 << 15));
6329 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6331 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6332 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6333 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6334 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6337 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6338 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6339 tw32_f(MAC_MODE, tp->mac_mode);
6340 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6341 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6342 tw32_f(MAC_MODE, tp->mac_mode);
6343 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6344 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6345 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6346 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6347 tw32_f(MAC_MODE, tp->mac_mode);
6349 tw32_f(MAC_MODE, 0);
6352 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6354 err = tg3_poll_fw(tp);
6360 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6361 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6364 tw32(0x7c00, val | (1 << 25));
6367 /* Reprobe ASF enable state. */
6368 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6369 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6370 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6371 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6374 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6375 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6376 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6377 tp->last_event_jiffies = jiffies;
6378 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6379 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6386 /* tp->lock is held. */
6387 static void tg3_stop_fw(struct tg3 *tp)
6389 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6390 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6391 /* Wait for RX cpu to ACK the previous event. */
6392 tg3_wait_for_event_ack(tp);
6394 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6396 tg3_generate_fw_event(tp);
6398 /* Wait for RX cpu to ACK this event. */
6399 tg3_wait_for_event_ack(tp);
6403 /* tp->lock is held. */
6404 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6410 tg3_write_sig_pre_reset(tp, kind);
6412 tg3_abort_hw(tp, silent);
6413 err = tg3_chip_reset(tp);
6415 __tg3_set_mac_addr(tp, 0);
6417 tg3_write_sig_legacy(tp, kind);
6418 tg3_write_sig_post_reset(tp, kind);
6426 #define RX_CPU_SCRATCH_BASE 0x30000
6427 #define RX_CPU_SCRATCH_SIZE 0x04000
6428 #define TX_CPU_SCRATCH_BASE 0x34000
6429 #define TX_CPU_SCRATCH_SIZE 0x04000
6431 /* tp->lock is held. */
6432 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6436 BUG_ON(offset == TX_CPU_BASE &&
6437 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6440 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6442 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6445 if (offset == RX_CPU_BASE) {
6446 for (i = 0; i < 10000; i++) {
6447 tw32(offset + CPU_STATE, 0xffffffff);
6448 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6449 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6453 tw32(offset + CPU_STATE, 0xffffffff);
6454 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6457 for (i = 0; i < 10000; i++) {
6458 tw32(offset + CPU_STATE, 0xffffffff);
6459 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6460 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6466 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6469 (offset == RX_CPU_BASE ? "RX" : "TX"));
6473 /* Clear firmware's nvram arbitration. */
6474 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6475 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6480 unsigned int fw_base;
6481 unsigned int fw_len;
6482 const __be32 *fw_data;
6485 /* tp->lock is held. */
6486 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6487 int cpu_scratch_size, struct fw_info *info)
6489 int err, lock_err, i;
6490 void (*write_op)(struct tg3 *, u32, u32);
6492 if (cpu_base == TX_CPU_BASE &&
6493 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6494 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6495 "TX cpu firmware on %s which is 5705.\n",
6500 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6501 write_op = tg3_write_mem;
6503 write_op = tg3_write_indirect_reg32;
6505 /* It is possible that bootcode is still loading at this point.
6506 * Get the nvram lock first before halting the cpu.
6508 lock_err = tg3_nvram_lock(tp);
6509 err = tg3_halt_cpu(tp, cpu_base);
6511 tg3_nvram_unlock(tp);
6515 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6516 write_op(tp, cpu_scratch_base + i, 0);
6517 tw32(cpu_base + CPU_STATE, 0xffffffff);
6518 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6519 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6520 write_op(tp, (cpu_scratch_base +
6521 (info->fw_base & 0xffff) +
6523 be32_to_cpu(info->fw_data[i]));
6531 /* tp->lock is held. */
6532 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6534 struct fw_info info;
6535 const __be32 *fw_data;
6538 fw_data = (void *)tp->fw->data;
6540 /* Firmware blob starts with version numbers, followed by
6541 start address and length. We are setting the complete length.
6542 length = end_address_of_bss - start_address_of_text.
6543 Remainder is the blob to be loaded contiguously
6544 from start address. */
6546 info.fw_base = be32_to_cpu(fw_data[1]);
6547 info.fw_len = tp->fw->size - 12;
6548 info.fw_data = &fw_data[3];
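/* Editorial note (not in the original source): the word layout implied by the
 * comment and the assignments above (indices into fw_data[]; names are
 * descriptive only):
 *
 *	fw_data[0]	version
 *	fw_data[1]	load (start) address  -> info.fw_base
 *	fw_data[2]	declared length
 *	fw_data[3..]	payload               -> info.fw_data
 *
 * The driver computes info.fw_len from the file size minus the 12-byte
 * header rather than from fw_data[2].
 */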
6550 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6551 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6556 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6557 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6562 /* Now startup only the RX cpu. */
6563 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6564 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6566 for (i = 0; i < 5; i++) {
6567 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6569 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6570 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6571 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6575 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6576 "to set RX CPU PC, is %08x should be %08x\n",
6577 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6581 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6582 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6587 /* 5705 needs a special version of the TSO firmware. */
6589 /* tp->lock is held. */
6590 static int tg3_load_tso_firmware(struct tg3 *tp)
6592 struct fw_info info;
6593 const __be32 *fw_data;
6594 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6597 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6600 fw_data = (void *)tp->fw->data;
6602 /* Firmware blob starts with version numbers, followed by
6603 start address and length. We are setting the complete length.
6604 length = end_address_of_bss - start_address_of_text.
6605 Remainder is the blob to be loaded contiguously
6606 from start address. */
6608 info.fw_base = be32_to_cpu(fw_data[1]);
6609 cpu_scratch_size = tp->fw_len;
6610 info.fw_len = tp->fw->size - 12;
6611 info.fw_data = &fw_data[3];
6613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6614 cpu_base = RX_CPU_BASE;
6615 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6617 cpu_base = TX_CPU_BASE;
6618 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6619 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6622 err = tg3_load_firmware_cpu(tp, cpu_base,
6623 cpu_scratch_base, cpu_scratch_size,
6628 /* Now startup the cpu. */
6629 tw32(cpu_base + CPU_STATE, 0xffffffff);
6630 tw32_f(cpu_base + CPU_PC, info.fw_base);
6632 for (i = 0; i < 5; i++) {
6633 if (tr32(cpu_base + CPU_PC) == info.fw_base)
6635 tw32(cpu_base + CPU_STATE, 0xffffffff);
6636 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6637 tw32_f(cpu_base + CPU_PC, info.fw_base);
6641 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6642 "to set CPU PC, is %08x should be %08x\n",
6643 tp->dev->name, tr32(cpu_base + CPU_PC),
6647 tw32(cpu_base + CPU_STATE, 0xffffffff);
6648 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6653 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6655 struct tg3 *tp = netdev_priv(dev);
6656 struct sockaddr *addr = p;
6657 int err = 0, skip_mac_1 = 0;
6659 if (!is_valid_ether_addr(addr->sa_data))
6662 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6664 if (!netif_running(dev))
6667 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6668 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6670 addr0_high = tr32(MAC_ADDR_0_HIGH);
6671 addr0_low = tr32(MAC_ADDR_0_LOW);
6672 addr1_high = tr32(MAC_ADDR_1_HIGH);
6673 addr1_low = tr32(MAC_ADDR_1_LOW);
6675 /* Skip MAC addr 1 if ASF is using it. */
6676 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6677 !(addr1_high == 0 && addr1_low == 0))
6680 spin_lock_bh(&tp->lock);
6681 __tg3_set_mac_addr(tp, skip_mac_1);
6682 spin_unlock_bh(&tp->lock);
6687 /* tp->lock is held. */
6688 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6689 dma_addr_t mapping, u32 maxlen_flags,
6693 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6694 ((u64) mapping >> 32));
6696 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6697 ((u64) mapping & 0xffffffff));
6699 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6702 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6704 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6708 static void __tg3_set_rx_mode(struct net_device *);
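/* Editorial sketch (not part of the original driver): the four SRAM words of
 * a TG3_BDINFO block that tg3_set_bdinfo() above programs, shown as a struct
 * for clarity.  The struct name is hypothetical; the offsets correspond to
 * the TG3_BDINFO_* constants written by the code.
 */
struct tg3_example_bdinfo {
	u32 host_addr_hi;	/* TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH */
	u32 host_addr_lo;	/* TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW */
	u32 maxlen_flags;	/* TG3_BDINFO_MAXLEN_FLAGS */
	u32 nic_addr;		/* TG3_BDINFO_NIC_ADDR */
};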
6709 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6711 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6712 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6713 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6714 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6715 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6716 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6717 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6719 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6720 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6721 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6722 u32 val = ec->stats_block_coalesce_usecs;
6724 if (!netif_carrier_ok(tp->dev))
6727 tw32(HOSTCC_STAT_COAL_TICKS, val);
6731 /* tp->lock is held. */
6732 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6734 u32 val, rdmac_mode;
6737 tg3_disable_ints(tp);
6741 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6743 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6744 tg3_abort_hw(tp, 1);
6748 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6751 err = tg3_chip_reset(tp);
6755 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6757 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
6758 val = tr32(TG3_CPMU_CTRL);
6759 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6760 tw32(TG3_CPMU_CTRL, val);
6762 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6763 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6764 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6765 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6767 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6768 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6769 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6770 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6772 val = tr32(TG3_CPMU_HST_ACC);
6773 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6774 val |= CPMU_HST_ACC_MACCLK_6_25;
6775 tw32(TG3_CPMU_HST_ACC, val);
6778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6779 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
6780 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
6781 PCIE_PWR_MGMT_L1_THRESH_4MS;
6782 tw32(PCIE_PWR_MGMT_THRESH, val);
6784 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
6785 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
6787 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
6790 if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
6791 val = tr32(TG3_PCIE_LNKCTL);
6792 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
6793 val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
6795 val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
6796 tw32(TG3_PCIE_LNKCTL, val);
6799 /* This works around an issue with Athlon chipsets on
6800 * B3 tigon3 silicon. This bit has no effect on any
6801 * other revision. But do not set this on PCI Express
6802 * chips and don't even touch the clocks if the CPMU is present.
6804 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6805 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6806 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6807 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6810 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6811 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6812 val = tr32(TG3PCI_PCISTATE);
6813 val |= PCISTATE_RETRY_SAME_DMA;
6814 tw32(TG3PCI_PCISTATE, val);
6817 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6818 /* Allow reads and writes to the
6819 * APE register and memory space.
6821 val = tr32(TG3PCI_PCISTATE);
6822 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6823 PCISTATE_ALLOW_APE_SHMEM_WR;
6824 tw32(TG3PCI_PCISTATE, val);
6827 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6828 /* Enable some hw fixes. */
6829 val = tr32(TG3PCI_MSI_DATA);
6830 val |= (1 << 26) | (1 << 28) | (1 << 29);
6831 tw32(TG3PCI_MSI_DATA, val);
6834 /* Descriptor ring init may make accesses to the
6835 * NIC SRAM area to set up the TX descriptors, so we
6836 * can only do this after the hardware has been
6837 * successfully reset.
6839 err = tg3_init_rings(tp);
6843 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6844 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6845 /* This value is determined during the probe time DMA
6846 * engine test, tg3_test_dma.
6848 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6851 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6852 GRC_MODE_4X_NIC_SEND_RINGS |
6853 GRC_MODE_NO_TX_PHDR_CSUM |
6854 GRC_MODE_NO_RX_PHDR_CSUM);
6855 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6857 /* Pseudo-header checksum is done by hardware logic and not
6858 * the offload processors, so make the chip do the pseudo-
6859 * header checksums on receive. For transmit it is more
6860 * convenient to do the pseudo-header checksum in software
6861 * as Linux does that on transmit for us in all cases.
6863 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6867 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6869 /* Set up the timer prescaler register. The clock is always 66MHz. */
6870 val = tr32(GRC_MISC_CFG);
6872 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6873 tw32(GRC_MISC_CFG, val);
6875 /* Initialize MBUF/DESC pool. */
6876 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6878 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6879 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6881 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6883 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6884 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6885 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6887 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6890 fw_len = tp->fw_len;
6891 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6892 tw32(BUFMGR_MB_POOL_ADDR,
6893 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6894 tw32(BUFMGR_MB_POOL_SIZE,
6895 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6898 if (tp->dev->mtu <= ETH_DATA_LEN) {
6899 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6900 tp->bufmgr_config.mbuf_read_dma_low_water);
6901 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6902 tp->bufmgr_config.mbuf_mac_rx_low_water);
6903 tw32(BUFMGR_MB_HIGH_WATER,
6904 tp->bufmgr_config.mbuf_high_water);
6906 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6907 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6908 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6909 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6910 tw32(BUFMGR_MB_HIGH_WATER,
6911 tp->bufmgr_config.mbuf_high_water_jumbo);
6913 tw32(BUFMGR_DMA_LOW_WATER,
6914 tp->bufmgr_config.dma_low_water);
6915 tw32(BUFMGR_DMA_HIGH_WATER,
6916 tp->bufmgr_config.dma_high_water);
6918 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6919 for (i = 0; i < 2000; i++) {
6920 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6925 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6930 /* Setup replenish threshold. */
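/* The NIC requests standard-ring replenishment from the host once it has
 * consumed this many buffer descriptors: one eighth of the posted count,
 * clamped to the chip's maximum post value (and, on the 5906, to half of
 * its smaller internal ring).
 */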
6931 val = tp->rx_pending / 8;
6934 else if (val > tp->rx_std_max_post)
6935 val = tp->rx_std_max_post;
6936 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6937 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6938 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6940 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6941 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6944 tw32(RCVBDI_STD_THRESH, val);
6946 /* Initialize TG3_BDINFO's at:
6947 * RCVDBDI_STD_BD: standard eth size rx ring
6948 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6949 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6952 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6953 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6954 * ring attribute flags
6955 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6957 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6958 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6960 * The size of each ring is fixed in the firmware, but the location is configurable.
6963 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6964 ((u64) tp->rx_std_mapping >> 32));
6965 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6966 ((u64) tp->rx_std_mapping & 0xffffffff));
6967 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6968 NIC_SRAM_RX_BUFFER_DESC);
6970 /* Don't even try to program the JUMBO/MINI buffer descriptor configs on 5705.
6973 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6974 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6975 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6977 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6978 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6980 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6981 BDINFO_FLAGS_DISABLED);
6983 /* Setup replenish threshold. */
6984 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6986 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6987 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6988 ((u64) tp->rx_jumbo_mapping >> 32));
6989 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6990 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6991 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6992 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6993 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6994 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6996 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6997 BDINFO_FLAGS_DISABLED);
7002 /* There is only one send ring on 5705/5750, no need to explicitly
7003 * disable the others.
7005 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7006 /* Clear out send RCB ring in SRAM. */
7007 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7008 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7009 BDINFO_FLAGS_DISABLED);
7014 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7015 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7017 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7018 tp->tx_desc_mapping,
7019 (TG3_TX_RING_SIZE <<
7020 BDINFO_FLAGS_MAXLEN_SHIFT),
7021 NIC_SRAM_TX_BUFFER_DESC);
7023 /* There is only one receive return ring on 5705/5750, no need
7024 * to explicitly disable the others.
7026 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7027 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7028 i += TG3_BDINFO_SIZE) {
7029 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7030 BDINFO_FLAGS_DISABLED);
7035 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7037 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7039 (TG3_RX_RCB_RING_SIZE(tp) <<
7040 BDINFO_FLAGS_MAXLEN_SHIFT),
7043 tp->rx_std_ptr = tp->rx_pending;
7044 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7047 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7048 tp->rx_jumbo_pending : 0;
7049 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7052 /* Initialize MAC address and backoff seed. */
7053 __tg3_set_mac_addr(tp, 0);
7055 /* MTU + ethernet header + FCS + optional VLAN tag */
7056 tw32(MAC_RX_MTU_SIZE,
7057 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7059 /* The slot time is changed by tg3_setup_phy if we
7060 * run at gigabit with half duplex.
7062 tw32(MAC_TX_LENGTHS,
7063 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7064 (6 << TX_LENGTHS_IPG_SHIFT) |
7065 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7067 /* Receive rules. */
7068 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7069 tw32(RCVLPC_CONFIG, 0x0181);
7071 /* Calculate the RDMAC_MODE setting early; we need it to determine
7072 * the RCVLPC_STATE_ENABLE mask.
7074 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7075 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7076 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7077 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7078 RDMAC_MODE_LNGREAD_ENAB);
7080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7083 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7084 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7085 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7087 /* If statement applies to 5705 and 5750 PCI devices only */
7088 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7089 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7090 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7091 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7093 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7094 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7095 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7096 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7100 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7101 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7103 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7104 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7108 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7110 /* Receive/send statistics. */
7111 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7112 val = tr32(RCVLPC_STATS_ENABLE);
7113 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7114 tw32(RCVLPC_STATS_ENABLE, val);
7115 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7116 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7117 val = tr32(RCVLPC_STATS_ENABLE);
7118 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7119 tw32(RCVLPC_STATS_ENABLE, val);
7121 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7123 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7124 tw32(SNDDATAI_STATSENAB, 0xffffff);
7125 tw32(SNDDATAI_STATSCTRL,
7126 (SNDDATAI_SCTRL_ENABLE |
7127 SNDDATAI_SCTRL_FASTUPD));
7129 /* Setup host coalescing engine. */
7130 tw32(HOSTCC_MODE, 0);
7131 for (i = 0; i < 2000; i++) {
7132 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7137 __tg3_set_coalesce(tp, &tp->coal);
7139 /* set status block DMA address */
7140 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7141 ((u64) tp->status_mapping >> 32));
7142 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7143 ((u64) tp->status_mapping & 0xffffffff));
7145 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7146 /* Status/statistics block address. See tg3_timer,
7147 * the tg3_periodic_fetch_stats call there, and
7148 * tg3_get_stats to see how this works for 5705/5750 chips.
7150 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7151 ((u64) tp->stats_mapping >> 32));
7152 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7153 ((u64) tp->stats_mapping & 0xffffffff));
7154 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7155 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7158 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7160 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7161 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7162 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7163 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7165 /* Clear statistics/status block in chip, and status block in ram. */
7166 for (i = NIC_SRAM_STATS_BLK;
7167 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7169 tg3_write_mem(tp, i, 0);
7172 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7174 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7175 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7176 /* reset to prevent losing 1st rx packet intermittently */
7177 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7181 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7182 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7185 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7186 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7187 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7188 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7189 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7190 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7191 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7194 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7195 * If TG3_FLG2_IS_NIC is zero, we should read the
7196 * register to preserve the GPIO settings for LOMs. The GPIOs,
7197 * whether used as inputs or outputs, are set by boot code after reset.
7200 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7203 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7204 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7205 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7207 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7208 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7209 GRC_LCLCTRL_GPIO_OUTPUT3;
7211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7212 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7214 tp->grc_local_ctrl &= ~gpio_mask;
7215 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7217 /* GPIO1 must be driven high for eeprom write protect */
7218 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7219 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7220 GRC_LCLCTRL_GPIO_OUTPUT1);
7222 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7225 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7227 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7228 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7232 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7233 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7234 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7235 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7236 WDMAC_MODE_LNGREAD_ENAB);
7238 /* If statement applies to 5705 and 5750 PCI devices only */
7239 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7240 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7242 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7243 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7244 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7246 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7247 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7248 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7249 val |= WDMAC_MODE_RX_ACCEL;
7253 /* Enable host coalescing bug fix */
7254 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7255 val |= WDMAC_MODE_STATUS_TAG_FIX;
7257 tw32_f(WDMAC_MODE, val);
7260 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7263 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7266 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7267 pcix_cmd |= PCI_X_CMD_READ_2K;
7268 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7269 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7270 pcix_cmd |= PCI_X_CMD_READ_2K;
7272 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7276 tw32_f(RDMAC_MODE, rdmac_mode);
7279 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7280 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7281 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7285 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7287 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7289 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7290 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7291 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7292 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7293 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7294 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7295 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7296 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7298 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7299 err = tg3_load_5701_a0_firmware_fix(tp);
7304 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7305 err = tg3_load_tso_firmware(tp);
7310 tp->tx_mode = TX_MODE_ENABLE;
7311 tw32_f(MAC_TX_MODE, tp->tx_mode);
7314 tp->rx_mode = RX_MODE_ENABLE;
7315 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7316 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7318 tw32_f(MAC_RX_MODE, tp->rx_mode);
7321 tw32(MAC_LED_CTRL, tp->led_ctrl);
7323 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7324 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7325 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7328 tw32_f(MAC_RX_MODE, tp->rx_mode);
7331 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7332 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7333 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7334 /* Set drive transmission level to 1.2V */
7335 /* only if the signal pre-emphasis bit is not set */
7336 val = tr32(MAC_SERDES_CFG);
7339 tw32(MAC_SERDES_CFG, val);
7341 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7342 tw32(MAC_SERDES_CFG, 0x616000);
7345 /* Prevent chip from dropping frames when flow control is enabled.
7348 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7351 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7352 /* Use hardware link auto-negotiation */
7353 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7356 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7357 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7360 tmp = tr32(SERDES_RX_CTRL);
7361 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7362 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7363 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7364 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7367 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7368 if (tp->link_config.phy_is_low_power) {
7369 tp->link_config.phy_is_low_power = 0;
7370 tp->link_config.speed = tp->link_config.orig_speed;
7371 tp->link_config.duplex = tp->link_config.orig_duplex;
7372 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7375 err = tg3_setup_phy(tp, 0);
7379 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7380 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
7383 /* Clear CRC stats. */
7384 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7385 tg3_writephy(tp, MII_TG3_TEST1,
7386 tmp | MII_TG3_TEST1_CRC_EN);
7387 tg3_readphy(tp, 0x14, &tmp);
7392 __tg3_set_rx_mode(tp->dev);
7394 /* Initialize receive rules. */
7395 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7396 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7397 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7398 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7400 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7401 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7405 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7409 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7411 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7413 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7415 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7417 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7419 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7421 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7423 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7425 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7427 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7429 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7431 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7433 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7435 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7443 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7444 /* Write our heartbeat update interval to APE. */
7445 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7446 APE_HOST_HEARTBEAT_INT_DISABLE);
7448 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7453 /* Called at device open time to get the chip ready for
7454 * packet processing. Invoked with tp->lock held.
7456 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7458 tg3_switch_clocks(tp);
7460 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7462 return tg3_reset_hw(tp, reset_phy);
7465 #define TG3_STAT_ADD32(PSTAT, REG) \
7466 do { u32 __val = tr32(REG); \
7467 (PSTAT)->low += __val; \
7468 if ((PSTAT)->low < __val) \
7469 (PSTAT)->high += 1; \
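/* TG3_STAT_ADD32 accumulates a 32-bit hardware counter into a 64-bit
 * software counter: if the low word ends up smaller than the value just
 * added, the 32-bit addition wrapped, so carry one into the high word.
 */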
7472 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7474 struct tg3_hw_stats *sp = tp->hw_stats;
7476 if (!netif_carrier_ok(tp->dev))
7479 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7480 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7481 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7482 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7483 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7484 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7485 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7486 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7487 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7488 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7489 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7490 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7491 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7493 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7494 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7495 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7496 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7497 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7498 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7499 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7500 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7501 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7502 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7503 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7504 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7505 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7506 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7508 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7509 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7510 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7513 static void tg3_timer(unsigned long __opaque)
7515 struct tg3 *tp = (struct tg3 *) __opaque;
7520 spin_lock(&tp->lock);
7522 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7523 /* All of this garbage is needed because, when using non-tagged
7524 * IRQ status, the mailbox/status_block protocol the chip
7525 * uses with the CPU is race prone.
7527 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7528 tw32(GRC_LOCAL_CTRL,
7529 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7531 tw32(HOSTCC_MODE, tp->coalesce_mode |
7532 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7535 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7536 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7537 spin_unlock(&tp->lock);
7538 schedule_work(&tp->reset_task);
7543 /* This part only runs once per second. */
7544 if (!--tp->timer_counter) {
7545 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7546 tg3_periodic_fetch_stats(tp);
7548 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7552 mac_stat = tr32(MAC_STATUS);
7555 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7556 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7558 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7562 tg3_setup_phy(tp, 0);
7563 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7564 u32 mac_stat = tr32(MAC_STATUS);
7567 if (netif_carrier_ok(tp->dev) &&
7568 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7571 if (!netif_carrier_ok(tp->dev) &&
7572 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7573 MAC_STATUS_SIGNAL_DET))) {
7577 if (!tp->serdes_counter) {
7580 ~MAC_MODE_PORT_MODE_MASK));
7582 tw32_f(MAC_MODE, tp->mac_mode);
7585 tg3_setup_phy(tp, 0);
7587 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7588 tg3_serdes_parallel_detect(tp);
7590 tp->timer_counter = tp->timer_multiplier;
7593 /* Heartbeat is only sent once every 2 seconds.
7595 * The heartbeat is to tell the ASF firmware that the host
7596 * driver is still alive. In the event that the OS crashes,
7597 * ASF needs to reset the hardware to free up the FIFO space
7598 * that may be filled with rx packets destined for the host.
7599 * If the FIFO is full, ASF will no longer function properly.
7601 * Unintended resets have been reported on real-time kernels
7602 * where the timer doesn't run on time. Netpoll will also have the same problem.
7605 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7606 * to check the ring condition when the heartbeat is expiring
7607 * before doing the reset. This will prevent most unintended resets.
7610 if (!--tp->asf_counter) {
7611 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7612 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7613 tg3_wait_for_event_ack(tp);
7615 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7616 FWCMD_NICDRV_ALIVE3);
7617 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7618 /* 5 seconds timeout */
7619 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7621 tg3_generate_fw_event(tp);
7623 tp->asf_counter = tp->asf_multiplier;
7626 spin_unlock(&tp->lock);
7629 tp->timer.expires = jiffies + tp->timer_offset;
7630 add_timer(&tp->timer);
7633 static int tg3_request_irq(struct tg3 *tp)
7636 unsigned long flags;
7637 struct net_device *dev = tp->dev;
7639 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7641 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7643 flags = IRQF_SAMPLE_RANDOM;
7646 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7647 fn = tg3_interrupt_tagged;
7648 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7650 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7653 static int tg3_test_interrupt(struct tg3 *tp)
7655 struct net_device *dev = tp->dev;
7656 int err, i, intr_ok = 0;
7658 if (!netif_running(dev))
7661 tg3_disable_ints(tp);
7663 free_irq(tp->pdev->irq, dev);
7665 err = request_irq(tp->pdev->irq, tg3_test_isr,
7666 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7670 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7671 tg3_enable_ints(tp);
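/* Force an immediate interrupt through the coalescing engine
 * (HOSTCC_MODE_NOW below) and then poll for evidence that it was
 * delivered: either the interrupt mailbox becomes non-zero or the test
 * ISR has run and masked PCI interrupts in MISC_HOST_CTRL.
 */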
7673 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7676 for (i = 0; i < 5; i++) {
7677 u32 int_mbox, misc_host_ctrl;
7679 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7681 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7683 if ((int_mbox != 0) ||
7684 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7692 tg3_disable_ints(tp);
7694 free_irq(tp->pdev->irq, dev);
7696 err = tg3_request_irq(tp);
7707 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and INTx
7708 * mode is successfully restored.
7710 static int tg3_test_msi(struct tg3 *tp)
7712 struct net_device *dev = tp->dev;
7716 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7719 /* Turn off SERR reporting in case MSI terminates with Master Abort.
7722 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7723 pci_write_config_word(tp->pdev, PCI_COMMAND,
7724 pci_cmd & ~PCI_COMMAND_SERR);
7726 err = tg3_test_interrupt(tp);
7728 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7733 /* other failures */
7737 /* MSI test failed, go back to INTx mode */
7738 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7739 "switching to INTx mode. Please report this failure to "
7740 "the PCI maintainer and include system chipset information.\n",
7743 free_irq(tp->pdev->irq, dev);
7744 pci_disable_msi(tp->pdev);
7746 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7748 err = tg3_request_irq(tp);
7752 /* Need to reset the chip because the MSI cycle may have terminated
7753 * with Master Abort.
7755 tg3_full_lock(tp, 1);
7757 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7758 err = tg3_init_hw(tp, 1);
7760 tg3_full_unlock(tp);
7763 free_irq(tp->pdev->irq, dev);
7768 static int tg3_request_firmware(struct tg3 *tp)
7770 const __be32 *fw_data;
7772 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
7773 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
7774 tp->dev->name, tp->fw_needed);
7778 fw_data = (void *)tp->fw->data;
7780 /* Firmware blob starts with version numbers, followed by
7781 * start address and _full_ length including BSS sections
7782 * (which must be longer than the actual data, of course).
7785 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
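/* Assumed header layout, matching the comment above: fw_data[0] holds the
 * version, fw_data[1] the load address and fw_data[2] the full image
 * length (including BSS), all as big-endian 32-bit words.
 */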
7786 if (tp->fw_len < (tp->fw->size - 12)) {
7787 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
7788 tp->dev->name, tp->fw_len, tp->fw_needed);
7789 release_firmware(tp->fw);
7794 /* We no longer need firmware; we have it. */
7795 tp->fw_needed = NULL;
7799 static int tg3_open(struct net_device *dev)
7801 struct tg3 *tp = netdev_priv(dev);
7804 if (tp->fw_needed) {
7805 err = tg3_request_firmware(tp);
7806 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7810 printk(KERN_WARNING "%s: TSO capability disabled.\n",
7812 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
7813 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7814 printk(KERN_NOTICE "%s: TSO capability restored.\n",
7816 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
7820 netif_carrier_off(tp->dev);
7822 err = tg3_set_power_state(tp, PCI_D0);
7826 tg3_full_lock(tp, 0);
7828 tg3_disable_ints(tp);
7829 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7831 tg3_full_unlock(tp);
7833 /* The placement of this call is tied
7834 * to the setup and use of Host TX descriptors.
7836 err = tg3_alloc_consistent(tp);
7840 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7841 /* All MSI supporting chips should support tagged
7842 * status. Assert that this is the case.
7844 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7845 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7846 "Not using MSI.\n", tp->dev->name);
7847 } else if (pci_enable_msi(tp->pdev) == 0) {
7850 msi_mode = tr32(MSGINT_MODE);
7851 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7852 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7855 err = tg3_request_irq(tp);
7858 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7859 pci_disable_msi(tp->pdev);
7860 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7862 tg3_free_consistent(tp);
7866 napi_enable(&tp->napi);
7868 tg3_full_lock(tp, 0);
7870 err = tg3_init_hw(tp, 1);
7872 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7875 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7876 tp->timer_offset = HZ;
7878 tp->timer_offset = HZ / 10;
7880 BUG_ON(tp->timer_offset > HZ);
7881 tp->timer_counter = tp->timer_multiplier =
7882 (HZ / tp->timer_offset);
7883 tp->asf_counter = tp->asf_multiplier =
7884 ((HZ / tp->timer_offset) * 2);
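/* tg3_timer() therefore runs once per second with tagged status and ten
 * times per second otherwise; timer_counter rescales that so the
 * once-per-second work still fires at 1 Hz, and asf_counter gives the
 * 2-second ASF heartbeat interval.
 */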
7886 init_timer(&tp->timer);
7887 tp->timer.expires = jiffies + tp->timer_offset;
7888 tp->timer.data = (unsigned long) tp;
7889 tp->timer.function = tg3_timer;
7892 tg3_full_unlock(tp);
7895 napi_disable(&tp->napi);
7896 free_irq(tp->pdev->irq, dev);
7897 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7898 pci_disable_msi(tp->pdev);
7899 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7901 tg3_free_consistent(tp);
7905 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7906 err = tg3_test_msi(tp);
7909 tg3_full_lock(tp, 0);
7911 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7912 pci_disable_msi(tp->pdev);
7913 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7915 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7917 tg3_free_consistent(tp);
7919 tg3_full_unlock(tp);
7921 napi_disable(&tp->napi);
7926 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7927 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
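/* With one-shot MSI the chip presumably disarms further MSIs after each
 * message until the driver re-enables interrupts, so the ISR can skip
 * explicit masking; it is only switched on once the MSI test has passed.
 */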
7928 u32 val = tr32(PCIE_TRANSACTION_CFG);
7930 tw32(PCIE_TRANSACTION_CFG,
7931 val | PCIE_TRANS_CFG_1SHOT_MSI);
7938 tg3_full_lock(tp, 0);
7940 add_timer(&tp->timer);
7941 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7942 tg3_enable_ints(tp);
7944 tg3_full_unlock(tp);
7946 netif_start_queue(dev);
7952 /*static*/ void tg3_dump_state(struct tg3 *tp)
7954 u32 val32, val32_2, val32_3, val32_4, val32_5;
7958 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7959 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7960 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7964 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7965 tr32(MAC_MODE), tr32(MAC_STATUS));
7966 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7967 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7968 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7969 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7970 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7971 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7973 /* Send data initiator control block */
7974 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7975 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7976 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7977 tr32(SNDDATAI_STATSCTRL));
7979 /* Send data completion control block */
7980 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7982 /* Send BD ring selector block */
7983 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7984 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7986 /* Send BD initiator control block */
7987 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7988 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7990 /* Send BD completion control block */
7991 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7993 /* Receive list placement control block */
7994 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7995 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7996 printk(" RCVLPC_STATSCTRL[%08x]\n",
7997 tr32(RCVLPC_STATSCTRL));
7999 /* Receive data and receive BD initiator control block */
8000 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8001 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8003 /* Receive data completion control block */
8004 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8007 /* Receive BD initiator control block */
8008 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8009 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8011 /* Receive BD completion control block */
8012 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8013 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8015 /* Receive list selector control block */
8016 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8017 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8019 /* Mbuf cluster free block */
8020 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8021 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8023 /* Host coalescing control block */
8024 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8025 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8026 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8027 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8028 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8029 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8030 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8031 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8032 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8033 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8034 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8035 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8037 /* Memory arbiter control block */
8038 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8039 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8041 /* Buffer manager control block */
8042 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8043 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8044 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8045 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8046 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8047 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8048 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8049 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8051 /* Read DMA control block */
8052 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8053 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8055 /* Write DMA control block */
8056 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8057 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8059 /* DMA completion block */
8060 printk("DEBUG: DMAC_MODE[%08x]\n",
8064 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8065 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8066 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8067 tr32(GRC_LOCAL_CTRL));
8070 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8071 tr32(RCVDBDI_JUMBO_BD + 0x0),
8072 tr32(RCVDBDI_JUMBO_BD + 0x4),
8073 tr32(RCVDBDI_JUMBO_BD + 0x8),
8074 tr32(RCVDBDI_JUMBO_BD + 0xc));
8075 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8076 tr32(RCVDBDI_STD_BD + 0x0),
8077 tr32(RCVDBDI_STD_BD + 0x4),
8078 tr32(RCVDBDI_STD_BD + 0x8),
8079 tr32(RCVDBDI_STD_BD + 0xc));
8080 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8081 tr32(RCVDBDI_MINI_BD + 0x0),
8082 tr32(RCVDBDI_MINI_BD + 0x4),
8083 tr32(RCVDBDI_MINI_BD + 0x8),
8084 tr32(RCVDBDI_MINI_BD + 0xc));
8086 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8087 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8088 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8089 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8090 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8091 val32, val32_2, val32_3, val32_4);
8093 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8094 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8095 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8096 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8097 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8098 val32, val32_2, val32_3, val32_4);
8100 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8101 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8102 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8103 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8104 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8105 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8106 val32, val32_2, val32_3, val32_4, val32_5);
8108 /* SW status block */
8109 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8110 tp->hw_status->status,
8111 tp->hw_status->status_tag,
8112 tp->hw_status->rx_jumbo_consumer,
8113 tp->hw_status->rx_consumer,
8114 tp->hw_status->rx_mini_consumer,
8115 tp->hw_status->idx[0].rx_producer,
8116 tp->hw_status->idx[0].tx_consumer);
8118 /* SW statistics block */
8119 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8120 ((u32 *)tp->hw_stats)[0],
8121 ((u32 *)tp->hw_stats)[1],
8122 ((u32 *)tp->hw_stats)[2],
8123 ((u32 *)tp->hw_stats)[3]);
8126 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8127 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8128 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8129 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8130 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8132 /* NIC side send descriptors. */
8133 for (i = 0; i < 6; i++) {
8136 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8137 + (i * sizeof(struct tg3_tx_buffer_desc));
8138 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8140 readl(txd + 0x0), readl(txd + 0x4),
8141 readl(txd + 0x8), readl(txd + 0xc));
8144 /* NIC side RX descriptors. */
8145 for (i = 0; i < 6; i++) {
8148 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8149 + (i * sizeof(struct tg3_rx_buffer_desc));
8150 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8152 readl(rxd + 0x0), readl(rxd + 0x4),
8153 readl(rxd + 0x8), readl(rxd + 0xc));
8154 rxd += (4 * sizeof(u32));
8155 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8157 readl(rxd + 0x0), readl(rxd + 0x4),
8158 readl(rxd + 0x8), readl(rxd + 0xc));
8161 for (i = 0; i < 6; i++) {
8164 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8165 + (i * sizeof(struct tg3_rx_buffer_desc));
8166 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8168 readl(rxd + 0x0), readl(rxd + 0x4),
8169 readl(rxd + 0x8), readl(rxd + 0xc));
8170 rxd += (4 * sizeof(u32));
8171 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8173 readl(rxd + 0x0), readl(rxd + 0x4),
8174 readl(rxd + 0x8), readl(rxd + 0xc));
8179 static struct net_device_stats *tg3_get_stats(struct net_device *);
8180 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8182 static int tg3_close(struct net_device *dev)
8184 struct tg3 *tp = netdev_priv(dev);
8186 napi_disable(&tp->napi);
8187 cancel_work_sync(&tp->reset_task);
8189 netif_stop_queue(dev);
8191 del_timer_sync(&tp->timer);
8193 tg3_full_lock(tp, 1);
8198 tg3_disable_ints(tp);
8200 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8202 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8204 tg3_full_unlock(tp);
8206 free_irq(tp->pdev->irq, dev);
8207 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8208 pci_disable_msi(tp->pdev);
8209 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8212 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8213 sizeof(tp->net_stats_prev));
8214 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8215 sizeof(tp->estats_prev));
8217 tg3_free_consistent(tp);
8219 tg3_set_power_state(tp, PCI_D3hot);
8221 netif_carrier_off(tp->dev);
8226 static inline unsigned long get_stat64(tg3_stat64_t *val)
8230 #if (BITS_PER_LONG == 32)
8233 ret = ((u64)val->high << 32) | ((u64)val->low);
8238 static inline u64 get_estat64(tg3_stat64_t *val)
8240 return ((u64)val->high << 32) | ((u64)val->low);
8243 static unsigned long calc_crc_errors(struct tg3 *tp)
8245 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8247 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8248 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8249 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8252 spin_lock_bh(&tp->lock);
8253 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8254 tg3_writephy(tp, MII_TG3_TEST1,
8255 val | MII_TG3_TEST1_CRC_EN);
8256 tg3_readphy(tp, 0x14, &val);
8259 spin_unlock_bh(&tp->lock);
8261 tp->phy_crc_errors += val;
8263 return tp->phy_crc_errors;
8266 return get_stat64(&hw_stats->rx_fcs_errors);
8269 #define ESTAT_ADD(member) \
8270 estats->member = old_estats->member + \
8271 get_estat64(&hw_stats->member)
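/* Each ethtool counter is the snapshot saved into estats_prev at the last
 * tg3_close() plus the live 64-bit hardware statistic, so the reported
 * values stay monotonic across interface down/up cycles.
 */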
8273 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8275 struct tg3_ethtool_stats *estats = &tp->estats;
8276 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8277 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8282 ESTAT_ADD(rx_octets);
8283 ESTAT_ADD(rx_fragments);
8284 ESTAT_ADD(rx_ucast_packets);
8285 ESTAT_ADD(rx_mcast_packets);
8286 ESTAT_ADD(rx_bcast_packets);
8287 ESTAT_ADD(rx_fcs_errors);
8288 ESTAT_ADD(rx_align_errors);
8289 ESTAT_ADD(rx_xon_pause_rcvd);
8290 ESTAT_ADD(rx_xoff_pause_rcvd);
8291 ESTAT_ADD(rx_mac_ctrl_rcvd);
8292 ESTAT_ADD(rx_xoff_entered);
8293 ESTAT_ADD(rx_frame_too_long_errors);
8294 ESTAT_ADD(rx_jabbers);
8295 ESTAT_ADD(rx_undersize_packets);
8296 ESTAT_ADD(rx_in_length_errors);
8297 ESTAT_ADD(rx_out_length_errors);
8298 ESTAT_ADD(rx_64_or_less_octet_packets);
8299 ESTAT_ADD(rx_65_to_127_octet_packets);
8300 ESTAT_ADD(rx_128_to_255_octet_packets);
8301 ESTAT_ADD(rx_256_to_511_octet_packets);
8302 ESTAT_ADD(rx_512_to_1023_octet_packets);
8303 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8304 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8305 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8306 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8307 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8309 ESTAT_ADD(tx_octets);
8310 ESTAT_ADD(tx_collisions);
8311 ESTAT_ADD(tx_xon_sent);
8312 ESTAT_ADD(tx_xoff_sent);
8313 ESTAT_ADD(tx_flow_control);
8314 ESTAT_ADD(tx_mac_errors);
8315 ESTAT_ADD(tx_single_collisions);
8316 ESTAT_ADD(tx_mult_collisions);
8317 ESTAT_ADD(tx_deferred);
8318 ESTAT_ADD(tx_excessive_collisions);
8319 ESTAT_ADD(tx_late_collisions);
8320 ESTAT_ADD(tx_collide_2times);
8321 ESTAT_ADD(tx_collide_3times);
8322 ESTAT_ADD(tx_collide_4times);
8323 ESTAT_ADD(tx_collide_5times);
8324 ESTAT_ADD(tx_collide_6times);
8325 ESTAT_ADD(tx_collide_7times);
8326 ESTAT_ADD(tx_collide_8times);
8327 ESTAT_ADD(tx_collide_9times);
8328 ESTAT_ADD(tx_collide_10times);
8329 ESTAT_ADD(tx_collide_11times);
8330 ESTAT_ADD(tx_collide_12times);
8331 ESTAT_ADD(tx_collide_13times);
8332 ESTAT_ADD(tx_collide_14times);
8333 ESTAT_ADD(tx_collide_15times);
8334 ESTAT_ADD(tx_ucast_packets);
8335 ESTAT_ADD(tx_mcast_packets);
8336 ESTAT_ADD(tx_bcast_packets);
8337 ESTAT_ADD(tx_carrier_sense_errors);
8338 ESTAT_ADD(tx_discards);
8339 ESTAT_ADD(tx_errors);
8341 ESTAT_ADD(dma_writeq_full);
8342 ESTAT_ADD(dma_write_prioq_full);
8343 ESTAT_ADD(rxbds_empty);
8344 ESTAT_ADD(rx_discards);
8345 ESTAT_ADD(rx_errors);
8346 ESTAT_ADD(rx_threshold_hit);
8348 ESTAT_ADD(dma_readq_full);
8349 ESTAT_ADD(dma_read_prioq_full);
8350 ESTAT_ADD(tx_comp_queue_full);
8352 ESTAT_ADD(ring_set_send_prod_index);
8353 ESTAT_ADD(ring_status_update);
8354 ESTAT_ADD(nic_irqs);
8355 ESTAT_ADD(nic_avoided_irqs);
8356 ESTAT_ADD(nic_tx_threshold_hit);
8361 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8363 struct tg3 *tp = netdev_priv(dev);
8364 struct net_device_stats *stats = &tp->net_stats;
8365 struct net_device_stats *old_stats = &tp->net_stats_prev;
8366 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8371 stats->rx_packets = old_stats->rx_packets +
8372 get_stat64(&hw_stats->rx_ucast_packets) +
8373 get_stat64(&hw_stats->rx_mcast_packets) +
8374 get_stat64(&hw_stats->rx_bcast_packets);
8376 stats->tx_packets = old_stats->tx_packets +
8377 get_stat64(&hw_stats->tx_ucast_packets) +
8378 get_stat64(&hw_stats->tx_mcast_packets) +
8379 get_stat64(&hw_stats->tx_bcast_packets);
8381 stats->rx_bytes = old_stats->rx_bytes +
8382 get_stat64(&hw_stats->rx_octets);
8383 stats->tx_bytes = old_stats->tx_bytes +
8384 get_stat64(&hw_stats->tx_octets);
8386 stats->rx_errors = old_stats->rx_errors +
8387 get_stat64(&hw_stats->rx_errors);
8388 stats->tx_errors = old_stats->tx_errors +
8389 get_stat64(&hw_stats->tx_errors) +
8390 get_stat64(&hw_stats->tx_mac_errors) +
8391 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8392 get_stat64(&hw_stats->tx_discards);
8394 stats->multicast = old_stats->multicast +
8395 get_stat64(&hw_stats->rx_mcast_packets);
8396 stats->collisions = old_stats->collisions +
8397 get_stat64(&hw_stats->tx_collisions);
8399 stats->rx_length_errors = old_stats->rx_length_errors +
8400 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8401 get_stat64(&hw_stats->rx_undersize_packets);
8403 stats->rx_over_errors = old_stats->rx_over_errors +
8404 get_stat64(&hw_stats->rxbds_empty);
8405 stats->rx_frame_errors = old_stats->rx_frame_errors +
8406 get_stat64(&hw_stats->rx_align_errors);
8407 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8408 get_stat64(&hw_stats->tx_discards);
8409 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8410 get_stat64(&hw_stats->tx_carrier_sense_errors);
8412 stats->rx_crc_errors = old_stats->rx_crc_errors +
8413 calc_crc_errors(tp);
8415 stats->rx_missed_errors = old_stats->rx_missed_errors +
8416 get_stat64(&hw_stats->rx_discards);
8421 static inline u32 calc_crc(unsigned char *buf, int len)
8429 for (j = 0; j < len; j++) {
8432 for (k = 0; k < 8; k++) {
8446 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8448 /* accept or reject all multicast frames */
8449 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8450 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8451 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8452 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8455 static void __tg3_set_rx_mode(struct net_device *dev)
8457 struct tg3 *tp = netdev_priv(dev);
8460 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8461 RX_MODE_KEEP_VLAN_TAG);
8463 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG flag clear.
8466 #if TG3_VLAN_TAG_USED
8468 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8469 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8471 /* By definition, VLAN is always disabled in this case.
8474 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8475 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8478 if (dev->flags & IFF_PROMISC) {
8479 /* Promiscuous mode. */
8480 rx_mode |= RX_MODE_PROMISC;
8481 } else if (dev->flags & IFF_ALLMULTI) {
8482 /* Accept all multicast. */
8483 tg3_set_multi (tp, 1);
8484 } else if (dev->mc_count < 1) {
8485 /* Reject all multicast. */
8486 tg3_set_multi (tp, 0);
8488 /* Accept one or more multicast(s). */
8489 struct dev_mc_list *mclist;
8491 u32 mc_filter[4] = { 0, };
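/* calc_crc() yields an Ethernet-style CRC-32 of each multicast address;
 * the lines inside the loop (partially elided here) appear to reduce it to
 * a 7-bit hash bucket whose top two bits select one of the four 32-bit
 * MAC_HASH registers and whose low five bits select the bit within it,
 * giving a 128-bit imperfect multicast filter.
 */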
8496 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8497 i++, mclist = mclist->next) {
8499 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8501 regidx = (bit & 0x60) >> 5;
8503 mc_filter[regidx] |= (1 << bit);
8506 tw32(MAC_HASH_REG_0, mc_filter[0]);
8507 tw32(MAC_HASH_REG_1, mc_filter[1]);
8508 tw32(MAC_HASH_REG_2, mc_filter[2]);
8509 tw32(MAC_HASH_REG_3, mc_filter[3]);
8512 if (rx_mode != tp->rx_mode) {
8513 tp->rx_mode = rx_mode;
8514 tw32_f(MAC_RX_MODE, rx_mode);
8519 static void tg3_set_rx_mode(struct net_device *dev)
8521 struct tg3 *tp = netdev_priv(dev);
8523 if (!netif_running(dev))
8526 tg3_full_lock(tp, 0);
8527 __tg3_set_rx_mode(dev);
8528 tg3_full_unlock(tp);
8531 #define TG3_REGDUMP_LEN (32 * 1024)
8533 static int tg3_get_regs_len(struct net_device *dev)
8535 return TG3_REGDUMP_LEN;
8538 static void tg3_get_regs(struct net_device *dev,
8539 struct ethtool_regs *regs, void *_p)
8542 struct tg3 *tp = netdev_priv(dev);
8548 memset(p, 0, TG3_REGDUMP_LEN);
8550 if (tp->link_config.phy_is_low_power)
8553 tg3_full_lock(tp, 0);
8555 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8556 #define GET_REG32_LOOP(base,len) \
8557 do { p = (u32 *)(orig_p + (base)); \
8558 for (i = 0; i < len; i += 4) \
8559 __GET_REG32((base) + i); \
8561 #define GET_REG32_1(reg) \
8562 do { p = (u32 *)(orig_p + (reg)); \
8563 __GET_REG32((reg)); \
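/* These helpers copy each register into the snapshot buffer at an offset
 * equal to the register's own address, so the 32 KB dump mirrors the
 * chip's register map; regions that are never read stay zero from the
 * memset above.
 */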
8566 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8567 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8568 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8569 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8570 GET_REG32_1(SNDDATAC_MODE);
8571 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8572 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8573 GET_REG32_1(SNDBDC_MODE);
8574 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8575 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8576 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8577 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8578 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8579 GET_REG32_1(RCVDCC_MODE);
8580 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8581 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8582 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8583 GET_REG32_1(MBFREE_MODE);
8584 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8585 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8586 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8587 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8588 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8589 GET_REG32_1(RX_CPU_MODE);
8590 GET_REG32_1(RX_CPU_STATE);
8591 GET_REG32_1(RX_CPU_PGMCTR);
8592 GET_REG32_1(RX_CPU_HWBKPT);
8593 GET_REG32_1(TX_CPU_MODE);
8594 GET_REG32_1(TX_CPU_STATE);
8595 GET_REG32_1(TX_CPU_PGMCTR);
8596 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8597 GET_REG32_LOOP(FTQ_RESET, 0x120);
8598 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8599 GET_REG32_1(DMAC_MODE);
8600 GET_REG32_LOOP(GRC_MODE, 0x4c);
8601 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8602 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8605 #undef GET_REG32_LOOP
8608 tg3_full_unlock(tp);
8611 static int tg3_get_eeprom_len(struct net_device *dev)
8613 struct tg3 *tp = netdev_priv(dev);
8615 return tp->nvram_size;
8618 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8620 struct tg3 *tp = netdev_priv(dev);
8623 u32 i, offset, len, b_offset, b_count;
8626 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
8629 if (tp->link_config.phy_is_low_power)
8632 offset = eeprom->offset;
8636 eeprom->magic = TG3_EEPROM_MAGIC;
8639 /* adjustments to start on required 4 byte boundary */
8640 b_offset = offset & 3;
8641 b_count = 4 - b_offset;
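/* e.g. offset=6 gives b_offset=2, b_count=2: read the aligned word at
 * offset 4 and copy only its last two bytes out to the caller.
 */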
8642 if (b_count > len) {
8643 /* i.e. offset=1 len=2 */
8646 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
8649 memcpy(data, ((char*)&val) + b_offset, b_count);
8652 eeprom->len += b_count;
8655 /* read bytes up to the last 4-byte boundary */
8656 pd = &data[eeprom->len];
8657 for (i = 0; i < (len - (len & 3)); i += 4) {
8658 ret = tg3_nvram_read_be32(tp, offset + i, &val);
8663 memcpy(pd + i, &val, 4);
8668 /* read the last bytes that do not end on a 4-byte boundary */
8669 pd = &data[eeprom->len];
8671 b_offset = offset + len - b_count;
8672 ret = tg3_nvram_read_be32(tp, b_offset, &val);
8675 memcpy(pd, &val, b_count);
8676 eeprom->len += b_count;
8681 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8683 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8685 struct tg3 *tp = netdev_priv(dev);
8687 u32 offset, len, b_offset, odd_len;
8691 if (tp->link_config.phy_is_low_power)
8694 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
8695 eeprom->magic != TG3_EEPROM_MAGIC)
8698 offset = eeprom->offset;
8701 if ((b_offset = (offset & 3))) {
8702 /* adjustments to start on required 4 byte boundary */
8703 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
8714 /* adjustments to end on required 4 byte boundary */
8716 len = (len + 3) & ~3;
8717 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
8723 if (b_offset || odd_len) {
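/* Read-modify-write: pad the caller's data with the 'start' and 'end'
 * words read above so that bytes outside the requested range are
 * preserved when the aligned block is written back to NVRAM.
 */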
8724 buf = kmalloc(len, GFP_KERNEL);
8728 memcpy(buf, &start, 4);
8730 memcpy(buf+len-4, &end, 4);
8731 memcpy(buf + b_offset, data, eeprom->len);
8734 ret = tg3_nvram_write_block(tp, offset, len, buf);
8742 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8744 struct tg3 *tp = netdev_priv(dev);
8746 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8747 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8749 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8752 cmd->supported = (SUPPORTED_Autoneg);
8754 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8755 cmd->supported |= (SUPPORTED_1000baseT_Half |
8756 SUPPORTED_1000baseT_Full);
8758 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8759 cmd->supported |= (SUPPORTED_100baseT_Half |
8760 SUPPORTED_100baseT_Full |
8761 SUPPORTED_10baseT_Half |
8762 SUPPORTED_10baseT_Full |
8764 cmd->port = PORT_TP;
8766 cmd->supported |= SUPPORTED_FIBRE;
8767 cmd->port = PORT_FIBRE;
8770 cmd->advertising = tp->link_config.advertising;
8771 if (netif_running(dev)) {
8772 cmd->speed = tp->link_config.active_speed;
8773 cmd->duplex = tp->link_config.active_duplex;
8775 cmd->phy_address = PHY_ADDR;
8776 cmd->transceiver = XCVR_INTERNAL;
8777 cmd->autoneg = tp->link_config.autoneg;
8783 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8785 struct tg3 *tp = netdev_priv(dev);
8787 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8788 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8790 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8793 if (cmd->autoneg != AUTONEG_ENABLE &&
8794 cmd->autoneg != AUTONEG_DISABLE)
8797 if (cmd->autoneg == AUTONEG_DISABLE &&
8798 cmd->duplex != DUPLEX_FULL &&
8799 cmd->duplex != DUPLEX_HALF)
8802 if (cmd->autoneg == AUTONEG_ENABLE) {
8803 u32 mask = ADVERTISED_Autoneg |
8805 ADVERTISED_Asym_Pause;
8807 if (!(tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8808 mask |= ADVERTISED_1000baseT_Half |
8809 ADVERTISED_1000baseT_Full;
8811 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
8812 mask |= ADVERTISED_100baseT_Half |
8813 ADVERTISED_100baseT_Full |
8814 ADVERTISED_10baseT_Half |
8815 ADVERTISED_10baseT_Full |
8818 mask |= ADVERTISED_FIBRE;
8820 if (cmd->advertising & ~mask)
8823 mask &= (ADVERTISED_1000baseT_Half |
8824 ADVERTISED_1000baseT_Full |
8825 ADVERTISED_100baseT_Half |
8826 ADVERTISED_100baseT_Full |
8827 ADVERTISED_10baseT_Half |
8828 ADVERTISED_10baseT_Full);
8830 cmd->advertising &= mask;
8832 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8833 if (cmd->speed != SPEED_1000)
8836 if (cmd->duplex != DUPLEX_FULL)
8839 if (cmd->speed != SPEED_100 &&
8840 cmd->speed != SPEED_10)
8845 tg3_full_lock(tp, 0);
8847 tp->link_config.autoneg = cmd->autoneg;
8848 if (cmd->autoneg == AUTONEG_ENABLE) {
8849 tp->link_config.advertising = (cmd->advertising |
8850 ADVERTISED_Autoneg);
8851 tp->link_config.speed = SPEED_INVALID;
8852 tp->link_config.duplex = DUPLEX_INVALID;
8854 tp->link_config.advertising = 0;
8855 tp->link_config.speed = cmd->speed;
8856 tp->link_config.duplex = cmd->duplex;
8859 tp->link_config.orig_speed = tp->link_config.speed;
8860 tp->link_config.orig_duplex = tp->link_config.duplex;
8861 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8863 if (netif_running(dev))
8864 tg3_setup_phy(tp, 1);
8866 tg3_full_unlock(tp);
8871 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8873 struct tg3 *tp = netdev_priv(dev);
8875 strcpy(info->driver, DRV_MODULE_NAME);
8876 strcpy(info->version, DRV_MODULE_VERSION);
8877 strcpy(info->fw_version, tp->fw_ver);
8878 strcpy(info->bus_info, pci_name(tp->pdev));
8881 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8883 struct tg3 *tp = netdev_priv(dev);
8885 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
8886 device_can_wakeup(&tp->pdev->dev))
8887 wol->supported = WAKE_MAGIC;
8891 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
8892 device_can_wakeup(&tp->pdev->dev))
8893 wol->wolopts = WAKE_MAGIC;
8894 memset(&wol->sopass, 0, sizeof(wol->sopass));
8897 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8899 struct tg3 *tp = netdev_priv(dev);
8900 struct device *dp = &tp->pdev->dev;
8902 if (wol->wolopts & ~WAKE_MAGIC)
8904 if ((wol->wolopts & WAKE_MAGIC) &&
8905 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
8908 spin_lock_bh(&tp->lock);
8909 if (wol->wolopts & WAKE_MAGIC) {
8910 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8911 device_set_wakeup_enable(dp, true);
8913 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8914 device_set_wakeup_enable(dp, false);
8916 spin_unlock_bh(&tp->lock);
8921 static u32 tg3_get_msglevel(struct net_device *dev)
8923 struct tg3 *tp = netdev_priv(dev);
8924 return tp->msg_enable;
8927 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8929 struct tg3 *tp = netdev_priv(dev);
8930 tp->msg_enable = value;
8933 static int tg3_set_tso(struct net_device *dev, u32 value)
8935 struct tg3 *tp = netdev_priv(dev);
8937 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8942 if ((dev->features & NETIF_F_IPV6_CSUM) &&
8943 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
8945 dev->features |= NETIF_F_TSO6;
8946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8947 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
8948 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
8949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8951 dev->features |= NETIF_F_TSO_ECN;
8953 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8955 return ethtool_op_set_tso(dev, value);
8958 static int tg3_nway_reset(struct net_device *dev)
8960 struct tg3 *tp = netdev_priv(dev);
8963 if (!netif_running(dev))
8966 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8969 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8970 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8972 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
8976 spin_lock_bh(&tp->lock);
8978 tg3_readphy(tp, MII_BMCR, &bmcr);
8979 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8980 ((bmcr & BMCR_ANENABLE) ||
8981 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8982 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8986 spin_unlock_bh(&tp->lock);
8992 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8994 struct tg3 *tp = netdev_priv(dev);
8996 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8997 ering->rx_mini_max_pending = 0;
8998 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8999 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9001 ering->rx_jumbo_max_pending = 0;
9003 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9005 ering->rx_pending = tp->rx_pending;
9006 ering->rx_mini_pending = 0;
9007 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9008 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9010 ering->rx_jumbo_pending = 0;
9012 ering->tx_pending = tp->tx_pending;
9015 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9017 struct tg3 *tp = netdev_priv(dev);
9018 int irq_sync = 0, err = 0;
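/* Reject ring sizes the hardware cannot handle.  The TX ring must be
 * able to hold at least one maximally fragmented skb, and three of
 * them when the TSO workaround (TG3_FLG2_TSO_BUG) is in effect.
 */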
9020 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9021 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9022 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9023 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9024 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9025 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9028 if (netif_running(dev)) {
9034 tg3_full_lock(tp, irq_sync);
9036 tp->rx_pending = ering->rx_pending;
9038 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9039 tp->rx_pending > 63)
9040 tp->rx_pending = 63;
9041 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9042 tp->tx_pending = ering->tx_pending;
9044 if (netif_running(dev)) {
9045 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9046 err = tg3_restart_hw(tp, 1);
9048 tg3_netif_start(tp);
9051 tg3_full_unlock(tp);
9053 if (irq_sync && !err)
9059 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9061 struct tg3 *tp = netdev_priv(dev);
9063 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9065 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9066 epause->rx_pause = 1;
9068 epause->rx_pause = 0;
9070 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9071 epause->tx_pause = 1;
9073 epause->tx_pause = 0;
9076 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9078 struct tg3 *tp = netdev_priv(dev);
9081 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9082 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9085 if (epause->autoneg) {
9087 struct phy_device *phydev;
9089 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9091 if (epause->rx_pause) {
9092 if (epause->tx_pause)
9093 newadv = ADVERTISED_Pause;
9095 newadv = ADVERTISED_Pause |
9096 ADVERTISED_Asym_Pause;
9097 } else if (epause->tx_pause) {
9098 newadv = ADVERTISED_Asym_Pause;
9102 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9103 u32 oldadv = phydev->advertising &
9105 ADVERTISED_Asym_Pause);
9106 if (oldadv != newadv) {
9107 phydev->advertising &=
9108 ~(ADVERTISED_Pause |
9109 ADVERTISED_Asym_Pause);
9110 phydev->advertising |= newadv;
9111 err = phy_start_aneg(phydev);
9114 tp->link_config.advertising &=
9115 ~(ADVERTISED_Pause |
9116 ADVERTISED_Asym_Pause);
9117 tp->link_config.advertising |= newadv;
9120 if (epause->rx_pause)
9121 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9123 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9125 if (epause->tx_pause)
9126 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9128 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9130 if (netif_running(dev))
9131 tg3_setup_flow_control(tp, 0, 0);
9136 if (netif_running(dev)) {
9141 tg3_full_lock(tp, irq_sync);
9143 if (epause->autoneg)
9144 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9146 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9147 if (epause->rx_pause)
9148 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9150 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9151 if (epause->tx_pause)
9152 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9154 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9156 if (netif_running(dev)) {
9157 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9158 err = tg3_restart_hw(tp, 1);
9160 tg3_netif_start(tp);
9163 tg3_full_unlock(tp);
9169 static u32 tg3_get_rx_csum(struct net_device *dev)
9171 struct tg3 *tp = netdev_priv(dev);
9172 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9175 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9177 struct tg3 *tp = netdev_priv(dev);
9179 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9185 spin_lock_bh(&tp->lock);
9187 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9189 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9190 spin_unlock_bh(&tp->lock);
9195 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9197 struct tg3 *tp = netdev_priv(dev);
9199 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9205 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9206 ethtool_op_set_tx_ipv6_csum(dev, data);
9208 ethtool_op_set_tx_csum(dev, data);
9213 static int tg3_get_sset_count (struct net_device *dev, int sset)
9217 return TG3_NUM_TEST;
9219 return TG3_NUM_STATS;
9225 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9227 switch (stringset) {
9229 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9232 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9235 WARN_ON(1); /* we need a WARN() */
9240 static int tg3_phys_id(struct net_device *dev, u32 data)
9242 struct tg3 *tp = netdev_priv(dev);
9245 if (!netif_running(tp->dev))
9249 data = UINT_MAX / 2;
9251 for (i = 0; i < (data * 2); i++) {
9253 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9254 LED_CTRL_1000MBPS_ON |
9255 LED_CTRL_100MBPS_ON |
9256 LED_CTRL_10MBPS_ON |
9257 LED_CTRL_TRAFFIC_OVERRIDE |
9258 LED_CTRL_TRAFFIC_BLINK |
9259 LED_CTRL_TRAFFIC_LED);
9262 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9263 LED_CTRL_TRAFFIC_OVERRIDE);
9265 if (msleep_interruptible(500))
9268 tw32(MAC_LED_CTRL, tp->led_ctrl);
9272 static void tg3_get_ethtool_stats (struct net_device *dev,
9273 struct ethtool_stats *estats, u64 *tmp_stats)
9275 struct tg3 *tp = netdev_priv(dev);
9276 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9279 #define NVRAM_TEST_SIZE 0x100
9280 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9281 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9282 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9283 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9284 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9286 static int tg3_test_nvram(struct tg3 *tp)
9290 int i, j, k, err = 0, size;
9292 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9295 if (tg3_nvram_read(tp, 0, &magic) != 0)
9298 if (magic == TG3_EEPROM_MAGIC)
9299 size = NVRAM_TEST_SIZE;
9300 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9301 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9302 TG3_EEPROM_SB_FORMAT_1) {
9303 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9304 case TG3_EEPROM_SB_REVISION_0:
9305 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9307 case TG3_EEPROM_SB_REVISION_2:
9308 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9310 case TG3_EEPROM_SB_REVISION_3:
9311 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9318 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9319 size = NVRAM_SELFBOOT_HW_SIZE;
9323 buf = kmalloc(size, GFP_KERNEL);
9328 for (i = 0, j = 0; i < size; i += 4, j++) {
9329 err = tg3_nvram_read_be32(tp, i, &buf[j]);
9336 /* Selfboot format */
9337 magic = be32_to_cpu(buf[0]);
9338 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9339 TG3_EEPROM_MAGIC_FW) {
9340 u8 *buf8 = (u8 *) buf, csum8 = 0;
9342 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9343 TG3_EEPROM_SB_REVISION_2) {
9344 /* For rev 2, the csum doesn't include the MBA. */
9345 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9347 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9350 for (i = 0; i < size; i++)
9363 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9364 TG3_EEPROM_MAGIC_HW) {
9365 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9366 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9367 u8 *buf8 = (u8 *) buf;
9369 /* Separate the parity bits and the data bytes. */
9370 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9371 if ((i == 0) || (i == 8)) {
9375 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9376 parity[k++] = buf8[i] & msk;
9383 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9384 parity[k++] = buf8[i] & msk;
9387 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9388 parity[k++] = buf8[i] & msk;
9391 data[j++] = buf8[i];
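/* Each data byte together with its stored parity bit must have odd
 * parity: a byte with an odd bit count must have the parity bit clear,
 * and one with an even bit count must have it set.
 */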
9395 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9396 u8 hw8 = hweight8(data[i]);
9398 if ((hw8 & 0x1) && parity[i])
9400 else if (!(hw8 & 0x1) && !parity[i])
9407 /* Bootstrap checksum at offset 0x10 */
9408 csum = calc_crc((unsigned char *) buf, 0x10);
9409 if (csum != be32_to_cpu(buf[0x10/4]))
9412 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9413 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9414 if (csum != be32_to_cpu(buf[0xfc/4]))
9424 #define TG3_SERDES_TIMEOUT_SEC 2
9425 #define TG3_COPPER_TIMEOUT_SEC 6
9427 static int tg3_test_link(struct tg3 *tp)
9431 if (!netif_running(tp->dev))
9434 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9435 max = TG3_SERDES_TIMEOUT_SEC;
9437 max = TG3_COPPER_TIMEOUT_SEC;
9439 for (i = 0; i < max; i++) {
9440 if (netif_carrier_ok(tp->dev))
9443 if (msleep_interruptible(1000))
9450 /* Only test the commonly used registers */
9451 static int tg3_test_registers(struct tg3 *tp)
9453 int i, is_5705, is_5750;
9454 u32 offset, read_mask, write_mask, val, save_val, read_val;
9458 #define TG3_FL_5705 0x1
9459 #define TG3_FL_NOT_5705 0x2
9460 #define TG3_FL_NOT_5788 0x4
9461 #define TG3_FL_NOT_5750 0x8
9465 /* MAC Control Registers */
9466 { MAC_MODE, TG3_FL_NOT_5705,
9467 0x00000000, 0x00ef6f8c },
9468 { MAC_MODE, TG3_FL_5705,
9469 0x00000000, 0x01ef6b8c },
9470 { MAC_STATUS, TG3_FL_NOT_5705,
9471 0x03800107, 0x00000000 },
9472 { MAC_STATUS, TG3_FL_5705,
9473 0x03800100, 0x00000000 },
9474 { MAC_ADDR_0_HIGH, 0x0000,
9475 0x00000000, 0x0000ffff },
9476 { MAC_ADDR_0_LOW, 0x0000,
9477 0x00000000, 0xffffffff },
9478 { MAC_RX_MTU_SIZE, 0x0000,
9479 0x00000000, 0x0000ffff },
9480 { MAC_TX_MODE, 0x0000,
9481 0x00000000, 0x00000070 },
9482 { MAC_TX_LENGTHS, 0x0000,
9483 0x00000000, 0x00003fff },
9484 { MAC_RX_MODE, TG3_FL_NOT_5705,
9485 0x00000000, 0x000007fc },
9486 { MAC_RX_MODE, TG3_FL_5705,
9487 0x00000000, 0x000007dc },
9488 { MAC_HASH_REG_0, 0x0000,
9489 0x00000000, 0xffffffff },
9490 { MAC_HASH_REG_1, 0x0000,
9491 0x00000000, 0xffffffff },
9492 { MAC_HASH_REG_2, 0x0000,
9493 0x00000000, 0xffffffff },
9494 { MAC_HASH_REG_3, 0x0000,
9495 0x00000000, 0xffffffff },
9497 /* Receive Data and Receive BD Initiator Control Registers. */
9498 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9499 0x00000000, 0xffffffff },
9500 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9501 0x00000000, 0xffffffff },
9502 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9503 0x00000000, 0x00000003 },
9504 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9505 0x00000000, 0xffffffff },
9506 { RCVDBDI_STD_BD+0, 0x0000,
9507 0x00000000, 0xffffffff },
9508 { RCVDBDI_STD_BD+4, 0x0000,
9509 0x00000000, 0xffffffff },
9510 { RCVDBDI_STD_BD+8, 0x0000,
9511 0x00000000, 0xffff0002 },
9512 { RCVDBDI_STD_BD+0xc, 0x0000,
9513 0x00000000, 0xffffffff },
9515 /* Receive BD Initiator Control Registers. */
9516 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9517 0x00000000, 0xffffffff },
9518 { RCVBDI_STD_THRESH, TG3_FL_5705,
9519 0x00000000, 0x000003ff },
9520 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9521 0x00000000, 0xffffffff },
9523 /* Host Coalescing Control Registers. */
9524 { HOSTCC_MODE, TG3_FL_NOT_5705,
9525 0x00000000, 0x00000004 },
9526 { HOSTCC_MODE, TG3_FL_5705,
9527 0x00000000, 0x000000f6 },
9528 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9529 0x00000000, 0xffffffff },
9530 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9531 0x00000000, 0x000003ff },
9532 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9533 0x00000000, 0xffffffff },
9534 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9535 0x00000000, 0x000003ff },
9536 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9537 0x00000000, 0xffffffff },
9538 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9539 0x00000000, 0x000000ff },
9540 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9541 0x00000000, 0xffffffff },
9542 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9543 0x00000000, 0x000000ff },
9544 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9545 0x00000000, 0xffffffff },
9546 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9547 0x00000000, 0xffffffff },
9548 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9549 0x00000000, 0xffffffff },
9550 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9551 0x00000000, 0x000000ff },
9552 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9553 0x00000000, 0xffffffff },
9554 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9555 0x00000000, 0x000000ff },
9556 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9557 0x00000000, 0xffffffff },
9558 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9559 0x00000000, 0xffffffff },
9560 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9561 0x00000000, 0xffffffff },
9562 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9563 0x00000000, 0xffffffff },
9564 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9565 0x00000000, 0xffffffff },
9566 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9567 0xffffffff, 0x00000000 },
9568 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9569 0xffffffff, 0x00000000 },
9571 /* Buffer Manager Control Registers. */
9572 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9573 0x00000000, 0x007fff80 },
9574 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9575 0x00000000, 0x007fffff },
9576 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9577 0x00000000, 0x0000003f },
9578 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9579 0x00000000, 0x000001ff },
9580 { BUFMGR_MB_HIGH_WATER, 0x0000,
9581 0x00000000, 0x000001ff },
9582 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9583 0xffffffff, 0x00000000 },
9584 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9585 0xffffffff, 0x00000000 },
9587 /* Mailbox Registers */
9588 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9589 0x00000000, 0x000001ff },
9590 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9591 0x00000000, 0x000001ff },
9592 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9593 0x00000000, 0x000007ff },
9594 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9595 0x00000000, 0x000001ff },
9597 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9600 is_5705 = is_5750 = 0;
9601 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9603 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9607 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9608 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9611 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9614 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9615 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9618 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9621 offset = (u32) reg_tbl[i].offset;
9622 read_mask = reg_tbl[i].read_mask;
9623 write_mask = reg_tbl[i].write_mask;
9625 /* Save the original register content */
9626 save_val = tr32(offset);
9628 /* Determine the read-only value. */
9629 read_val = save_val & read_mask;
9631 /* Write zero to the register, then make sure the read-only bits
9632 * are not changed and the read/write bits are all zeros.
9638 /* Test the read-only and read/write bits. */
9639 if (((val & read_mask) != read_val) || (val & write_mask))
9642 /* Write ones to all the bits defined by RdMask and WrMask, then
9643 * make sure the read-only bits are not changed and the
9644 * read/write bits are all ones.
9646 tw32(offset, read_mask | write_mask);
9650 /* Test the read-only bits. */
9651 if ((val & read_mask) != read_val)
9654 /* Test the read/write bits. */
9655 if ((val & write_mask) != write_mask)
9658 tw32(offset, save_val);
9664 if (netif_msg_hw(tp))
9665 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9667 tw32(offset, save_val);
9671 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9673 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9677 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9678 for (j = 0; j < len; j += 4) {
9681 tg3_write_mem(tp, offset + j, test_pattern[i]);
9682 tg3_read_mem(tp, offset + j, &val);
9683 if (val != test_pattern[i])
9690 static int tg3_test_memory(struct tg3 *tp)
9692 static struct mem_entry {
9695 } mem_tbl_570x[] = {
9696 { 0x00000000, 0x00b50},
9697 { 0x00002000, 0x1c000},
9698 { 0xffffffff, 0x00000}
9699 }, mem_tbl_5705[] = {
9700 { 0x00000100, 0x0000c},
9701 { 0x00000200, 0x00008},
9702 { 0x00004000, 0x00800},
9703 { 0x00006000, 0x01000},
9704 { 0x00008000, 0x02000},
9705 { 0x00010000, 0x0e000},
9706 { 0xffffffff, 0x00000}
9707 }, mem_tbl_5755[] = {
9708 { 0x00000200, 0x00008},
9709 { 0x00004000, 0x00800},
9710 { 0x00006000, 0x00800},
9711 { 0x00008000, 0x02000},
9712 { 0x00010000, 0x0c000},
9713 { 0xffffffff, 0x00000}
9714 }, mem_tbl_5906[] = {
9715 { 0x00000200, 0x00008},
9716 { 0x00004000, 0x00400},
9717 { 0x00006000, 0x00400},
9718 { 0x00008000, 0x01000},
9719 { 0x00010000, 0x01000},
9720 { 0xffffffff, 0x00000}
9722 struct mem_entry *mem_tbl;
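/* Pick the on-chip memory map that matches this ASIC generation; each
 * table is terminated by an 0xffffffff offset sentinel.
 */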
9726 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9727 mem_tbl = mem_tbl_5755;
9728 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9729 mem_tbl = mem_tbl_5906;
9730 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9731 mem_tbl = mem_tbl_5705;
9733 mem_tbl = mem_tbl_570x;
9735 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9736 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9737 mem_tbl[i].len)) != 0)
9744 #define TG3_MAC_LOOPBACK 0
9745 #define TG3_PHY_LOOPBACK 1
9747 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9749 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9751 struct sk_buff *skb, *rx_skb;
9754 int num_pkts, tx_len, rx_len, i, err;
9755 struct tg3_rx_buffer_desc *desc;
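/* MAC loopback wraps frames inside the MAC via MAC_MODE_PORT_INT_LPBACK;
 * PHY loopback instead forces the PHY into loopback through
 * BMCR_LOOPBACK.  Either way the transmitted test frame should come
 * back on the standard receive ring.
 */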
9757 if (loopback_mode == TG3_MAC_LOOPBACK) {
9758 /* HW errata - mac loopback fails in some cases on 5780.
9759 * Normal traffic and PHY loopback are not affected by
9762 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9765 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9766 MAC_MODE_PORT_INT_LPBACK;
9767 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9768 mac_mode |= MAC_MODE_LINK_POLARITY;
9769 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9770 mac_mode |= MAC_MODE_PORT_MODE_MII;
9772 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9773 tw32(MAC_MODE, mac_mode);
9774 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9777 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
9778 tg3_phy_fet_toggle_apd(tp, false);
9779 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9781 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9783 tg3_phy_toggle_automdix(tp, 0);
9785 tg3_writephy(tp, MII_BMCR, val);
9788 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9789 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
9790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9791 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
9792 mac_mode |= MAC_MODE_PORT_MODE_MII;
9794 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9796 /* reset to prevent losing 1st rx packet intermittently */
9797 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9798 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9800 tw32_f(MAC_RX_MODE, tp->rx_mode);
9802 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9803 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9804 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9805 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9806 mac_mode |= MAC_MODE_LINK_POLARITY;
9807 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9808 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9810 tw32(MAC_MODE, mac_mode);
9818 skb = netdev_alloc_skb(tp->dev, tx_len);
9822 tx_data = skb_put(skb, tx_len);
9823 memcpy(tx_data, tp->dev->dev_addr, 6);
9824 memset(tx_data + 6, 0x0, 8);
9826 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
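/* Fill the payload after the 14-byte Ethernet header with an
 * incrementing byte pattern so the received copy can be verified
 * byte for byte later on.
 */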
9828 for (i = 14; i < tx_len; i++)
9829 tx_data[i] = (u8) (i & 0xff);
9831 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9833 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9838 rx_start_idx = tp->hw_status->idx[0].rx_producer;
9842 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9847 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9849 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9853 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9854 for (i = 0; i < 25; i++) {
9855 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9860 tx_idx = tp->hw_status->idx[0].tx_consumer;
9861 rx_idx = tp->hw_status->idx[0].rx_producer;
9862 if ((tx_idx == tp->tx_prod) &&
9863 (rx_idx == (rx_start_idx + num_pkts)))
9867 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9870 if (tx_idx != tp->tx_prod)
9873 if (rx_idx != rx_start_idx + num_pkts)
9876 desc = &tp->rx_rcb[rx_start_idx];
9877 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9878 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9879 if (opaque_key != RXD_OPAQUE_RING_STD)
9882 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9883 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9886 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9887 if (rx_len != tx_len)
9890 rx_skb = tp->rx_std_buffers[desc_idx].skb;
9892 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9893 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9895 for (i = 14; i < tx_len; i++) {
9896 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9901 /* tg3_free_rings will unmap and free the rx_skb */
9906 #define TG3_MAC_LOOPBACK_FAILED 1
9907 #define TG3_PHY_LOOPBACK_FAILED 2
9908 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9909 TG3_PHY_LOOPBACK_FAILED)
9911 static int tg3_test_loopback(struct tg3 *tp)
9916 if (!netif_running(tp->dev))
9917 return TG3_LOOPBACK_FAILED;
9919 err = tg3_reset_hw(tp, 1);
9921 return TG3_LOOPBACK_FAILED;
9923 /* Turn off gphy autopowerdown. */
9924 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
9925 tg3_phy_toggle_apd(tp, false);
9927 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9931 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9933 /* Wait for up to 40 microseconds to acquire lock. */
9934 for (i = 0; i < 4; i++) {
9935 status = tr32(TG3_CPMU_MUTEX_GNT);
9936 if (status == CPMU_MUTEX_GNT_DRIVER)
9941 if (status != CPMU_MUTEX_GNT_DRIVER)
9942 return TG3_LOOPBACK_FAILED;
9944 /* Turn off link-based power management. */
9945 cpmuctrl = tr32(TG3_CPMU_CTRL);
9947 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9948 CPMU_CTRL_LINK_AWARE_MODE));
9951 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9952 err |= TG3_MAC_LOOPBACK_FAILED;
9954 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9955 tw32(TG3_CPMU_CTRL, cpmuctrl);
9957 /* Release the mutex */
9958 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9961 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9962 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
9963 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9964 err |= TG3_PHY_LOOPBACK_FAILED;
9967 /* Re-enable gphy autopowerdown. */
9968 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
9969 tg3_phy_toggle_apd(tp, true);
9974 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9977 struct tg3 *tp = netdev_priv(dev);
9979 if (tp->link_config.phy_is_low_power)
9980 tg3_set_power_state(tp, PCI_D0);
9982 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9984 if (tg3_test_nvram(tp) != 0) {
9985 etest->flags |= ETH_TEST_FL_FAILED;
9988 if (tg3_test_link(tp) != 0) {
9989 etest->flags |= ETH_TEST_FL_FAILED;
9992 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9993 int err, err2 = 0, irq_sync = 0;
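/* Offline tests need exclusive use of the hardware: halt the chip (and
 * its on-chip RX/TX CPUs where present) before running the register,
 * memory and loopback tests, then restart it below.
 */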
9995 if (netif_running(dev)) {
10001 tg3_full_lock(tp, irq_sync);
10003 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10004 err = tg3_nvram_lock(tp);
10005 tg3_halt_cpu(tp, RX_CPU_BASE);
10006 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10007 tg3_halt_cpu(tp, TX_CPU_BASE);
10009 tg3_nvram_unlock(tp);
10011 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10014 if (tg3_test_registers(tp) != 0) {
10015 etest->flags |= ETH_TEST_FL_FAILED;
10018 if (tg3_test_memory(tp) != 0) {
10019 etest->flags |= ETH_TEST_FL_FAILED;
10022 if ((data[4] = tg3_test_loopback(tp)) != 0)
10023 etest->flags |= ETH_TEST_FL_FAILED;
10025 tg3_full_unlock(tp);
10027 if (tg3_test_interrupt(tp) != 0) {
10028 etest->flags |= ETH_TEST_FL_FAILED;
10032 tg3_full_lock(tp, 0);
10034 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10035 if (netif_running(dev)) {
10036 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10037 err2 = tg3_restart_hw(tp, 1);
10039 tg3_netif_start(tp);
10042 tg3_full_unlock(tp);
10044 if (irq_sync && !err2)
10047 if (tp->link_config.phy_is_low_power)
10048 tg3_set_power_state(tp, PCI_D3hot);
10052 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10054 struct mii_ioctl_data *data = if_mii(ifr);
10055 struct tg3 *tp = netdev_priv(dev);
10058 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10059 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10061 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10066 data->phy_id = PHY_ADDR;
10069 case SIOCGMIIREG: {
10072 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10073 break; /* We have no PHY */
10075 if (tp->link_config.phy_is_low_power)
10078 spin_lock_bh(&tp->lock);
10079 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10080 spin_unlock_bh(&tp->lock);
10082 data->val_out = mii_regval;
10088 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10089 break; /* We have no PHY */
10091 if (!capable(CAP_NET_ADMIN))
10094 if (tp->link_config.phy_is_low_power)
10097 spin_lock_bh(&tp->lock);
10098 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10099 spin_unlock_bh(&tp->lock);
10107 return -EOPNOTSUPP;
10110 #if TG3_VLAN_TAG_USED
10111 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10113 struct tg3 *tp = netdev_priv(dev);
10115 if (!netif_running(dev)) {
10120 tg3_netif_stop(tp);
10122 tg3_full_lock(tp, 0);
10126 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10127 __tg3_set_rx_mode(dev);
10129 tg3_netif_start(tp);
10131 tg3_full_unlock(tp);
10135 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10137 struct tg3 *tp = netdev_priv(dev);
10139 memcpy(ec, &tp->coal, sizeof(*ec));
10143 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10145 struct tg3 *tp = netdev_priv(dev);
10146 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10147 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
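/* Only pre-5705 chips support the in-IRQ coalescing parameters and the
 * statistics-block interval; on later parts the limits stay zero, so
 * any non-zero request fails the range checks below.
 */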
10149 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10150 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10151 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10152 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10153 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10156 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10157 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10158 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10159 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10160 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10161 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10162 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10163 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10164 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10165 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10168 /* No rx interrupts will be generated if both are zero */
10169 if ((ec->rx_coalesce_usecs == 0) &&
10170 (ec->rx_max_coalesced_frames == 0))
10173 /* No tx interrupts will be generated if both are zero */
10174 if ((ec->tx_coalesce_usecs == 0) &&
10175 (ec->tx_max_coalesced_frames == 0))
10178 /* Only copy relevant parameters, ignore all others. */
10179 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10180 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10181 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10182 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10183 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10184 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10185 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10186 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10187 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10189 if (netif_running(dev)) {
10190 tg3_full_lock(tp, 0);
10191 __tg3_set_coalesce(tp, &tp->coal);
10192 tg3_full_unlock(tp);
10197 static const struct ethtool_ops tg3_ethtool_ops = {
10198 .get_settings = tg3_get_settings,
10199 .set_settings = tg3_set_settings,
10200 .get_drvinfo = tg3_get_drvinfo,
10201 .get_regs_len = tg3_get_regs_len,
10202 .get_regs = tg3_get_regs,
10203 .get_wol = tg3_get_wol,
10204 .set_wol = tg3_set_wol,
10205 .get_msglevel = tg3_get_msglevel,
10206 .set_msglevel = tg3_set_msglevel,
10207 .nway_reset = tg3_nway_reset,
10208 .get_link = ethtool_op_get_link,
10209 .get_eeprom_len = tg3_get_eeprom_len,
10210 .get_eeprom = tg3_get_eeprom,
10211 .set_eeprom = tg3_set_eeprom,
10212 .get_ringparam = tg3_get_ringparam,
10213 .set_ringparam = tg3_set_ringparam,
10214 .get_pauseparam = tg3_get_pauseparam,
10215 .set_pauseparam = tg3_set_pauseparam,
10216 .get_rx_csum = tg3_get_rx_csum,
10217 .set_rx_csum = tg3_set_rx_csum,
10218 .set_tx_csum = tg3_set_tx_csum,
10219 .set_sg = ethtool_op_set_sg,
10220 .set_tso = tg3_set_tso,
10221 .self_test = tg3_self_test,
10222 .get_strings = tg3_get_strings,
10223 .phys_id = tg3_phys_id,
10224 .get_ethtool_stats = tg3_get_ethtool_stats,
10225 .get_coalesce = tg3_get_coalesce,
10226 .set_coalesce = tg3_set_coalesce,
10227 .get_sset_count = tg3_get_sset_count,
10230 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10232 u32 cursize, val, magic;
10234 tp->nvram_size = EEPROM_CHIP_SIZE;
10236 if (tg3_nvram_read(tp, 0, &magic) != 0)
10239 if ((magic != TG3_EEPROM_MAGIC) &&
10240 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10241 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10245 * Size the chip by reading offsets at increasing powers of two.
10246 * When we encounter our validation signature, we know the addressing
10247 * has wrapped around, and thus have our chip size.
10251 while (cursize < tp->nvram_size) {
10252 if (tg3_nvram_read(tp, cursize, &val) != 0)
10261 tp->nvram_size = cursize;
10264 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10268 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10269 tg3_nvram_read(tp, 0, &val) != 0)
10272 /* Selfboot format */
10273 if (val != TG3_EEPROM_MAGIC) {
10274 tg3_get_eeprom_size(tp);
10278 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10280 /* This is confusing. We want to operate on the
10281 * 16-bit value at offset 0xf2. The tg3_nvram_read()
10282 * call will read from NVRAM and byteswap the data
10283 * according to the byteswapping settings for all
10284 * other register accesses. This ensures the data we
10285 * want will always reside in the lower 16-bits.
10286 * However, the data in NVRAM is in LE format, which
10287 * means the data from the NVRAM read will always be
10288 * opposite the endianness of the CPU. The 16-bit
10289 * byteswap then brings the data to CPU endianness.
10291 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
10295 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10298 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10302 nvcfg1 = tr32(NVRAM_CFG1);
10303 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10304 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10306 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10307 tw32(NVRAM_CFG1, nvcfg1);
10310 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10311 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10312 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10313 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10314 tp->nvram_jedecnum = JEDEC_ATMEL;
10315 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10316 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10318 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10319 tp->nvram_jedecnum = JEDEC_ATMEL;
10320 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10322 case FLASH_VENDOR_ATMEL_EEPROM:
10323 tp->nvram_jedecnum = JEDEC_ATMEL;
10324 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10325 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10327 case FLASH_VENDOR_ST:
10328 tp->nvram_jedecnum = JEDEC_ST;
10329 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10330 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10332 case FLASH_VENDOR_SAIFUN:
10333 tp->nvram_jedecnum = JEDEC_SAIFUN;
10334 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10336 case FLASH_VENDOR_SST_SMALL:
10337 case FLASH_VENDOR_SST_LARGE:
10338 tp->nvram_jedecnum = JEDEC_SST;
10339 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10343 tp->nvram_jedecnum = JEDEC_ATMEL;
10344 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10345 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10349 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10353 nvcfg1 = tr32(NVRAM_CFG1);
10355 /* NVRAM protection for TPM */
10356 if (nvcfg1 & (1 << 27))
10357 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10359 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10360 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10361 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10362 tp->nvram_jedecnum = JEDEC_ATMEL;
10363 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10365 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10366 tp->nvram_jedecnum = JEDEC_ATMEL;
10367 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10368 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10370 case FLASH_5752VENDOR_ST_M45PE10:
10371 case FLASH_5752VENDOR_ST_M45PE20:
10372 case FLASH_5752VENDOR_ST_M45PE40:
10373 tp->nvram_jedecnum = JEDEC_ST;
10374 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10375 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10379 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10380 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10381 case FLASH_5752PAGE_SIZE_256:
10382 tp->nvram_pagesize = 256;
10384 case FLASH_5752PAGE_SIZE_512:
10385 tp->nvram_pagesize = 512;
10387 case FLASH_5752PAGE_SIZE_1K:
10388 tp->nvram_pagesize = 1024;
10390 case FLASH_5752PAGE_SIZE_2K:
10391 tp->nvram_pagesize = 2048;
10393 case FLASH_5752PAGE_SIZE_4K:
10394 tp->nvram_pagesize = 4096;
10396 case FLASH_5752PAGE_SIZE_264:
10397 tp->nvram_pagesize = 264;
10401 /* For eeprom, set pagesize to maximum eeprom size */
10402 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10404 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10405 tw32(NVRAM_CFG1, nvcfg1);
10409 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10411 u32 nvcfg1, protect = 0;
10413 nvcfg1 = tr32(NVRAM_CFG1);
10415 /* NVRAM protection for TPM */
10416 if (nvcfg1 & (1 << 27)) {
10417 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10421 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10423 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10424 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10425 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10426 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10427 tp->nvram_jedecnum = JEDEC_ATMEL;
10428 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10429 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10430 tp->nvram_pagesize = 264;
10431 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10432 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10433 tp->nvram_size = (protect ? 0x3e200 :
10434 TG3_NVRAM_SIZE_512KB);
10435 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10436 tp->nvram_size = (protect ? 0x1f200 :
10437 TG3_NVRAM_SIZE_256KB);
10439 tp->nvram_size = (protect ? 0x1f200 :
10440 TG3_NVRAM_SIZE_128KB);
10442 case FLASH_5752VENDOR_ST_M45PE10:
10443 case FLASH_5752VENDOR_ST_M45PE20:
10444 case FLASH_5752VENDOR_ST_M45PE40:
10445 tp->nvram_jedecnum = JEDEC_ST;
10446 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10447 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10448 tp->nvram_pagesize = 256;
10449 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10450 tp->nvram_size = (protect ?
10451 TG3_NVRAM_SIZE_64KB :
10452 TG3_NVRAM_SIZE_128KB);
10453 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10454 tp->nvram_size = (protect ?
10455 TG3_NVRAM_SIZE_64KB :
10456 TG3_NVRAM_SIZE_256KB);
10458 tp->nvram_size = (protect ?
10459 TG3_NVRAM_SIZE_128KB :
10460 TG3_NVRAM_SIZE_512KB);
10465 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10469 nvcfg1 = tr32(NVRAM_CFG1);
10471 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10472 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10473 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10474 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10475 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10476 tp->nvram_jedecnum = JEDEC_ATMEL;
10477 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10478 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10480 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10481 tw32(NVRAM_CFG1, nvcfg1);
10483 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10484 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10485 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10486 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10487 tp->nvram_jedecnum = JEDEC_ATMEL;
10488 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10489 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10490 tp->nvram_pagesize = 264;
10492 case FLASH_5752VENDOR_ST_M45PE10:
10493 case FLASH_5752VENDOR_ST_M45PE20:
10494 case FLASH_5752VENDOR_ST_M45PE40:
10495 tp->nvram_jedecnum = JEDEC_ST;
10496 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10497 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10498 tp->nvram_pagesize = 256;
10503 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10505 u32 nvcfg1, protect = 0;
10507 nvcfg1 = tr32(NVRAM_CFG1);
10509 /* NVRAM protection for TPM */
10510 if (nvcfg1 & (1 << 27)) {
10511 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10515 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10517 case FLASH_5761VENDOR_ATMEL_ADB021D:
10518 case FLASH_5761VENDOR_ATMEL_ADB041D:
10519 case FLASH_5761VENDOR_ATMEL_ADB081D:
10520 case FLASH_5761VENDOR_ATMEL_ADB161D:
10521 case FLASH_5761VENDOR_ATMEL_MDB021D:
10522 case FLASH_5761VENDOR_ATMEL_MDB041D:
10523 case FLASH_5761VENDOR_ATMEL_MDB081D:
10524 case FLASH_5761VENDOR_ATMEL_MDB161D:
10525 tp->nvram_jedecnum = JEDEC_ATMEL;
10526 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10527 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10528 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10529 tp->nvram_pagesize = 256;
10531 case FLASH_5761VENDOR_ST_A_M45PE20:
10532 case FLASH_5761VENDOR_ST_A_M45PE40:
10533 case FLASH_5761VENDOR_ST_A_M45PE80:
10534 case FLASH_5761VENDOR_ST_A_M45PE16:
10535 case FLASH_5761VENDOR_ST_M_M45PE20:
10536 case FLASH_5761VENDOR_ST_M_M45PE40:
10537 case FLASH_5761VENDOR_ST_M_M45PE80:
10538 case FLASH_5761VENDOR_ST_M_M45PE16:
10539 tp->nvram_jedecnum = JEDEC_ST;
10540 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10541 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10542 tp->nvram_pagesize = 256;
10547 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10550 case FLASH_5761VENDOR_ATMEL_ADB161D:
10551 case FLASH_5761VENDOR_ATMEL_MDB161D:
10552 case FLASH_5761VENDOR_ST_A_M45PE16:
10553 case FLASH_5761VENDOR_ST_M_M45PE16:
10554 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10556 case FLASH_5761VENDOR_ATMEL_ADB081D:
10557 case FLASH_5761VENDOR_ATMEL_MDB081D:
10558 case FLASH_5761VENDOR_ST_A_M45PE80:
10559 case FLASH_5761VENDOR_ST_M_M45PE80:
10560 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10562 case FLASH_5761VENDOR_ATMEL_ADB041D:
10563 case FLASH_5761VENDOR_ATMEL_MDB041D:
10564 case FLASH_5761VENDOR_ST_A_M45PE40:
10565 case FLASH_5761VENDOR_ST_M_M45PE40:
10566 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10568 case FLASH_5761VENDOR_ATMEL_ADB021D:
10569 case FLASH_5761VENDOR_ATMEL_MDB021D:
10570 case FLASH_5761VENDOR_ST_A_M45PE20:
10571 case FLASH_5761VENDOR_ST_M_M45PE20:
10572 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10578 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10580 tp->nvram_jedecnum = JEDEC_ATMEL;
10581 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10582 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10585 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
10589 nvcfg1 = tr32(NVRAM_CFG1);
10591 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10592 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10593 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10594 tp->nvram_jedecnum = JEDEC_ATMEL;
10595 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10596 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10598 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10599 tw32(NVRAM_CFG1, nvcfg1);
10601 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10602 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10603 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10604 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10605 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10606 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10607 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10608 tp->nvram_jedecnum = JEDEC_ATMEL;
10609 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10610 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10612 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10613 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10614 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10615 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10616 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10618 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10619 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10620 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10622 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10623 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10624 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10628 case FLASH_5752VENDOR_ST_M45PE10:
10629 case FLASH_5752VENDOR_ST_M45PE20:
10630 case FLASH_5752VENDOR_ST_M45PE40:
10631 tp->nvram_jedecnum = JEDEC_ST;
10632 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10633 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10635 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10636 case FLASH_5752VENDOR_ST_M45PE10:
10637 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10639 case FLASH_5752VENDOR_ST_M45PE20:
10640 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10642 case FLASH_5752VENDOR_ST_M45PE40:
10643 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10648 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
10652 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10653 case FLASH_5752PAGE_SIZE_256:
10654 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10655 tp->nvram_pagesize = 256;
10657 case FLASH_5752PAGE_SIZE_512:
10658 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10659 tp->nvram_pagesize = 512;
10661 case FLASH_5752PAGE_SIZE_1K:
10662 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10663 tp->nvram_pagesize = 1024;
10665 case FLASH_5752PAGE_SIZE_2K:
10666 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10667 tp->nvram_pagesize = 2048;
10669 case FLASH_5752PAGE_SIZE_4K:
10670 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10671 tp->nvram_pagesize = 4096;
10673 case FLASH_5752PAGE_SIZE_264:
10674 tp->nvram_pagesize = 264;
10676 case FLASH_5752PAGE_SIZE_528:
10677 tp->nvram_pagesize = 528;
10682 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10683 static void __devinit tg3_nvram_init(struct tg3 *tp)
10685 tw32_f(GRC_EEPROM_ADDR,
10686 (EEPROM_ADDR_FSM_RESET |
10687 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10688 EEPROM_ADDR_CLKPERD_SHIFT)));
10692 /* Enable seeprom accesses. */
10693 tw32_f(GRC_LOCAL_CTRL,
10694 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10697 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10698 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10699 tp->tg3_flags |= TG3_FLAG_NVRAM;
10701 if (tg3_nvram_lock(tp)) {
10702 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10703 "tg3_nvram_init failed.\n", tp->dev->name);
10706 tg3_enable_nvram_access(tp);
10708 tp->nvram_size = 0;
10710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10711 tg3_get_5752_nvram_info(tp);
10712 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10713 tg3_get_5755_nvram_info(tp);
10714 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10715 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10716 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10717 tg3_get_5787_nvram_info(tp);
10718 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10719 tg3_get_5761_nvram_info(tp);
10720 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10721 tg3_get_5906_nvram_info(tp);
10722 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
10723 tg3_get_57780_nvram_info(tp);
10725 tg3_get_nvram_info(tp);
10727 if (tp->nvram_size == 0)
10728 tg3_get_nvram_size(tp);
10730 tg3_disable_nvram_access(tp);
10731 tg3_nvram_unlock(tp);
10734 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10736 tg3_get_eeprom_size(tp);
10740 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10741 u32 offset, u32 len, u8 *buf)
10746 for (i = 0; i < len; i += 4) {
10752 memcpy(&data, buf + i, 4);
10755 * The SEEPROM interface expects the data to always be opposite
10756 * the native endian format. We accomplish this by reversing
10757 * all the operations that would have been performed on the
10758 * data from a call to tg3_nvram_read_be32().
10760 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
10762 val = tr32(GRC_EEPROM_ADDR);
10763 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10765 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10767 tw32(GRC_EEPROM_ADDR, val |
10768 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10769 (addr & EEPROM_ADDR_ADDR_MASK) |
10770 EEPROM_ADDR_START |
10771 EEPROM_ADDR_WRITE);
10773 for (j = 0; j < 1000; j++) {
10774 val = tr32(GRC_EEPROM_ADDR);
10776 if (val & EEPROM_ADDR_COMPLETE)
10780 if (!(val & EEPROM_ADDR_COMPLETE)) {
10789 /* offset and length are dword aligned */
10790 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10794 u32 pagesize = tp->nvram_pagesize;
10795 u32 pagemask = pagesize - 1;
10799 tmp = kmalloc(pagesize, GFP_KERNEL);
10805 u32 phy_addr, page_off, size;
10807 phy_addr = offset & ~pagemask;
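/* Read back the whole flash page containing this offset so a
 * partial-page write can be merged into it before the page is erased
 * and rewritten.
 */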
10809 for (j = 0; j < pagesize; j += 4) {
10810 ret = tg3_nvram_read_be32(tp, phy_addr + j,
10811 (__be32 *) (tmp + j));
10818 page_off = offset & pagemask;
10825 memcpy(tmp + page_off, buf, size);
10827 offset = offset + (pagesize - page_off);
10829 tg3_enable_nvram_access(tp);
10832 * Before we can erase the flash page, we need
10833 * to issue a special "write enable" command.
10835 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10837 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10840 /* Erase the target page */
10841 tw32(NVRAM_ADDR, phy_addr);
10843 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10844 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10846 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10849 /* Issue another write enable to start the write. */
10850 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10852 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10855 for (j = 0; j < pagesize; j += 4) {
10858 data = *((__be32 *) (tmp + j));
10860 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10862 tw32(NVRAM_ADDR, phy_addr + j);
10864 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10868 nvram_cmd |= NVRAM_CMD_FIRST;
10869 else if (j == (pagesize - 4))
10870 nvram_cmd |= NVRAM_CMD_LAST;
10872 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10879 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10880 tg3_nvram_exec_cmd(tp, nvram_cmd);
10887 /* offset and length are dword aligned */
10888 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10893 for (i = 0; i < len; i += 4, offset += 4) {
10894 u32 page_off, phy_addr, nvram_cmd;
10897 memcpy(&data, buf + i, 4);
10898 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10900 page_off = offset % tp->nvram_pagesize;
10902 phy_addr = tg3_nvram_phys_addr(tp, offset);
10904 tw32(NVRAM_ADDR, phy_addr);
10906 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
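/* Flag page boundaries: the first word of a page (or of the whole
 * transfer) gets NVRAM_CMD_FIRST, and the last word of a page or of
 * the transfer gets NVRAM_CMD_LAST.
 */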
10908 if ((page_off == 0) || (i == 0))
10909 nvram_cmd |= NVRAM_CMD_FIRST;
10910 if (page_off == (tp->nvram_pagesize - 4))
10911 nvram_cmd |= NVRAM_CMD_LAST;
10913 if (i == (len - 4))
10914 nvram_cmd |= NVRAM_CMD_LAST;
10916 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10917 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
10918 (tp->nvram_jedecnum == JEDEC_ST) &&
10919 (nvram_cmd & NVRAM_CMD_FIRST)) {
10921 if ((ret = tg3_nvram_exec_cmd(tp,
10922 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10927 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10928 /* We always do complete word writes to eeprom. */
10929 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10932 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10938 /* offset and length are dword aligned */
10939 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10943 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10944 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10945 ~GRC_LCLCTRL_GPIO_OUTPUT1);
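/* Devices without a real NVRAM interface fall back to the serial-EEPROM
 * write path; otherwise choose the buffered or unbuffered flash routine
 * based on the part's capabilities.
 */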
10949 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10950 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10955 ret = tg3_nvram_lock(tp);
10959 tg3_enable_nvram_access(tp);
10960 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10961 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10962 tw32(NVRAM_WRITE1, 0x406);
10964 grc_mode = tr32(GRC_MODE);
10965 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10967 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10968 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10970 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10974 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10978 grc_mode = tr32(GRC_MODE);
10979 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10981 tg3_disable_nvram_access(tp);
10982 tg3_nvram_unlock(tp);
10985 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10986 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10993 struct subsys_tbl_ent {
10994 u16 subsys_vendor, subsys_devid;
10998 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10999 /* Broadcom boards. */
11000 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11001 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11002 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11003 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11004 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11005 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11006 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11007 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11008 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11009 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11010 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11013 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11014 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11015 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11016 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11017 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11020 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11021 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11022 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11023 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11025 /* Compaq boards. */
11026 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11027 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11028 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11029 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11030 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11033 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11036 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11040 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11041 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11042 tp->pdev->subsystem_vendor) &&
11043 (subsys_id_to_phy_id[i].subsys_devid ==
11044 tp->pdev->subsystem_device))
11045 return &subsys_id_to_phy_id[i];
11050 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11055 /* On some early chips the SRAM cannot be accessed in D3hot state,
11056 * so we need to make sure we're in D0.
11058 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11059 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11060 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11063 /* Make sure register accesses (indirect or otherwise)
11064 * will function correctly.
11066 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11067 tp->misc_host_ctrl);
11069 /* The memory arbiter has to be enabled in order for SRAM accesses
11070 * to succeed. Normally on powerup the tg3 chip firmware will make
11071 * sure it is enabled, but other entities such as system netboot
11072 * code might disable it.
11074 val = tr32(MEMARB_MODE);
11075 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11077 tp->phy_id = PHY_ID_INVALID;
11078 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11080 /* Assume an onboard device and WOL capable by default. */
11081 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11083 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11084 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11085 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11086 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11088 val = tr32(VCPU_CFGSHDW);
11089 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11090 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11091 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11092 (val & VCPU_CFGSHDW_WOL_MAGPKT))
11093 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11097 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11098 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11099 u32 nic_cfg, led_cfg;
11100 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11101 int eeprom_phy_serdes = 0;
11103 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11104 tp->nic_sram_data_cfg = nic_cfg;
11106 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11107 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11108 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11109 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11110 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11111 (ver > 0) && (ver < 0x100))
11112 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11114 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11115 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11117 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11118 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11119 eeprom_phy_serdes = 1;
11121 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11122 if (nic_phy_id != 0) {
11123 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11124 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
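/* Rebuild the driver's internal PHY ID format from the two ID words
 * stored in SRAM (the same layout tg3_phy_probe builds from
 * MII_PHYSID1/2).
 */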
11126 eeprom_phy_id = (id1 >> 16) << 10;
11127 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11128 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11132 tp->phy_id = eeprom_phy_id;
11133 if (eeprom_phy_serdes) {
11134 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11135 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11137 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11140 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11141 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11142 SHASTA_EXT_LED_MODE_MASK);
11144 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11148 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11149 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11152 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11153 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11156 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11157 tp->led_ctrl = LED_CTRL_MODE_MAC;
11159 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11160 * read on some older 5700/5701 bootcode.
11162 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11164 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11166 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11170 case SHASTA_EXT_LED_SHARED:
11171 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11172 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11173 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11174 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11175 LED_CTRL_MODE_PHY_2);
11178 case SHASTA_EXT_LED_MAC:
11179 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11182 case SHASTA_EXT_LED_COMBO:
11183 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11184 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11185 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11186 LED_CTRL_MODE_PHY_2);
11191 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11193 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11194 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11196 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11197 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11199 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11200 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11201 if ((tp->pdev->subsystem_vendor ==
11202 PCI_VENDOR_ID_ARIMA) &&
11203 (tp->pdev->subsystem_device == 0x205a ||
11204 tp->pdev->subsystem_device == 0x2063))
11205 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11207 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11208 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11211 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11212 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11213 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11214 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11217 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11218 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11219 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11221 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11222 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11223 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11225 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11226 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11227 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11229 if (cfg2 & (1 << 17))
11230 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11232 /* serdes signal pre-emphasis in register 0x590 set by */
11233 /* bootcode if bit 18 is set */
11234 if (cfg2 & (1 << 18))
11235 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11237 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11238 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
11239 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11240 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11242 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11245 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11246 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11247 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11250 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11251 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11252 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11253 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11254 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11255 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11258 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11259 device_set_wakeup_enable(&tp->pdev->dev,
11260 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11263 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11268 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11269 tw32(OTP_CTRL, cmd);
11271 /* Wait for up to 1 ms for command to execute. */
11272 for (i = 0; i < 100; i++) {
11273 val = tr32(OTP_STATUS);
11274 if (val & OTP_STATUS_CMD_DONE)
11279 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11282 /* Read the gphy configuration from the OTP region of the chip. The gphy
11283 * configuration is a 32-bit value that straddles the alignment boundary.
11284 * We do two 32-bit reads and then shift and merge the results.
11286 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11288 u32 bhalf_otp, thalf_otp;
11290 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11292 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11295 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11297 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11300 thalf_otp = tr32(OTP_READ_DATA);
11302 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11304 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11307 bhalf_otp = tr32(OTP_READ_DATA);
11309 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
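/* Worked example of the merge above (illustrative values only): with
 * thalf_otp = 0x1111aaaa and bhalf_otp = 0xbbbb2222, the value returned
 * is (0xaaaa << 16) | (0xbbbb2222 >> 16) = 0xaaaabbbb, i.e. the low
 * half of the first word followed by the high half of the second.
 */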
11312 static int __devinit tg3_phy_probe(struct tg3 *tp)
11314 u32 hw_phy_id_1, hw_phy_id_2;
11315 u32 hw_phy_id, hw_phy_id_masked;
11318 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11319 return tg3_phy_init(tp);
11321 /* Reading the PHY ID register can conflict with ASF
11322 * firmware access to the PHY hardware.
11325 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11326 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11327 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11329 /* Now read the physical PHY_ID from the chip and verify
11330 * that it is sane. If it doesn't look good, we fall back
11331 * to the hard-coded, subsystem-table-based PHY_ID and, failing
11332 * that, to the value found in the eeprom area.
11334 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11335 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11337 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11338 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11339 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
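/* Sketch of the packing above, assuming the standard MII PHYSID layout
 * (PHYSID1 = OUI bits 3..18, PHYSID2[15:10] = OUI bits 19..24,
 * PHYSID2[9:0] = model/revision):
 *
 *	bits 31..26 <- PHYSID2[15:10]
 *	bits 25..10 <- PHYSID1[15:0]
 *	bits  9..0  <- PHYSID2[9:0]
 *
 * which is the 32-bit format the PHY_ID_* constants are compared against.
 */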
11341 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11344 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11345 tp->phy_id = hw_phy_id;
11346 if (hw_phy_id_masked == PHY_ID_BCM8002)
11347 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11349 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11351 if (tp->phy_id != PHY_ID_INVALID) {
11352 /* Do nothing, phy ID already set up in
11353 * tg3_get_eeprom_hw_cfg().
11356 struct subsys_tbl_ent *p;
11358 /* No eeprom signature? Try the hardcoded
11359 * subsys device table.
11361 p = lookup_by_subsys(tp);
11365 tp->phy_id = p->phy_id;
11367 tp->phy_id == PHY_ID_BCM8002)
11368 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11372 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11373 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11374 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11375 u32 bmsr, adv_reg, tg3_ctrl, mask;
11377 tg3_readphy(tp, MII_BMSR, &bmsr);
11378 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11379 (bmsr & BMSR_LSTATUS))
11380 goto skip_phy_reset;
11382 err = tg3_phy_reset(tp);
11386 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11387 ADVERTISE_100HALF | ADVERTISE_100FULL |
11388 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11390 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11391 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11392 MII_TG3_CTRL_ADV_1000_FULL);
11393 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11394 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11395 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11396 MII_TG3_CTRL_ENABLE_AS_MASTER);
11399 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11400 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11401 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11402 if (!tg3_copper_is_advertising_all(tp, mask)) {
11403 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11405 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11406 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11408 tg3_writephy(tp, MII_BMCR,
11409 BMCR_ANENABLE | BMCR_ANRESTART);
11411 tg3_phy_set_wirespeed(tp);
11413 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11414 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11415 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11419 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11420 err = tg3_init_5401phy_dsp(tp);
11425 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11426 err = tg3_init_5401phy_dsp(tp);
11429 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11430 tp->link_config.advertising =
11431 (ADVERTISED_1000baseT_Half |
11432 ADVERTISED_1000baseT_Full |
11433 ADVERTISED_Autoneg |
11435 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11436 tp->link_config.advertising &=
11437 ~(ADVERTISED_1000baseT_Half |
11438 ADVERTISED_1000baseT_Full);
11443 static void __devinit tg3_read_partno(struct tg3 *tp)
11445 unsigned char vpd_data[256]; /* in little-endian format */
11449 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11450 tg3_nvram_read(tp, 0x0, &magic))
11451 goto out_not_found;
11453 if (magic == TG3_EEPROM_MAGIC) {
11454 for (i = 0; i < 256; i += 4) {
11457 /* The data is in little-endian format in NVRAM.
11458 * Use the big-endian read routines to preserve
11459 * the byte order as it exists in NVRAM.
11461 if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
11462 goto out_not_found;
11464 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
11469 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
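/* The loop below reads VPD through the PCI VPD capability mailbox:
 * write the byte address to PCI_VPD_ADDR with the flag bit (bit 15)
 * clear, poll until the hardware sets that bit to signal completion,
 * then pick up four bytes from PCI_VPD_DATA.  The 0x8000 tests below
 * are that completion flag.
 */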
11470 for (i = 0; i < 256; i += 4) {
11475 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11477 while (j++ < 100) {
11478 pci_read_config_word(tp->pdev, vpd_cap +
11479 PCI_VPD_ADDR, &tmp16);
11480 if (tmp16 & 0x8000)
11484 if (!(tmp16 & 0x8000))
11485 goto out_not_found;
11487 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11489 v = cpu_to_le32(tmp);
11490 memcpy(&vpd_data[i], &v, sizeof(v));
11494 /* Now parse and find the part number. */
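/* Sketch of the layout being walked, per the PCI VPD format the code
 * relies on: each resource starts with a tag byte (0x82 = Identifier
 * String, 0x90/0x91 = read-only/read-write VPD areas) followed by a
 * little-endian two-byte length.  The part-number search below then
 * looks inside the read-only area for a 'P','N' keyword followed by a
 * one-byte length and the part number bytes themselves.
 */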
11495 for (i = 0; i < 254; ) {
11496 unsigned char val = vpd_data[i];
11497 unsigned int block_end;
11499 if (val == 0x82 || val == 0x91) {
11502 (vpd_data[i + 2] << 8)));
11507 goto out_not_found;
11509 block_end = (i + 3 +
11511 (vpd_data[i + 2] << 8)));
11514 if (block_end > 256)
11515 goto out_not_found;
11517 while (i < (block_end - 2)) {
11518 if (vpd_data[i + 0] == 'P' &&
11519 vpd_data[i + 1] == 'N') {
11520 int partno_len = vpd_data[i + 2];
11523 if (partno_len > 24 || (partno_len + i) > 256)
11524 goto out_not_found;
11526 memcpy(tp->board_part_number,
11527 &vpd_data[i], partno_len);
11532 i += 3 + vpd_data[i + 2];
11535 /* Part number not found. */
11536 goto out_not_found;
11540 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11541 strcpy(tp->board_part_number, "BCM95906");
11542 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11543 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
11544 strcpy(tp->board_part_number, "BCM57780");
11545 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11546 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
11547 strcpy(tp->board_part_number, "BCM57760");
11548 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11549 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
11550 strcpy(tp->board_part_number, "BCM57790");
11551 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11552 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
11553 strcpy(tp->board_part_number, "BCM57788");
11555 strcpy(tp->board_part_number, "none");
11558 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11562 if (tg3_nvram_read(tp, offset, &val) ||
11563 (val & 0xfc000000) != 0x0c000000 ||
11564 tg3_nvram_read(tp, offset + 4, &val) ||
11571 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
11573 u32 val, offset, start, ver_offset;
11575 bool newver = false;
11577 if (tg3_nvram_read(tp, 0xc, &offset) ||
11578 tg3_nvram_read(tp, 0x4, &start))
11581 offset = tg3_nvram_logical_addr(tp, offset);
11583 if (tg3_nvram_read(tp, offset, &val))
11586 if ((val & 0xfc000000) == 0x0c000000) {
11587 if (tg3_nvram_read(tp, offset + 4, &val))
11595 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
11598 offset = offset + ver_offset - start;
11599 for (i = 0; i < 16; i += 4) {
11601 if (tg3_nvram_read_be32(tp, offset + i, &v))
11604 memcpy(tp->fw_ver + i, &v, sizeof(v));
11609 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
11612 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
11613 TG3_NVM_BCVER_MAJSFT;
11614 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
11615 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
11619 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
11621 u32 val, major, minor;
11623 /* Use native endian representation */
11624 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
11627 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
11628 TG3_NVM_HWSB_CFG1_MAJSFT;
11629 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
11630 TG3_NVM_HWSB_CFG1_MINSFT;
11632 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
11635 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
11637 u32 offset, major, minor, build;
11639 tp->fw_ver[0] = 's';
11640 tp->fw_ver[1] = 'b';
11641 tp->fw_ver[2] = '\0';
11643 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
11646 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
11647 case TG3_EEPROM_SB_REVISION_0:
11648 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
11650 case TG3_EEPROM_SB_REVISION_2:
11651 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
11653 case TG3_EEPROM_SB_REVISION_3:
11654 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
11660 if (tg3_nvram_read(tp, offset, &val))
11663 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
11664 TG3_EEPROM_SB_EDH_BLD_SHFT;
11665 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
11666 TG3_EEPROM_SB_EDH_MAJ_SHFT;
11667 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
11669 if (minor > 99 || build > 26)
11672 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
11675 tp->fw_ver[8] = 'a' + build - 1;
11676 tp->fw_ver[9] = '\0';
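/* Worked example of the string built above: with major = 1, minor = 5
 * and build = 2, fw_ver becomes "sb v1.05" after the snprintf() and
 * then "sb v1.05b" once the build letter ('a' + build - 1) is appended
 * at offset 8.
 */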
11680 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
11682 u32 val, offset, start;
11685 for (offset = TG3_NVM_DIR_START;
11686 offset < TG3_NVM_DIR_END;
11687 offset += TG3_NVM_DIRENT_SIZE) {
11688 if (tg3_nvram_read(tp, offset, &val))
11691 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11695 if (offset == TG3_NVM_DIR_END)
11698 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11699 start = 0x08000000;
11700 else if (tg3_nvram_read(tp, offset - 4, &start))
11703 if (tg3_nvram_read(tp, offset + 4, &offset) ||
11704 !tg3_fw_img_is_valid(tp, offset) ||
11705 tg3_nvram_read(tp, offset + 8, &val))
11708 offset += val - start;
11710 vlen = strlen(tp->fw_ver);
11712 tp->fw_ver[vlen++] = ',';
11713 tp->fw_ver[vlen++] = ' ';
11715 for (i = 0; i < 4; i++) {
11717 if (tg3_nvram_read_be32(tp, offset, &v))
11720 offset += sizeof(v);
11722 if (vlen > TG3_VER_SIZE - sizeof(v)) {
11723 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
11727 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
11732 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
11737 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
11738 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
11741 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
11742 if (apedata != APE_SEG_SIG_MAGIC)
11745 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
11746 if (!(apedata & APE_FW_STATUS_READY))
11749 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
11751 vlen = strlen(tp->fw_ver);
11753 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
11754 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
11755 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
11756 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
11757 (apedata & APE_FW_VERSION_BLDMSK));
11760 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11764 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
11765 tp->fw_ver[0] = 's';
11766 tp->fw_ver[1] = 'b';
11767 tp->fw_ver[2] = '\0';
11772 if (tg3_nvram_read(tp, 0, &val))
11775 if (val == TG3_EEPROM_MAGIC)
11776 tg3_read_bc_ver(tp);
11777 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
11778 tg3_read_sb_ver(tp, val);
11779 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11780 tg3_read_hwsb_ver(tp);
11784 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11785 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11788 tg3_read_mgmtfw_ver(tp);
11790 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11793 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11795 static int __devinit tg3_get_invariants(struct tg3 *tp)
11797 static struct pci_device_id write_reorder_chipsets[] = {
11798 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11799 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11800 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11801 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11802 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11803 PCI_DEVICE_ID_VIA_8385_0) },
11807 u32 pci_state_reg, grc_misc_cfg;
11812 /* Force memory write invalidate off. If we leave it on,
11813 * then on 5700_BX chips we have to enable a workaround.
11814 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11815 * to match the cacheline size. The Broadcom driver has this
11816 * workaround but turns MWI off at all times and so never uses
11817 * it. This seems to suggest that the workaround is insufficient.
11819 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11820 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11821 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11823 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11824 * has the register indirect write enable bit set before
11825 * we try to access any of the MMIO registers. It is also
11826 * critical that the PCI-X hw workaround situation is decided
11827 * before that as well.
11829 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11832 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11833 MISC_HOST_CTRL_CHIPREV_SHIFT);
11834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11835 u32 prod_id_asic_rev;
11837 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11838 &prod_id_asic_rev);
11839 tp->pci_chip_rev_id = prod_id_asic_rev;
11842 /* Wrong chip ID in 5752 A0. This code can be removed later
11843 * as A0 is not in production.
11845 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11846 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11848 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11849 * we need to disable memory and use config. cycles
11850 * only to access all registers. The 5702/03 chips
11851 * can mistakenly decode the special cycles from the
11852 * ICH chipsets as memory write cycles, causing corruption
11853 * of register and memory space. Only certain ICH bridges
11854 * will drive special cycles with non-zero data during the
11855 * address phase which can fall within the 5703's address
11856 * range. This is not an ICH bug as the PCI spec allows
11857 * non-zero address during special cycles. However, only
11858 * these ICH bridges are known to drive non-zero addresses
11859 * during special cycles.
11861 * Since special cycles do not cross PCI bridges, we only
11862 * enable this workaround if the 5703 is on the secondary
11863 * bus of these ICH bridges.
11865 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11866 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11867 static struct tg3_dev_id {
11871 } ich_chipsets[] = {
11872 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11874 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11876 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11878 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11882 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11883 struct pci_dev *bridge = NULL;
11885 while (pci_id->vendor != 0) {
11886 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11892 if (pci_id->rev != PCI_ANY_ID) {
11893 if (bridge->revision > pci_id->rev)
11896 if (bridge->subordinate &&
11897 (bridge->subordinate->number ==
11898 tp->pdev->bus->number)) {
11900 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11901 pci_dev_put(bridge);
11907 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11908 static struct tg3_dev_id {
11911 } bridge_chipsets[] = {
11912 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11913 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11916 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11917 struct pci_dev *bridge = NULL;
11919 while (pci_id->vendor != 0) {
11920 bridge = pci_get_device(pci_id->vendor,
11927 if (bridge->subordinate &&
11928 (bridge->subordinate->number <=
11929 tp->pdev->bus->number) &&
11930 (bridge->subordinate->subordinate >=
11931 tp->pdev->bus->number)) {
11932 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11933 pci_dev_put(bridge);
11939 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11940 * DMA addresses > 40-bit. This bridge may have additional
11941 * 57xx devices behind it in some 4-port NIC designs, for example.
11942 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround. */
11945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11947 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11948 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11949 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11952 struct pci_dev *bridge = NULL;
11955 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11956 PCI_DEVICE_ID_SERVERWORKS_EPB,
11958 if (bridge && bridge->subordinate &&
11959 (bridge->subordinate->number <=
11960 tp->pdev->bus->number) &&
11961 (bridge->subordinate->subordinate >=
11962 tp->pdev->bus->number)) {
11963 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11964 pci_dev_put(bridge);
11970 /* Initialize misc host control in PCI block. */
11971 tp->misc_host_ctrl |= (misc_ctrl_reg &
11972 MISC_HOST_CTRL_CHIPREV);
11973 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11974 tp->misc_host_ctrl);
11976 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11977 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11978 tp->pdev_peer = tg3_find_peer(tp);
11980 /* Intentionally exclude ASIC_REV_5906 */
11981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11983 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11987 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
11989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11990 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11992 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
11993 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11994 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11996 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11997 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11998 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12000 /* 5700 B0 chips do not support checksumming correctly due
12001 * to hardware bugs.
12003 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12004 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12006 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12007 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12008 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12009 tp->dev->features |= NETIF_F_IPV6_CSUM;
12012 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12013 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12014 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12015 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12016 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12017 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12018 tp->pdev_peer == tp->pdev))
12019 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12021 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12022 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12023 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12024 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12026 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12027 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12029 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12030 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12034 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12035 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12036 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12038 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12041 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12042 if (tp->pcie_cap != 0) {
12045 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12047 pcie_set_readrq(tp->pdev, 4096);
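/* pcie_set_readrq() programs the PCIe Max_Read_Request_Size in the
 * device control register, here to 4096 bytes.
 */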
12049 pci_read_config_word(tp->pdev,
12050 tp->pcie_cap + PCI_EXP_LNKCTL,
12052 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12054 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12055 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12057 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
12058 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
12059 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12061 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12062 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12063 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12064 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12065 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12066 if (!tp->pcix_cap) {
12067 printk(KERN_ERR PFX "Cannot find PCI-X "
12068 "capability, aborting.\n");
12072 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12073 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12076 /* If we have an AMD 762 or VIA K8T800 chipset, write
12077 * reordering to the mailbox registers done by the host
12078 * controller can cause major troubles. We read back from
12079 * every mailbox register write to force the writes to be
12080 * posted to the chip in order.
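/* A minimal sketch of the read-back flush described above, assuming
 * tg3_write_flush_reg32() follows the usual pattern for this
 * workaround:
 *
 *	writel(val, tp->regs + off);
 *	readl(tp->regs + off);		(forces the posted write out)
 */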
12082 if (pci_dev_present(write_reorder_chipsets) &&
12083 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12084 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12086 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12087 &tp->pci_cacheline_sz);
12088 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12089 &tp->pci_lat_timer);
12090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12091 tp->pci_lat_timer < 64) {
12092 tp->pci_lat_timer = 64;
12093 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12094 tp->pci_lat_timer);
12097 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12098 /* 5700 BX chips need to have their TX producer index
12099 * mailboxes written twice to work around a bug.
12101 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12103 /* If we are in PCI-X mode, enable register write workaround.
12105 * The workaround is to use indirect register accesses
12106 * for all chip writes not to mailbox registers.
12108 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12111 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12113 /* The chip can have its power management PCI config
12114 * space registers clobbered due to this bug.
12115 * So explicitly force the chip into D0 here.
12117 pci_read_config_dword(tp->pdev,
12118 tp->pm_cap + PCI_PM_CTRL,
12120 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12121 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12122 pci_write_config_dword(tp->pdev,
12123 tp->pm_cap + PCI_PM_CTRL,
12126 /* Also, force SERR#/PERR# in PCI command. */
12127 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12128 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12129 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12133 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12134 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12135 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12136 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12138 /* Chip-specific fixup from Broadcom driver */
12139 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12140 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12141 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12142 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12145 /* Default fast path register access methods */
12146 tp->read32 = tg3_read32;
12147 tp->write32 = tg3_write32;
12148 tp->read32_mbox = tg3_read32;
12149 tp->write32_mbox = tg3_write32;
12150 tp->write32_tx_mbox = tg3_write32;
12151 tp->write32_rx_mbox = tg3_write32;
12153 /* Various workaround register access methods */
12154 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12155 tp->write32 = tg3_write_indirect_reg32;
12156 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12157 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12158 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12160 * Back to back register writes can cause problems on these
12161 * chips; the workaround is to read back all reg writes
12162 * except those to mailbox regs.
12164 * See tg3_write_indirect_reg32().
12166 tp->write32 = tg3_write_flush_reg32;
12170 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12171 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12172 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12173 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12174 tp->write32_rx_mbox = tg3_write_flush_reg32;
12177 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12178 tp->read32 = tg3_read_indirect_reg32;
12179 tp->write32 = tg3_write_indirect_reg32;
12180 tp->read32_mbox = tg3_read_indirect_mbox;
12181 tp->write32_mbox = tg3_write_indirect_mbox;
12182 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12183 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12188 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12189 pci_cmd &= ~PCI_COMMAND_MEMORY;
12190 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12193 tp->read32_mbox = tg3_read32_mbox_5906;
12194 tp->write32_mbox = tg3_write32_mbox_5906;
12195 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12196 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12199 if (tp->write32 == tg3_write_indirect_reg32 ||
12200 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12201 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12202 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12203 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12205 /* Get eeprom hw config before calling tg3_set_power_state().
12206 * In particular, the TG3_FLG2_IS_NIC flag must be
12207 * determined before calling tg3_set_power_state() so that
12208 * we know whether or not to switch out of Vaux power.
12209 * When the flag is set, it means that GPIO1 is used for eeprom
12210 * write protect and also implies that it is a LOM where GPIOs
12211 * are not used to switch power.
12213 tg3_get_eeprom_hw_cfg(tp);
12215 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12216 /* Allow reads and writes to the
12217 * APE register and memory space.
12219 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12220 PCISTATE_ALLOW_APE_SHMEM_WR;
12221 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12229 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12231 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12232 * GPIO1 driven high will bring 5700's external PHY out of reset.
12233 * It is also used as eeprom write protect on LOMs.
12235 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12236 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12237 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12238 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12239 GRC_LCLCTRL_GPIO_OUTPUT1);
12240 /* Unused GPIO3 must be driven as output on 5752 because there
12241 * are no pull-up resistors on unused GPIO pins.
12243 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12244 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12246 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12247 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12248 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12250 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
12251 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
12252 /* Turn off the debug UART. */
12253 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12254 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12255 /* Keep VMain power. */
12256 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12257 GRC_LCLCTRL_GPIO_OUTPUT0;
12260 /* Force the chip into D0. */
12261 err = tg3_set_power_state(tp, PCI_D0);
12263 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12264 pci_name(tp->pdev));
12268 /* Derive initial jumbo mode from MTU assigned in
12269 * ether_setup() via the alloc_etherdev() call
12271 if (tp->dev->mtu > ETH_DATA_LEN &&
12272 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12273 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12275 /* Determine WakeOnLan speed to use. */
12276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12277 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12278 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12279 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12280 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12282 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12286 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
12288 /* A few boards don't want Ethernet@WireSpeed phy feature */
12289 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12290 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12291 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12292 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12293 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
12294 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12295 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12297 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12298 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12299 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12300 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12301 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12303 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12304 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
12305 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12306 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12308 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12309 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12311 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12312 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12313 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12314 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12315 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12317 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12320 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12321 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12322 tp->phy_otp = tg3_read_otp_phycfg(tp);
12323 if (tp->phy_otp == 0)
12324 tp->phy_otp = TG3_OTP_DEFAULT;
12327 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12328 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12330 tp->mi_mode = MAC_MI_MODE_BASE;
12332 tp->coalesce_mode = 0;
12333 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12334 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12335 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12339 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12341 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12342 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12343 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12344 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12346 err = tg3_mdio_init(tp);
12350 /* Initialize data/descriptor byte/word swapping. */
12351 val = tr32(GRC_MODE);
12352 val &= GRC_MODE_HOST_STACKUP;
12353 tw32(GRC_MODE, val | tp->grc_mode);
12355 tg3_switch_clocks(tp);
12357 /* Clear this out for sanity. */
12358 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12360 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12362 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12363 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12364 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12366 if (chiprevid == CHIPREV_ID_5701_A0 ||
12367 chiprevid == CHIPREV_ID_5701_B0 ||
12368 chiprevid == CHIPREV_ID_5701_B2 ||
12369 chiprevid == CHIPREV_ID_5701_B5) {
12370 void __iomem *sram_base;
12372 /* Write some dummy words into the SRAM status block
12373 * area and see if it reads back correctly. If the return
12374 * value is bad, force enable the PCIX workaround.
12376 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12378 writel(0x00000000, sram_base);
12379 writel(0x00000000, sram_base + 4);
12380 writel(0xffffffff, sram_base + 4);
12381 if (readl(sram_base) != 0x00000000)
12382 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12387 tg3_nvram_init(tp);
12389 grc_misc_cfg = tr32(GRC_MISC_CFG);
12390 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12393 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12394 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12395 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12397 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12398 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12399 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12400 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12401 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12402 HOSTCC_MODE_CLRTICK_TXBD);
12404 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12405 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12406 tp->misc_host_ctrl);
12409 /* Preserve the APE MAC_MODE bits */
12410 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12411 tp->mac_mode = tr32(MAC_MODE) |
12412 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12414 tp->mac_mode = TG3_DEF_MAC_MODE;
12416 /* these are limited to 10/100 only */
12417 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12418 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12419 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12420 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12421 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12422 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12423 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12424 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12425 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12426 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12427 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12428 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12429 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
12430 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12432 err = tg3_phy_probe(tp);
12434 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12435 pci_name(tp->pdev), err);
12436 /* ... but do not return immediately ... */
12440 tg3_read_partno(tp);
12441 tg3_read_fw_ver(tp);
12443 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12444 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12447 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12449 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12452 /* 5700 {AX,BX} chips have a broken status block link
12453 * change bit implementation, so we must use the
12454 * status register in those cases.
12456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12457 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12459 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12461 /* The led_ctrl is set during tg3_phy_probe; here we might
12462 * have to force the link status polling mechanism based
12463 * upon subsystem IDs.
12465 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12466 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12467 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12468 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12469 TG3_FLAG_USE_LINKCHG_REG);
12472 /* For all SERDES we poll the MAC status register. */
12473 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12474 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12476 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12478 tp->rx_offset = NET_IP_ALIGN;
12479 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12480 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12483 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12485 /* Increment the rx prod index on the rx std ring by at most
12486 * 8 for these chips to work around hw errata.
12488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12491 tp->rx_std_max_post = 8;
12493 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12494 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12495 PCIE_PWR_MGMT_L1_THRESH_MSK;
12500 #ifdef CONFIG_SPARC
12501 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12503 struct net_device *dev = tp->dev;
12504 struct pci_dev *pdev = tp->pdev;
12505 struct device_node *dp = pci_device_to_OF_node(pdev);
12506 const unsigned char *addr;
12509 addr = of_get_property(dp, "local-mac-address", &len);
12510 if (addr && len == 6) {
12511 memcpy(dev->dev_addr, addr, 6);
12512 memcpy(dev->perm_addr, dev->dev_addr, 6);
12518 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12520 struct net_device *dev = tp->dev;
12522 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12523 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12528 static int __devinit tg3_get_device_address(struct tg3 *tp)
12530 struct net_device *dev = tp->dev;
12531 u32 hi, lo, mac_offset;
12534 #ifdef CONFIG_SPARC
12535 if (!tg3_get_macaddr_sparc(tp))
12540 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12541 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12542 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12544 if (tg3_nvram_lock(tp))
12545 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12547 tg3_nvram_unlock(tp);
12549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12552 /* First try to get it from MAC address mailbox. */
12553 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12554 if ((hi >> 16) == 0x484b) {
12555 dev->dev_addr[0] = (hi >> 8) & 0xff;
12556 dev->dev_addr[1] = (hi >> 0) & 0xff;
12558 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12559 dev->dev_addr[2] = (lo >> 24) & 0xff;
12560 dev->dev_addr[3] = (lo >> 16) & 0xff;
12561 dev->dev_addr[4] = (lo >> 8) & 0xff;
12562 dev->dev_addr[5] = (lo >> 0) & 0xff;
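/* Worked example (illustrative values): hi = 0x484b0010 and
 * lo = 0x18a2b3c4 pass the 0x484b signature check above and unpack to
 * the MAC address 00:10:18:a2:b3:c4.
 */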
12564 /* Some old bootcode may report a 0 MAC address in SRAM */
12565 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12568 /* Next, try NVRAM. */
12569 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
12570 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
12571 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
12572 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
12573 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
12575 /* Finally just fetch it out of the MAC control regs. */
12577 hi = tr32(MAC_ADDR_0_HIGH);
12578 lo = tr32(MAC_ADDR_0_LOW);
12580 dev->dev_addr[5] = lo & 0xff;
12581 dev->dev_addr[4] = (lo >> 8) & 0xff;
12582 dev->dev_addr[3] = (lo >> 16) & 0xff;
12583 dev->dev_addr[2] = (lo >> 24) & 0xff;
12584 dev->dev_addr[1] = hi & 0xff;
12585 dev->dev_addr[0] = (hi >> 8) & 0xff;
12589 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12590 #ifdef CONFIG_SPARC
12591 if (!tg3_get_default_macaddr_sparc(tp))
12596 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12600 #define BOUNDARY_SINGLE_CACHELINE 1
12601 #define BOUNDARY_MULTI_CACHELINE 2
12603 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12605 int cacheline_size;
12609 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12611 cacheline_size = 1024;
12613 cacheline_size = (int) byte * 4;
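/* PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence the * 4
 * above: a register value of 0x10 (16 words) means a 64-byte cache
 * line.
 */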
12615 /* On 5703 and later chips, the boundary bits have no effect. */
12618 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12619 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12620 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12623 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12624 goal = BOUNDARY_MULTI_CACHELINE;
12626 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12627 goal = BOUNDARY_SINGLE_CACHELINE;
12636 /* PCI controllers on most RISC systems tend to disconnect
12637 * when a device tries to burst across a cache-line boundary.
12638 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12640 * Unfortunately, for PCI-E there are only limited
12641 * write-side controls for this, and thus for reads
12642 * we will still get the disconnects. We'll also waste
12643 * these PCI cycles for both read and write for chips
12644 * other than 5700 and 5701 which do not implement the boundary bits. */
12647 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12648 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12649 switch (cacheline_size) {
12654 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12655 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12656 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12658 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12659 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12664 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12665 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12669 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12670 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12673 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12674 switch (cacheline_size) {
12678 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12679 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12680 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12686 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12687 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12691 switch (cacheline_size) {
12693 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12694 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12695 DMA_RWCTRL_WRITE_BNDRY_16);
12700 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12701 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12702 DMA_RWCTRL_WRITE_BNDRY_32);
12707 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12708 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12709 DMA_RWCTRL_WRITE_BNDRY_64);
12714 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12715 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12716 DMA_RWCTRL_WRITE_BNDRY_128);
12721 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12722 DMA_RWCTRL_WRITE_BNDRY_256);
12725 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12726 DMA_RWCTRL_WRITE_BNDRY_512);
12730 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12731 DMA_RWCTRL_WRITE_BNDRY_1024);
12740 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12742 struct tg3_internal_buffer_desc test_desc;
12743 u32 sram_dma_descs;
12746 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12748 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12749 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12750 tw32(RDMAC_STATUS, 0);
12751 tw32(WDMAC_STATUS, 0);
12753 tw32(BUFMGR_MODE, 0);
12754 tw32(FTQ_RESET, 0);
12756 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12757 test_desc.addr_lo = buf_dma & 0xffffffff;
12758 test_desc.nic_mbuf = 0x00002100;
12759 test_desc.len = size;
12762 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12763 * the *second* time the tg3 driver was getting loaded after an
12766 * Broadcom tells me:
12767 * ...the DMA engine is connected to the GRC block and a DMA
12768 * reset may affect the GRC block in some unpredictable way...
12769 * The behavior of resets to individual blocks has not been tested.
12771 * Broadcom noted the GRC reset will also reset all sub-components.
12774 test_desc.cqid_sqid = (13 << 8) | 2;
12776 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12779 test_desc.cqid_sqid = (16 << 8) | 7;
12781 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12784 test_desc.flags = 0x00000005;
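/* The loop below copies the test descriptor into NIC SRAM one 32-bit
 * word at a time through the config-space memory window: the window
 * base register is pointed at the target SRAM address and each word is
 * then written through the window data register.
 */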
12786 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12789 val = *(((u32 *)&test_desc) + i);
12790 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12791 sram_dma_descs + (i * sizeof(u32)));
12792 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12794 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12797 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12799 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12803 for (i = 0; i < 40; i++) {
12807 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12809 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12810 if ((val & 0xffff) == sram_dma_descs) {
12821 #define TEST_BUFFER_SIZE 0x2000
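/* 0x2000 bytes = 8 KiB, i.e. 2048 32-bit test words. */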
12823 static int __devinit tg3_test_dma(struct tg3 *tp)
12825 dma_addr_t buf_dma;
12826 u32 *buf, saved_dma_rwctrl;
12829 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12835 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12836 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12838 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12840 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12841 /* DMA read watermark not used on PCIE */
12842 tp->dma_rwctrl |= 0x00180000;
12843 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12846 tp->dma_rwctrl |= 0x003f0000;
12848 tp->dma_rwctrl |= 0x003f000f;
12850 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12852 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12853 u32 read_water = 0x7;
12855 /* If the 5704 is behind the EPB bridge, we can
12856 * do the less restrictive ONE_DMA workaround for
12857 * better performance.
12859 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12860 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12861 tp->dma_rwctrl |= 0x8000;
12862 else if (ccval == 0x6 || ccval == 0x7)
12863 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12867 /* Set bit 23 to enable PCIX hw bug fix */
12869 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12870 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12872 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12873 /* 5780 always in PCIX mode */
12874 tp->dma_rwctrl |= 0x00144000;
12875 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12876 /* 5714 always in PCIX mode */
12877 tp->dma_rwctrl |= 0x00148000;
12879 tp->dma_rwctrl |= 0x001b000f;
12883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12885 tp->dma_rwctrl &= 0xfffffff0;
12887 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12888 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12889 /* Remove this if it causes problems for some boards. */
12890 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12892 /* On 5700/5701 chips, we need to set this bit.
12893 * Otherwise the chip will issue cacheline transactions
12894 * to streamable DMA memory without all the byte
12895 * enables turned on. This is an error on several
12896 * RISC PCI controllers, in particular sparc64.
12898 * On 5703/5704 chips, this bit has been reassigned
12899 * a different meaning. In particular, it is used
12900 * on those chips to enable a PCI-X workaround.
12902 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12905 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12908 /* Unneeded, already done by tg3_get_invariants. */
12909 tg3_switch_clocks(tp);
12913 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12914 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12917 /* It is best to perform the DMA test with the maximum write burst size
12918 * to expose the 5700/5701 write DMA bug.
12920 saved_dma_rwctrl = tp->dma_rwctrl;
12921 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12922 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12927 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12930 /* Send the buffer to the chip. */
12931 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12933 printk(KERN_ERR "tg3_test_dma() Write of the buffer failed, err = %d\n", ret);
12938 /* Validate that the data reached card RAM correctly. */
12939 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12941 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12942 if (le32_to_cpu(val) != p[i]) {
12943 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12944 /* ret = -ENODEV here? */
12949 /* Now read it back. */
12950 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12952 printk(KERN_ERR "tg3_test_dma() Read of the buffer failed, err = %d\n", ret);
12958 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12962 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12963 DMA_RWCTRL_WRITE_BNDRY_16) {
12964 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12965 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12966 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12969 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12975 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12981 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12982 DMA_RWCTRL_WRITE_BNDRY_16) {
12983 static struct pci_device_id dma_wait_state_chipsets[] = {
12984 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12985 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12989 /* DMA test passed without adjusting the DMA boundary;
12990 * now look for chipsets that are known to expose the
12991 * DMA bug without failing the test.
12993 if (pci_dev_present(dma_wait_state_chipsets)) {
12994 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12995 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12998 /* Safe to use the calculated DMA boundary. */
12999 tp->dma_rwctrl = saved_dma_rwctrl;
13001 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13005 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13010 static void __devinit tg3_init_link_config(struct tg3 *tp)
13012 tp->link_config.advertising =
13013 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13014 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13015 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13016 ADVERTISED_Autoneg | ADVERTISED_MII);
13017 tp->link_config.speed = SPEED_INVALID;
13018 tp->link_config.duplex = DUPLEX_INVALID;
13019 tp->link_config.autoneg = AUTONEG_ENABLE;
13020 tp->link_config.active_speed = SPEED_INVALID;
13021 tp->link_config.active_duplex = DUPLEX_INVALID;
13022 tp->link_config.phy_is_low_power = 0;
13023 tp->link_config.orig_speed = SPEED_INVALID;
13024 tp->link_config.orig_duplex = DUPLEX_INVALID;
13025 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13028 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13030 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13031 tp->bufmgr_config.mbuf_read_dma_low_water =
13032 DEFAULT_MB_RDMA_LOW_WATER_5705;
13033 tp->bufmgr_config.mbuf_mac_rx_low_water =
13034 DEFAULT_MB_MACRX_LOW_WATER_5705;
13035 tp->bufmgr_config.mbuf_high_water =
13036 DEFAULT_MB_HIGH_WATER_5705;
13037 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13038 tp->bufmgr_config.mbuf_mac_rx_low_water =
13039 DEFAULT_MB_MACRX_LOW_WATER_5906;
13040 tp->bufmgr_config.mbuf_high_water =
13041 DEFAULT_MB_HIGH_WATER_5906;
13044 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13045 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13046 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13047 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13048 tp->bufmgr_config.mbuf_high_water_jumbo =
13049 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13051 tp->bufmgr_config.mbuf_read_dma_low_water =
13052 DEFAULT_MB_RDMA_LOW_WATER;
13053 tp->bufmgr_config.mbuf_mac_rx_low_water =
13054 DEFAULT_MB_MACRX_LOW_WATER;
13055 tp->bufmgr_config.mbuf_high_water =
13056 DEFAULT_MB_HIGH_WATER;
13058 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13059 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13060 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13061 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13062 tp->bufmgr_config.mbuf_high_water_jumbo =
13063 DEFAULT_MB_HIGH_WATER_JUMBO;
13066 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13067 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13070 static char * __devinit tg3_phy_string(struct tg3 *tp)
13072 switch (tp->phy_id & PHY_ID_MASK) {
13073 case PHY_ID_BCM5400: return "5400";
13074 case PHY_ID_BCM5401: return "5401";
13075 case PHY_ID_BCM5411: return "5411";
13076 case PHY_ID_BCM5701: return "5701";
13077 case PHY_ID_BCM5703: return "5703";
13078 case PHY_ID_BCM5704: return "5704";
13079 case PHY_ID_BCM5705: return "5705";
13080 case PHY_ID_BCM5750: return "5750";
13081 case PHY_ID_BCM5752: return "5752";
13082 case PHY_ID_BCM5714: return "5714";
13083 case PHY_ID_BCM5780: return "5780";
13084 case PHY_ID_BCM5755: return "5755";
13085 case PHY_ID_BCM5787: return "5787";
13086 case PHY_ID_BCM5784: return "5784";
13087 case PHY_ID_BCM5756: return "5722/5756";
13088 case PHY_ID_BCM5906: return "5906";
13089 case PHY_ID_BCM5761: return "5761";
13090 case PHY_ID_BCM8002: return "8002/serdes";
13091 case 0: return "serdes";
13092 default: return "unknown";
13096 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13098 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13099 strcpy(str, "PCI Express");
13101 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13102 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13104 strcpy(str, "PCIX:");
13106 if ((clock_ctrl == 7) ||
13107 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13108 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13109 strcat(str, "133MHz");
13110 else if (clock_ctrl == 0)
13111 strcat(str, "33MHz");
13112 else if (clock_ctrl == 2)
13113 strcat(str, "50MHz");
13114 else if (clock_ctrl == 4)
13115 strcat(str, "66MHz");
13116 else if (clock_ctrl == 6)
13117 strcat(str, "100MHz");
13119 strcpy(str, "PCI:");
13120 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13121 strcat(str, "66MHz");
13123 strcat(str, "33MHz");
13125 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13126 strcat(str, ":32-bit");
13128 strcat(str, ":64-bit");
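/* Example results of the assembly above: plain "PCI Express" for PCIe
 * devices, or strings such as "PCIX:133MHz:64-bit" and
 * "PCI:66MHz:32-bit" built from the flags and clock bits read above.
 */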
13132 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13134 struct pci_dev *peer;
13135 unsigned int func, devnr = tp->pdev->devfn & ~7;
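/* devfn & ~7 strips the three function-number bits, so devnr | func
 * walks all eight possible functions in the same slot when looking
 * for the other port of a dual-port device.
 */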
13137 for (func = 0; func < 8; func++) {
13138 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13139 if (peer && peer != tp->pdev)
13143 /* 5704 can be configured in single-port mode; set peer to
13144 * tp->pdev in that case.
13152 * We don't need to keep the refcount elevated; there's no way
13153 * to remove one half of this device without removing the other
13160 static void __devinit tg3_init_coal(struct tg3 *tp)
13162 struct ethtool_coalesce *ec = &tp->coal;
13164 memset(ec, 0, sizeof(*ec));
13165 ec->cmd = ETHTOOL_GCOALESCE;
13166 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13167 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13168 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13169 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13170 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13171 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13172 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13173 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13174 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13176 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13177 HOSTCC_MODE_CLRTICK_TXBD)) {
13178 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13179 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13180 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13181 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13184 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13185 ec->rx_coalesce_usecs_irq = 0;
13186 ec->tx_coalesce_usecs_irq = 0;
13187 ec->stats_block_coalesce_usecs = 0;
13191 static const struct net_device_ops tg3_netdev_ops = {
13192 .ndo_open = tg3_open,
13193 .ndo_stop = tg3_close,
13194 .ndo_start_xmit = tg3_start_xmit,
13195 .ndo_get_stats = tg3_get_stats,
13196 .ndo_validate_addr = eth_validate_addr,
13197 .ndo_set_multicast_list = tg3_set_rx_mode,
13198 .ndo_set_mac_address = tg3_set_mac_addr,
13199 .ndo_do_ioctl = tg3_ioctl,
13200 .ndo_tx_timeout = tg3_tx_timeout,
13201 .ndo_change_mtu = tg3_change_mtu,
13202 #if TG3_VLAN_TAG_USED
13203 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13205 #ifdef CONFIG_NET_POLL_CONTROLLER
13206 .ndo_poll_controller = tg3_poll_controller,
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
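
/* PCI probe entry point: enable and map the device, read the chip's
 * invariants, choose DMA masks and TSO/checksum features, fetch the
 * MAC address, run the DMA engine test, and finally register the
 * net device.
 */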
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
		   (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		if (dev->features & NETIF_F_IP_CSUM)
			dev->features |= NETIF_F_TSO;
		if ((dev->features & NETIF_F_IPV6_CSUM) &&
		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			dev->features |= NETIF_F_TSO_ECN;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_fw;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_fw;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut down
	 * DMA itself; the DMA self test will enable WDMAC and we'll see
	 * (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_bus_string(tp, str),
	       dev->dev_addr);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		printk(KERN_INFO
		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		       tp->dev->name,
		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
	else
		printk(KERN_INFO
		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
		       tp->dev->name, tg3_phy_string(tp),
		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			 "10/100/1000Base-T")),
		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
	       (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_fw:
	if (tp->fw)
		release_firmware(tp->fw);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
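
/* PCI remove: undo everything tg3_init_one() set up, in reverse order. */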
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
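
/* Suspend: quiesce the interface and drop the chip into the requested
 * low-power state; if that fails, restart the hardware so the device
 * stays usable.
 */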
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
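
/* Resume: restore PCI state, bring the chip back to D0 and, if the
 * interface was running, restart the hardware and timers.
 */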
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
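
/* PCI driver glue and module entry/exit points. */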
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);