/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 **/
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read.  For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

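/*
 * Editorial note: SYSTIML lands in bits [shift+31:shift] and SYSTIMH in
 * bits [shift+63:shift+32] of the returned stamp.  On the 82580 the
 * SYSTIMR read mainly serves to latch a coherent L/H pair; per the
 * comment above it contributes zero because TIMINCA is left unadjusted.
 */
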
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#ifdef DEBUG
/**
 * igb_get_time_str - format current NIC and system time as string
 **/
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
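/*
 * Worked example of the mapping above: Q_IDX_82576(0) = 0,
 * Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, ...
 * Even indices fill the low half (0-7) and odd indices the high half
 * (8-15), interleaving PF queues around the per-VF queue pairs
 * described in igb_cache_ring_register() below.
 */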
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
			for (; j < adapter->rss_queues; j++)
				adapter->tx_ring[j]->reg_idx = rbase_offset +
				                               Q_IDX_82576(j);
		}
	case e1000_82575:
	case e1000_82580:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

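/*
 * Editorial note: each ring is a separate kzalloc() rather than one big
 * array so the error path above can hand a partially built set straight
 * to igb_free_queues(), which kfree()s and NULLs every ring pointer
 * individually.
 */
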
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
		/* 82580 uses the same table-based approach as 82576 but has
		   fewer entries; as a result we carry over for queues greater
		   than 4. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

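/*
 * Worked example of the IVAR layout handled above: on the 82576, rx
 * queue 9 with msix_vector 3 selects IVAR0 entry 9 & 0x7 = 1 and, since
 * the queue index is >= 8, merges (3 | E1000_IVAR_VALID) into byte 2
 * (bits 23:16) of that entry.  On the 82580 the same queue would use
 * entry 9 >> 1 = 4, with odd queues taking byte 2 (rx) and byte 3 (tx).
 */
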
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		                      E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

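/*
 * Editorial note: the resulting MSI-X layout is entry 0 for "other"
 * causes (link changes, mailbox events), then one entry per q_vector,
 * each named after the netdev and queue (e.g. "eth0-TxRx-0") so the
 * lines are easy to tell apart in /proc/interrupts.
 */
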
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

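	/*
	 * Example of the arithmetic above: with 4 rx queues and unpaired
	 * tx handlers this reserves 4 + 4 = 8 queue vectors; one more is
	 * added just below for link status, so 9 MSI-X entries are
	 * requested in total.
	 */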
	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

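/*
 * Editorial note (an assumption based on this driver's ethtool
 * conventions, not stated here): itr_val settings of 1-3 appear to be
 * mode codes selecting dynamic ITR rather than literal intervals, which
 * would be why both helpers above replace them with IGB_START_ITR when
 * seeding the register value.
 */
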
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

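/*
 * Example: with 4 rx and 4 tx queues but, say, only 5 q_vectors, the
 * else branch above pairs tx[i] and rx[i] on vector i for i = 0..3 and
 * leaves the spare vector unused; with 8 or more q_vectors every ring
 * gets a vector of its own instead.
 */
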
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
		        err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

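	/*
	 * Worked example of the watermark math above, assuming the 82575
	 * default of E1000_PBA_34K (34 KB) and a 1500-byte MTU
	 * (max_frame_size = 1518):
	 *   hwm = min(34816 * 9 / 10, 34816 - 2 * 1518)
	 *       = min(31334, 31780) = 31334
	 * so high_water = 31334 & 0xFFF0 = 31328 and low_water = 31312.
	 */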
	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags = 0;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (hw->mac.type >= e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting.  If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type == e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
		                 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		                                            "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		(part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for ordering it this way is because it is much
 * more expensive time wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->vfs_allocated_count > 7)
		adapter->vfs_allocated_count = 7;

	if (adapter->vfs_allocated_count) {
		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
		                           sizeof(struct vf_data_storage),
		                           GFP_KERNEL);
		/* if allocation failed then we do not support SR-IOV */
		if (!adapter->vf_data) {
			adapter->vfs_allocated_count = 0;
			dev_err(&pdev->dev, "Unable to allocate memory for VF "
			        "Data Storage\n");
		}
	}

	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
		adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
	} else {
		unsigned char mac_addr[ETH_ALEN];
		int i;
		dev_info(&pdev->dev, "%d vfs allocated\n",
		         adapter->vfs_allocated_count);
		for (i = 0; i < adapter->vfs_allocated_count; i++) {
			random_ether_addr(mac_addr);
			igb_set_vf_mac(adapter, i, mac_addr);
		}
	}
#endif /* CONFIG_PCI_IOV */
}

/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync updates the system timer every 8ns by 8ns
		 * and the value cannot be shifted.  Instead we need to shift
		 * the registers to generate a 64bit timer value.  As a result
		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
		 * 24 in order to generate a larger value for synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
		                 &adapter->cycles,
		                 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits.  As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		                (1 << E1000_TIMINCA_16NS_SHIFT) |
		                (16 << IGB_82576_TSYNC_SHIFT));
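		/*
		 * Editorial note on the value just written: every 16 ns the
		 * hardware adds 16 << 19 to SYSTIM, so one nanosecond is
		 * 2^19 counter units and the timecounter converts back via
		 * ns = (cycles * mult) >> shift with mult = 1, shift = 19.
		 * 16 << 19 = 2^23, which still fits the 24-bit increment
		 * field mentioned above.
		 */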

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
		                 &adapter->cycles,
		                 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

#ifdef CONFIG_PCI_IOV
	if (hw->mac.type == e1000_82576)
		adapter->vfs_allocated_count = max_vfs;

#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
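	/*
	 * Example of why pairing matters: with rss_queues = 8, separate
	 * rx and tx vectors would need 8 + 8 + 1 = 17 MSI-X entries,
	 * while IGB_FLAG_QUEUE_PAIRS shares one q_vector per rx/tx pair
	 * and gets by with 8 + 1 = 9.
	 */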

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_init_hw_timer(adapter);
	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

1981 * igb_open - Called when a network interface is made active
1982 * @netdev: network interface device structure
1984 * Returns 0 on success, negative value on failure
1986 * The open entry point is called when a network interface is made
1987 * active by the system (IFF_UP). At this point all resources needed
1988 * for transmit and receive operations are allocated, the interrupt
1989 * handler is registered with the OS, the watchdog timer is started,
1990 * and the stack is notified that the interface is ready.
1992 static int igb_open(struct net_device *netdev)
1994 struct igb_adapter *adapter = netdev_priv(netdev);
1995 struct e1000_hw *hw = &adapter->hw;
1999 /* disallow open during test */
2000 if (test_bit(__IGB_TESTING, &adapter->state))
2001 return -EBUSY;
2003 netif_carrier_off(netdev);
2005 /* allocate transmit descriptors */
2006 err = igb_setup_all_tx_resources(adapter);
2010 /* allocate receive descriptors */
2011 err = igb_setup_all_rx_resources(adapter);
2015 igb_power_up_link(adapter);
2017 /* before we allocate an interrupt, we must be ready to handle it.
2018 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2019 * as soon as we call pci_request_irq, so we have to set up our
2020 * clean_rx handler before we do so. */
2021 igb_configure(adapter);
2023 err = igb_request_irq(adapter);
2027 /* From here on the code is the same as igb_up() */
2028 clear_bit(__IGB_DOWN, &adapter->state);
2030 for (i = 0; i < adapter->num_q_vectors; i++) {
2031 struct igb_q_vector *q_vector = adapter->q_vector[i];
2032 napi_enable(&q_vector->napi);
2035 /* Clear any pending interrupts. */
2038 igb_irq_enable(adapter);
2040 /* notify VFs that reset has been completed */
2041 if (adapter->vfs_allocated_count) {
2042 u32 reg_data = rd32(E1000_CTRL_EXT);
2043 reg_data |= E1000_CTRL_EXT_PFRSTD;
2044 wr32(E1000_CTRL_EXT, reg_data);
2047 netif_tx_start_all_queues(netdev);
2049 /* start the watchdog. */
2050 hw->mac.get_link_status = 1;
2051 schedule_work(&adapter->watchdog_task);
2056 igb_release_hw_control(adapter);
2057 igb_power_down_link(adapter);
2058 igb_free_all_rx_resources(adapter);
2060 igb_free_all_tx_resources(adapter);
2068 * igb_close - Disables a network interface
2069 * @netdev: network interface device structure
2071 * Returns 0, this is not allowed to fail
2073 * The close entry point is called when an interface is de-activated
2074 * by the OS. The hardware is still under the driver's control, but
2075 * needs to be disabled. A global MAC reset is issued to stop the
2076 * hardware, and all transmit and receive resources are freed.
2078 static int igb_close(struct net_device *netdev)
2080 struct igb_adapter *adapter = netdev_priv(netdev);
2082 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2085 igb_free_irq(adapter);
2087 igb_free_all_tx_resources(adapter);
2088 igb_free_all_rx_resources(adapter);
2094 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2095 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2097 * Return 0 on success, negative on failure
2099 int igb_setup_tx_resources(struct igb_ring *tx_ring)
2101 struct pci_dev *pdev = tx_ring->pdev;
2104 size = sizeof(struct igb_buffer) * tx_ring->count;
2105 tx_ring->buffer_info = vmalloc(size);
2106 if (!tx_ring->buffer_info)
2108 memset(tx_ring->buffer_info, 0, size);
2110 /* round up to nearest 4K */
2111 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2112 tx_ring->size = ALIGN(tx_ring->size, 4096);
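/* Worked example (illustrative): assuming the 256-descriptor default,
 * tx_ring->size = 256 * sizeof(union e1000_adv_tx_desc) = 256 * 16 =
 * 4096 bytes, which ALIGN(..., 4096) leaves unchanged. */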
2114 tx_ring->desc = pci_alloc_consistent(pdev,
2121 tx_ring->next_to_use = 0;
2122 tx_ring->next_to_clean = 0;
2126 vfree(tx_ring->buffer_info);
2128 "Unable to allocate memory for the transmit descriptor ring\n");
2133 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2134 * (Descriptors) for all queues
2135 * @adapter: board private structure
2137 * Return 0 on success, negative on failure
2139 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2141 struct pci_dev *pdev = adapter->pdev;
2144 for (i = 0; i < adapter->num_tx_queues; i++) {
2145 err = igb_setup_tx_resources(adapter->tx_ring[i]);
2148 "Allocation for Tx Queue %u failed\n", i);
2149 for (i--; i >= 0; i--)
2150 igb_free_tx_resources(adapter->tx_ring[i]);
2155 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
2156 int r_idx = i % adapter->num_tx_queues;
2157 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
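/* Illustrative note: with num_tx_queues = 2 the modulo mapping fills
 * multi_tx_table as ring 0, ring 1, ring 0, ring 1, ... so any
 * queue_mapping value selects a valid ring. */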
2163 * igb_setup_tctl - configure the transmit control registers
2164 * @adapter: Board private structure
2166 void igb_setup_tctl(struct igb_adapter *adapter)
2168 struct e1000_hw *hw = &adapter->hw;
2171 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2172 wr32(E1000_TXDCTL(0), 0);
2174 /* Program the Transmit Control Register */
2175 tctl = rd32(E1000_TCTL);
2176 tctl &= ~E1000_TCTL_CT;
2177 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2178 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2180 igb_config_collision_dist(hw);
2182 /* Enable transmits */
2183 tctl |= E1000_TCTL_EN;
2185 wr32(E1000_TCTL, tctl);
2189 * igb_configure_tx_ring - Configure transmit ring after Reset
2190 * @adapter: board private structure
2191 * @ring: tx ring to configure
2193 * Configure a transmit ring after a reset.
2195 void igb_configure_tx_ring(struct igb_adapter *adapter,
2196 struct igb_ring *ring)
2198 struct e1000_hw *hw = &adapter->hw;
2200 u64 tdba = ring->dma;
2201 int reg_idx = ring->reg_idx;
2203 /* disable the queue */
2204 txdctl = rd32(E1000_TXDCTL(reg_idx));
2205 wr32(E1000_TXDCTL(reg_idx),
2206 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2210 wr32(E1000_TDLEN(reg_idx),
2211 ring->count * sizeof(union e1000_adv_tx_desc));
2212 wr32(E1000_TDBAL(reg_idx),
2213 tdba & 0x00000000ffffffffULL);
2214 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2216 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2217 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2218 writel(0, ring->head);
2219 writel(0, ring->tail);
2221 txdctl |= IGB_TX_PTHRESH;
2222 txdctl |= IGB_TX_HTHRESH << 8;
2223 txdctl |= IGB_TX_WTHRESH << 16;
2225 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2226 wr32(E1000_TXDCTL(reg_idx), txdctl);
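/* Illustrative note: TXDCTL carries the prefetch, host and write-back
 * thresholds in separate byte lanes, which is why IGB_TX_PTHRESH,
 * IGB_TX_HTHRESH and IGB_TX_WTHRESH are OR'd in at shifts 0, 8 and 16
 * above. */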
2230 * igb_configure_tx - Configure transmit Unit after Reset
2231 * @adapter: board private structure
2233 * Configure the Tx unit of the MAC after a reset.
2235 static void igb_configure_tx(struct igb_adapter *adapter)
2239 for (i = 0; i < adapter->num_tx_queues; i++)
2240 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
2244 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2245 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2247 * Returns 0 on success, negative on failure
2249 int igb_setup_rx_resources(struct igb_ring *rx_ring)
2251 struct pci_dev *pdev = rx_ring->pdev;
2254 size = sizeof(struct igb_buffer) * rx_ring->count;
2255 rx_ring->buffer_info = vmalloc(size);
2256 if (!rx_ring->buffer_info)
2258 memset(rx_ring->buffer_info, 0, size);
2260 desc_len = sizeof(union e1000_adv_rx_desc);
2262 /* Round up to nearest 4K */
2263 rx_ring->size = rx_ring->count * desc_len;
2264 rx_ring->size = ALIGN(rx_ring->size, 4096);
2266 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2272 rx_ring->next_to_clean = 0;
2273 rx_ring->next_to_use = 0;
2278 vfree(rx_ring->buffer_info);
2279 rx_ring->buffer_info = NULL;
2280 dev_err(&pdev->dev, "Unable to allocate memory for "
2281 "the receive descriptor ring\n");
2286 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2287 * (Descriptors) for all queues
2288 * @adapter: board private structure
2290 * Return 0 on success, negative on failure
2292 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2294 struct pci_dev *pdev = adapter->pdev;
2297 for (i = 0; i < adapter->num_rx_queues; i++) {
2298 err = igb_setup_rx_resources(adapter->rx_ring[i]);
2301 "Allocation for Rx Queue %u failed\n", i);
2302 for (i--; i >= 0; i--)
2303 igb_free_rx_resources(adapter->rx_ring[i]);
2312 * igb_setup_mrqc - configure the multiple receive queue control registers
2313 * @adapter: Board private structure
2315 static void igb_setup_mrqc(struct igb_adapter *adapter)
2317 struct e1000_hw *hw = &adapter->hw;
2319 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2324 static const u8 rsshash[40] = {
2325 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2326 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2327 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2328 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2330 /* Fill out hash function seeds */
2331 for (j = 0; j < 10; j++) {
2332 u32 rsskey = rsshash[(j * 4)];
2333 rsskey |= rsshash[(j * 4) + 1] << 8;
2334 rsskey |= rsshash[(j * 4) + 2] << 16;
2335 rsskey |= rsshash[(j * 4) + 3] << 24;
2336 array_wr32(E1000_RSSRK(0), j, rsskey);
2339 num_rx_queues = adapter->rss_queues;
2341 if (adapter->vfs_allocated_count) {
2342 /* 82575 and 82576 support 2 RSS queues for VMDq */
2343 switch (hw->mac.type) {
2359 if (hw->mac.type == e1000_82575)
2363 for (j = 0; j < (32 * 4); j++) {
2364 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2366 reta.bytes[j & 3] |= num_rx_queues << shift2;
2368 wr32(E1000_RETA(j >> 2), reta.dword);
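/* Illustrative note: the redirection table holds 128 one-byte entries
 * packed four per 32-bit register, so reta.bytes[j & 3] accumulates a
 * dword that is flushed to E1000_RETA(j >> 2). With num_rx_queues = 4
 * and shift = 0 the entries cycle 0, 1, 2, 3, 0, ... spreading hash
 * values evenly across the rings. */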
2372 * Disable raw packet checksumming so that RSS hash is placed in
2373 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2374 * offloads as they are enabled by default
2376 rxcsum = rd32(E1000_RXCSUM);
2377 rxcsum |= E1000_RXCSUM_PCSD;
2379 if (adapter->hw.mac.type >= e1000_82576)
2380 /* Enable Receive Checksum Offload for SCTP */
2381 rxcsum |= E1000_RXCSUM_CRCOFL;
2383 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2384 wr32(E1000_RXCSUM, rxcsum);
2386 /* If VMDq is enabled then we set the appropriate mode for that, else
2387 * we default to RSS so that an RSS hash is calculated per packet even
2388 * if we are only using one queue */
2389 if (adapter->vfs_allocated_count) {
2390 if (hw->mac.type > e1000_82575) {
2391 /* Set the default pool for the PF's first queue */
2392 u32 vtctl = rd32(E1000_VT_CTL);
2393 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2394 E1000_VT_CTL_DISABLE_DEF_POOL);
2395 vtctl |= adapter->vfs_allocated_count <<
2396 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2397 wr32(E1000_VT_CTL, vtctl);
2399 if (adapter->rss_queues > 1)
2400 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2402 mrqc = E1000_MRQC_ENABLE_VMDQ;
2404 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2406 igb_vmm_control(adapter);
2408 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2409 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2410 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2411 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2412 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2413 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2414 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2415 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2417 wr32(E1000_MRQC, mrqc);
2421 * igb_setup_rctl - configure the receive control registers
2422 * @adapter: Board private structure
2424 void igb_setup_rctl(struct igb_adapter *adapter)
2426 struct e1000_hw *hw = &adapter->hw;
2429 rctl = rd32(E1000_RCTL);
2431 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2432 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
2434 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
2435 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2438 * enable stripping of CRC. It's unlikely this will break BMC
2439 * redirection as it did with e1000. Newer features require
2440 * that the HW strips the CRC.
2442 rctl |= E1000_RCTL_SECRC;
2444 /* disable store bad packets and clear size bits. */
2445 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2447 /* enable LPE to prevent packets larger than max_frame_size */
2448 rctl |= E1000_RCTL_LPE;
2450 /* disable queue 0 to prevent tail write w/o re-config */
2451 wr32(E1000_RXDCTL(0), 0);
2453 /* Attention!!! For SR-IOV PF driver operations you must enable
2454 * queue drop for all VF and PF queues to prevent head of line blocking
2455 * if an untrusted VF does not provide descriptors to hardware.
2457 if (adapter->vfs_allocated_count) {
2458 /* set all queue drop enable bits */
2459 wr32(E1000_QDE, ALL_QUEUES);
2462 wr32(E1000_RCTL, rctl);
2465 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2468 struct e1000_hw *hw = &adapter->hw;
2471 /* if it isn't the PF, check to see if VFs are enabled and
2472 * increase the size to support VLAN tags */
2473 if (vfn < adapter->vfs_allocated_count &&
2474 adapter->vf_data[vfn].vlans_enabled)
2475 size += VLAN_TAG_SIZE;
2477 vmolr = rd32(E1000_VMOLR(vfn));
2478 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2479 vmolr |= size | E1000_VMOLR_LPE;
2480 wr32(E1000_VMOLR(vfn), vmolr);
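/* Worked example (illustrative): for a VF with VLANs enabled, a
 * requested size of 1522 is bumped by VLAN_TAG_SIZE to 1526 before
 * being written into the VMOLR RLPML field. */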
2486 * igb_rlpml_set - set maximum receive packet size
2487 * @adapter: board private structure
2489 * Configure maximum receivable packet size.
2491 static void igb_rlpml_set(struct igb_adapter *adapter)
2493 u32 max_frame_size = adapter->max_frame_size;
2494 struct e1000_hw *hw = &adapter->hw;
2495 u16 pf_id = adapter->vfs_allocated_count;
2498 max_frame_size += VLAN_TAG_SIZE;
2500 /* if VFs are enabled we set RLPML to the largest possible request
2501 * size and set the VMOLR RLPML to the size we need */
2503 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2504 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2507 wr32(E1000_RLPML, max_frame_size);
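/* Worked example (illustrative): with a 1500-byte MTU,
 * max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518,
 * or 1522 once the VLAN_TAG_SIZE adjustment above applies. */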
2510 static inline void igb_set_vmolr(struct igb_adapter *adapter,
2513 struct e1000_hw *hw = &adapter->hw;
2517 * This register exists only on 82576 and newer, so if the MAC is
2518 * older we should exit and do nothing
2520 if (hw->mac.type < e1000_82576)
2523 vmolr = rd32(E1000_VMOLR(vfn));
2524 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2526 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2528 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
2530 /* clear all bits that might not be set */
2531 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2533 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
2534 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2535 /*
2536 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2537 * multicast
2538 */
2539 if (vfn <= adapter->vfs_allocated_count)
2540 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2542 wr32(E1000_VMOLR(vfn), vmolr);
2546 * igb_configure_rx_ring - Configure a receive ring after Reset
2547 * @adapter: board private structure
2548 * @ring: receive ring to be configured
2550 * Configure the Rx unit of the MAC after a reset.
2552 void igb_configure_rx_ring(struct igb_adapter *adapter,
2553 struct igb_ring *ring)
2555 struct e1000_hw *hw = &adapter->hw;
2556 u64 rdba = ring->dma;
2557 int reg_idx = ring->reg_idx;
2560 /* disable the queue */
2561 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2562 wr32(E1000_RXDCTL(reg_idx),
2563 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2565 /* Set DMA base address registers */
2566 wr32(E1000_RDBAL(reg_idx),
2567 rdba & 0x00000000ffffffffULL);
2568 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2569 wr32(E1000_RDLEN(reg_idx),
2570 ring->count * sizeof(union e1000_adv_rx_desc));
2572 /* initialize head and tail */
2573 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2574 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2575 writel(0, ring->head);
2576 writel(0, ring->tail);
2578 /* set descriptor configuration */
2579 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2580 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2581 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2582 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2583 srrctl |= IGB_RXBUFFER_16384 >>
2584 E1000_SRRCTL_BSIZEPKT_SHIFT;
2586 srrctl |= (PAGE_SIZE / 2) >>
2587 E1000_SRRCTL_BSIZEPKT_SHIFT;
2589 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2591 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2592 E1000_SRRCTL_BSIZEPKT_SHIFT;
2593 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2595 /* Only set Drop Enable if we are supporting multiple queues */
2596 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2597 srrctl |= E1000_SRRCTL_DROP_EN;
2599 wr32(E1000_SRRCTL(reg_idx), srrctl);
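/* Worked example (illustrative): E1000_SRRCTL_BSIZEPKT_SHIFT scales
 * the packet buffer size to 1 KB units, so a 2048-byte rx_buffer_len
 * programs ALIGN(2048, 1024) >> 10 = 2 into SRRCTL. */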
2601 /* set filtering for VMDQ pools */
2602 igb_set_vmolr(adapter, reg_idx & 0x7, true);
2604 /* enable receive descriptor fetching */
2605 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2606 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2607 rxdctl &= 0xFFF00000;
2608 rxdctl |= IGB_RX_PTHRESH;
2609 rxdctl |= IGB_RX_HTHRESH << 8;
2610 rxdctl |= IGB_RX_WTHRESH << 16;
2611 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2615 * igb_configure_rx - Configure receive Unit after Reset
2616 * @adapter: board private structure
2618 * Configure the Rx unit of the MAC after a reset.
2620 static void igb_configure_rx(struct igb_adapter *adapter)
2624 /* set UTA to appropriate mode */
2625 igb_set_uta(adapter);
2627 /* set the correct pool for the PF default MAC address in entry 0 */
2628 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2629 adapter->vfs_allocated_count);
2631 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2632 * the Base and Length of the Rx Descriptor Ring */
2633 for (i = 0; i < adapter->num_rx_queues; i++)
2634 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
2638 * igb_free_tx_resources - Free Tx Resources per Queue
2639 * @tx_ring: Tx descriptor ring for a specific queue
2641 * Free all transmit software resources
2643 void igb_free_tx_resources(struct igb_ring *tx_ring)
2645 igb_clean_tx_ring(tx_ring);
2647 vfree(tx_ring->buffer_info);
2648 tx_ring->buffer_info = NULL;
2650 /* if not set, then don't free */
2654 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2655 tx_ring->desc, tx_ring->dma);
2657 tx_ring->desc = NULL;
2661 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2662 * @adapter: board private structure
2664 * Free all transmit software resources
2666 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2670 for (i = 0; i < adapter->num_tx_queues; i++)
2671 igb_free_tx_resources(adapter->tx_ring[i]);
2674 void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2675 struct igb_buffer *buffer_info)
2677 if (buffer_info->dma) {
2678 if (buffer_info->mapped_as_page)
2679 pci_unmap_page(tx_ring->pdev,
2681 buffer_info->length,
2684 pci_unmap_single(tx_ring->pdev,
2686 buffer_info->length,
2688 buffer_info->dma = 0;
2690 if (buffer_info->skb) {
2691 dev_kfree_skb_any(buffer_info->skb);
2692 buffer_info->skb = NULL;
2694 buffer_info->time_stamp = 0;
2695 buffer_info->length = 0;
2696 buffer_info->next_to_watch = 0;
2697 buffer_info->mapped_as_page = false;
2701 * igb_clean_tx_ring - Free Tx Buffers
2702 * @tx_ring: ring to be cleaned
2704 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2706 struct igb_buffer *buffer_info;
2710 if (!tx_ring->buffer_info)
2712 /* Free all the Tx ring sk_buffs */
2714 for (i = 0; i < tx_ring->count; i++) {
2715 buffer_info = &tx_ring->buffer_info[i];
2716 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
2719 size = sizeof(struct igb_buffer) * tx_ring->count;
2720 memset(tx_ring->buffer_info, 0, size);
2722 /* Zero out the descriptor ring */
2723 memset(tx_ring->desc, 0, tx_ring->size);
2725 tx_ring->next_to_use = 0;
2726 tx_ring->next_to_clean = 0;
2730 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2731 * @adapter: board private structure
2733 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2737 for (i = 0; i < adapter->num_tx_queues; i++)
2738 igb_clean_tx_ring(adapter->tx_ring[i]);
2742 * igb_free_rx_resources - Free Rx Resources
2743 * @rx_ring: ring to clean the resources from
2745 * Free all receive software resources
2747 void igb_free_rx_resources(struct igb_ring *rx_ring)
2749 igb_clean_rx_ring(rx_ring);
2751 vfree(rx_ring->buffer_info);
2752 rx_ring->buffer_info = NULL;
2754 /* if not set, then don't free */
2758 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2759 rx_ring->desc, rx_ring->dma);
2761 rx_ring->desc = NULL;
2765 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2766 * @adapter: board private structure
2768 * Free all receive software resources
2770 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2774 for (i = 0; i < adapter->num_rx_queues; i++)
2775 igb_free_rx_resources(adapter->rx_ring[i]);
2779 * igb_clean_rx_ring - Free Rx Buffers per Queue
2780 * @rx_ring: ring to free buffers from
2782 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2784 struct igb_buffer *buffer_info;
2788 if (!rx_ring->buffer_info)
2791 /* Free all the Rx ring sk_buffs */
2792 for (i = 0; i < rx_ring->count; i++) {
2793 buffer_info = &rx_ring->buffer_info[i];
2794 if (buffer_info->dma) {
2795 pci_unmap_single(rx_ring->pdev,
2797 rx_ring->rx_buffer_len,
2798 PCI_DMA_FROMDEVICE);
2799 buffer_info->dma = 0;
2802 if (buffer_info->skb) {
2803 dev_kfree_skb(buffer_info->skb);
2804 buffer_info->skb = NULL;
2806 if (buffer_info->page_dma) {
2807 pci_unmap_page(rx_ring->pdev,
2808 buffer_info->page_dma,
2810 PCI_DMA_FROMDEVICE);
2811 buffer_info->page_dma = 0;
2813 if (buffer_info->page) {
2814 put_page(buffer_info->page);
2815 buffer_info->page = NULL;
2816 buffer_info->page_offset = 0;
2820 size = sizeof(struct igb_buffer) * rx_ring->count;
2821 memset(rx_ring->buffer_info, 0, size);
2823 /* Zero out the descriptor ring */
2824 memset(rx_ring->desc, 0, rx_ring->size);
2826 rx_ring->next_to_clean = 0;
2827 rx_ring->next_to_use = 0;
2831 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2832 * @adapter: board private structure
2834 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2838 for (i = 0; i < adapter->num_rx_queues; i++)
2839 igb_clean_rx_ring(adapter->rx_ring[i]);
2843 * igb_set_mac - Change the Ethernet Address of the NIC
2844 * @netdev: network interface device structure
2845 * @p: pointer to an address structure
2847 * Returns 0 on success, negative on failure
2849 static int igb_set_mac(struct net_device *netdev, void *p)
2851 struct igb_adapter *adapter = netdev_priv(netdev);
2852 struct e1000_hw *hw = &adapter->hw;
2853 struct sockaddr *addr = p;
2855 if (!is_valid_ether_addr(addr->sa_data))
2856 return -EADDRNOTAVAIL;
2858 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2859 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2861 /* set the correct pool for the new PF MAC address in entry 0 */
2862 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2863 adapter->vfs_allocated_count);
2869 * igb_write_mc_addr_list - write multicast addresses to MTA
2870 * @netdev: network interface device structure
2872 * Writes multicast address list to the MTA hash table.
2873 * Returns: -ENOMEM on failure
2874 * 0 on no addresses written
2875 * X on writing X addresses to MTA
2877 static int igb_write_mc_addr_list(struct net_device *netdev)
2879 struct igb_adapter *adapter = netdev_priv(netdev);
2880 struct e1000_hw *hw = &adapter->hw;
2881 struct dev_mc_list *mc_ptr;
2885 if (netdev_mc_empty(netdev)) {
2886 /* nothing to program, so clear mc list */
2887 igb_update_mc_addr_list(hw, NULL, 0);
2888 igb_restore_vf_multicasts(adapter);
2892 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
2896 /* The shared function expects a packed array of only addresses. */
2898 netdev_for_each_mc_addr(mc_ptr, netdev)
2899 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2901 igb_update_mc_addr_list(hw, mta_list, i);
2904 return netdev_mc_count(netdev);
2908 * igb_write_uc_addr_list - write unicast addresses to RAR table
2909 * @netdev: network interface device structure
2911 * Writes unicast address list to the RAR table.
2912 * Returns: -ENOMEM on failure/insufficient address space
2913 * 0 on no addresses written
2914 * X on writing X addresses to the RAR table
2916 static int igb_write_uc_addr_list(struct net_device *netdev)
2918 struct igb_adapter *adapter = netdev_priv(netdev);
2919 struct e1000_hw *hw = &adapter->hw;
2920 unsigned int vfn = adapter->vfs_allocated_count;
2921 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2924 /* return ENOMEM indicating insufficient memory for addresses */
2925 if (netdev_uc_count(netdev) > rar_entries)
2928 if (!netdev_uc_empty(netdev) && rar_entries) {
2929 struct netdev_hw_addr *ha;
2931 netdev_for_each_uc_addr(ha, netdev) {
2934 igb_rar_set_qsel(adapter, ha->addr,
2940 /* write the addresses in reverse order to avoid write combining */
2941 for (; rar_entries > 0 ; rar_entries--) {
2942 wr32(E1000_RAH(rar_entries), 0);
2943 wr32(E1000_RAL(rar_entries), 0);
2951 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2952 * @netdev: network interface device structure
2954 * The set_rx_mode entry point is called whenever the unicast or multicast
2955 * address lists or the network interface flags are updated. This routine is
2956 * responsible for configuring the hardware for proper unicast, multicast,
2957 * promiscuous mode, and all-multi behavior.
2959 static void igb_set_rx_mode(struct net_device *netdev)
2961 struct igb_adapter *adapter = netdev_priv(netdev);
2962 struct e1000_hw *hw = &adapter->hw;
2963 unsigned int vfn = adapter->vfs_allocated_count;
2964 u32 rctl, vmolr = 0;
2967 /* Check for Promiscuous and All Multicast modes */
2968 rctl = rd32(E1000_RCTL);
2970 /* clear the affected bits */
2971 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2973 if (netdev->flags & IFF_PROMISC) {
2974 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2975 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2977 if (netdev->flags & IFF_ALLMULTI) {
2978 rctl |= E1000_RCTL_MPE;
2979 vmolr |= E1000_VMOLR_MPME;
2982 * Write addresses to the MTA, if the attempt fails
2983 * then we should just turn on promiscuous mode so
2984 * that we can at least receive multicast traffic
2986 count = igb_write_mc_addr_list(netdev);
2988 rctl |= E1000_RCTL_MPE;
2989 vmolr |= E1000_VMOLR_MPME;
2991 vmolr |= E1000_VMOLR_ROMPE;
2995 * Write addresses to available RAR registers, if there is not
2996 * sufficient space to store all the addresses then enable
2997 * unicast promiscuous mode
2999 count = igb_write_uc_addr_list(netdev);
3001 rctl |= E1000_RCTL_UPE;
3002 vmolr |= E1000_VMOLR_ROPE;
3004 rctl |= E1000_RCTL_VFE;
3006 wr32(E1000_RCTL, rctl);
3009 * In order to support SR-IOV and eventually VMDq it is necessary to set
3010 * the VMOLR to enable the appropriate modes. Without this workaround
3011 * we will have issues with VLAN tag stripping not being done for
3012 * frames that arrive only because we are the default pool
3014 if (hw->mac.type < e1000_82576)
3017 vmolr |= rd32(E1000_VMOLR(vfn)) &
3018 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3019 wr32(E1000_VMOLR(vfn), vmolr);
3020 igb_restore_vf_multicasts(adapter);
3023 /* Need to wait a few seconds after link up to get diagnostic information from
3024 * the phy */
3025 static void igb_update_phy_info(unsigned long data)
3027 struct igb_adapter *adapter = (struct igb_adapter *) data;
3028 igb_get_phy_info(&adapter->hw);
3032 * igb_has_link - check shared code for link and determine up/down
3033 * @adapter: pointer to driver private info
3035 bool igb_has_link(struct igb_adapter *adapter)
3037 struct e1000_hw *hw = &adapter->hw;
3038 bool link_active = false;
3041 /* get_link_status is set on LSC (link status) interrupt or
3042 * rx sequence error interrupt. get_link_status will stay
3043 * false until the e1000_check_for_link establishes link
3044 * for copper adapters ONLY
3046 switch (hw->phy.media_type) {
3047 case e1000_media_type_copper:
3048 if (hw->mac.get_link_status) {
3049 ret_val = hw->mac.ops.check_for_link(hw);
3050 link_active = !hw->mac.get_link_status;
3055 case e1000_media_type_internal_serdes:
3056 ret_val = hw->mac.ops.check_for_link(hw);
3057 link_active = hw->mac.serdes_has_link;
3060 case e1000_media_type_unknown:
3068 * igb_watchdog - Timer Call-back
3069 * @data: pointer to adapter cast into an unsigned long
3071 static void igb_watchdog(unsigned long data)
3073 struct igb_adapter *adapter = (struct igb_adapter *)data;
3074 /* Do the rest outside of interrupt context */
3075 schedule_work(&adapter->watchdog_task);
3078 static void igb_watchdog_task(struct work_struct *work)
3080 struct igb_adapter *adapter = container_of(work,
3083 struct e1000_hw *hw = &adapter->hw;
3084 struct net_device *netdev = adapter->netdev;
3088 link = igb_has_link(adapter);
3090 if (!netif_carrier_ok(netdev)) {
3092 hw->mac.ops.get_speed_and_duplex(hw,
3093 &adapter->link_speed,
3094 &adapter->link_duplex);
3096 ctrl = rd32(E1000_CTRL);
3097 /* Links status message must follow this format */
3098 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
3099 "Flow Control: %s\n",
3101 adapter->link_speed,
3102 adapter->link_duplex == FULL_DUPLEX ?
3103 "Full Duplex" : "Half Duplex",
3104 ((ctrl & E1000_CTRL_TFCE) &&
3105 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3106 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3107 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
3109 /* tweak tx_queue_len according to speed/duplex and
3110 * adjust the timeout factor */
3111 netdev->tx_queue_len = adapter->tx_queue_len;
3112 adapter->tx_timeout_factor = 1;
3113 switch (adapter->link_speed) {
3115 netdev->tx_queue_len = 10;
3116 adapter->tx_timeout_factor = 14;
3119 netdev->tx_queue_len = 100;
3120 /* maybe add some timeout factor ? */
3124 netif_carrier_on(netdev);
3126 igb_ping_all_vfs(adapter);
3128 /* link state has changed, schedule phy info update */
3129 if (!test_bit(__IGB_DOWN, &adapter->state))
3130 mod_timer(&adapter->phy_info_timer,
3131 round_jiffies(jiffies + 2 * HZ));
3134 if (netif_carrier_ok(netdev)) {
3135 adapter->link_speed = 0;
3136 adapter->link_duplex = 0;
3137 /* Links status message must follow this format */
3138 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3140 netif_carrier_off(netdev);
3142 igb_ping_all_vfs(adapter);
3144 /* link state has changed, schedule phy info update */
3145 if (!test_bit(__IGB_DOWN, &adapter->state))
3146 mod_timer(&adapter->phy_info_timer,
3147 round_jiffies(jiffies + 2 * HZ));
3151 igb_update_stats(adapter);
3153 for (i = 0; i < adapter->num_tx_queues; i++) {
3154 struct igb_ring *tx_ring = adapter->tx_ring[i];
3155 if (!netif_carrier_ok(netdev)) {
3156 /* We've lost link, so the controller stops DMA,
3157 * but we've got queued Tx work that's never going
3158 * to get done, so reset controller to flush Tx.
3159 * (Do the reset outside of interrupt context). */
3160 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3161 adapter->tx_timeout_count++;
3162 schedule_work(&adapter->reset_task);
3163 /* return immediately since reset is imminent */
3168 /* Force detection of hung controller every watchdog period */
3169 tx_ring->detect_tx_hung = true;
3172 /* Cause software interrupt to ensure rx ring is cleaned */
3173 if (adapter->msix_entries) {
3175 for (i = 0; i < adapter->num_q_vectors; i++) {
3176 struct igb_q_vector *q_vector = adapter->q_vector[i];
3177 eics |= q_vector->eims_value;
3179 wr32(E1000_EICS, eics);
3181 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3184 /* Reset the timer */
3185 if (!test_bit(__IGB_DOWN, &adapter->state))
3186 mod_timer(&adapter->watchdog_timer,
3187 round_jiffies(jiffies + 2 * HZ));
3190 enum latency_range {
3194 latency_invalid = 255
3198 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3200 * Stores a new ITR value based strictly on packet size. This
3201 * algorithm is less sophisticated than that used in igb_update_itr,
3202 * due to the difficulty of synchronizing statistics across multiple
3203 * receive rings. The divisors and thresholds used by this function
3204 * were determined based on theoretical maximum wire speed and testing
3205 * data, in order to minimize response time while increasing bulk
3206 * throughput.
3207 * This functionality is controlled by the InterruptThrottleRate module
3208 * parameter (see igb_param.c)
3209 * NOTE: This function is called only when operating in a multiqueue
3210 * receive environment.
3211 * @q_vector: pointer to q_vector
3213 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3215 int new_val = q_vector->itr_val;
3216 int avg_wire_size = 0;
3217 struct igb_adapter *adapter = q_vector->adapter;
3219 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3220 * ints/sec - ITR timer value of 120 ticks.
3222 if (adapter->link_speed != SPEED_1000) {
3227 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3228 struct igb_ring *ring = q_vector->rx_ring;
3229 avg_wire_size = ring->total_bytes / ring->total_packets;
3232 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3233 struct igb_ring *ring = q_vector->tx_ring;
3234 avg_wire_size = max_t(u32, avg_wire_size,
3235 (ring->total_bytes /
3236 ring->total_packets));
3239 /* if avg_wire_size isn't set no work was done */
3243 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3244 avg_wire_size += 24;
3246 /* Don't starve jumbo frames */
3247 avg_wire_size = min(avg_wire_size, 3000);
3249 /* Give a little boost to mid-size frames */
3250 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3251 new_val = avg_wire_size / 3;
3253 new_val = avg_wire_size / 2;
3255 /* when in itr mode 3 do not exceed 20K ints/sec */
3256 if (adapter->rx_itr_setting == 3 && new_val < 196)
3260 if (new_val != q_vector->itr_val) {
3261 q_vector->itr_val = new_val;
3262 q_vector->set_itr = 1;
3265 if (q_vector->rx_ring) {
3266 q_vector->rx_ring->total_bytes = 0;
3267 q_vector->rx_ring->total_packets = 0;
3269 if (q_vector->tx_ring) {
3270 q_vector->tx_ring->total_bytes = 0;
3271 q_vector->tx_ring->total_packets = 0;
3276 * igb_update_itr - update the dynamic ITR value based on statistics
3277 * Stores a new ITR value based on packets and byte
3278 * counts during the last interrupt. The advantage of per interrupt
3279 * computation is faster updates and more accurate ITR for the current
3280 * traffic pattern. Constants in this function were computed
3281 * based on theoretical maximum wire speed and thresholds were set based
3282 * on testing data as well as attempting to minimize response time
3283 * while increasing bulk throughput.
3284 * This functionality is controlled by the InterruptThrottleRate module
3285 * parameter (see igb_param.c)
3286 * NOTE: These calculations are only valid when operating in a single-
3287 * queue environment.
3288 * @adapter: pointer to adapter
3289 * @itr_setting: current q_vector->itr_val
3290 * @packets: the number of packets during this measurement interval
3291 * @bytes: the number of bytes during this measurement interval
3293 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3294 int packets, int bytes)
3296 unsigned int retval = itr_setting;
3299 goto update_itr_done;
3301 switch (itr_setting) {
3302 case lowest_latency:
3303 /* handle TSO and jumbo frames */
3304 if (bytes/packets > 8000)
3305 retval = bulk_latency;
3306 else if ((packets < 5) && (bytes > 512))
3307 retval = low_latency;
3309 case low_latency: /* 50 usec aka 20000 ints/s */
3310 if (bytes > 10000) {
3311 /* this if handles the TSO accounting */
3312 if (bytes/packets > 8000) {
3313 retval = bulk_latency;
3314 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3315 retval = bulk_latency;
3316 } else if ((packets > 35)) {
3317 retval = lowest_latency;
3319 } else if (bytes/packets > 2000) {
3320 retval = bulk_latency;
3321 } else if (packets <= 2 && bytes < 512) {
3322 retval = lowest_latency;
3325 case bulk_latency: /* 250 usec aka 4000 ints/s */
3326 if (bytes > 25000) {
3328 retval = low_latency;
3329 } else if (bytes < 1500) {
3330 retval = low_latency;
3339 static void igb_set_itr(struct igb_adapter *adapter)
3341 struct igb_q_vector *q_vector = adapter->q_vector[0];
3343 u32 new_itr = q_vector->itr_val;
3345 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3346 if (adapter->link_speed != SPEED_1000) {
3352 adapter->rx_itr = igb_update_itr(adapter,
3354 q_vector->rx_ring->total_packets,
3355 q_vector->rx_ring->total_bytes);
3357 adapter->tx_itr = igb_update_itr(adapter,
3359 q_vector->tx_ring->total_packets,
3360 q_vector->tx_ring->total_bytes);
3361 current_itr = max(adapter->rx_itr, adapter->tx_itr);
3363 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3364 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
3365 current_itr = low_latency;
3367 switch (current_itr) {
3368 /* counts and packets in update_itr are dependent on these numbers */
3369 case lowest_latency:
3370 new_itr = 56; /* aka 70,000 ints/sec */
3373 new_itr = 196; /* aka 20,000 ints/sec */
3376 new_itr = 980; /* aka 4,000 ints/sec */
3383 q_vector->rx_ring->total_bytes = 0;
3384 q_vector->rx_ring->total_packets = 0;
3385 q_vector->tx_ring->total_bytes = 0;
3386 q_vector->tx_ring->total_packets = 0;
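/* Worked arithmetic (illustrative): itr_val counts ~256 ns units, so
 * 196 * 256 ns ~= 50 us between interrupts ~= 20,000 ints/sec, and
 * 980 * 256 ns ~= 250 us ~= 4,000 ints/sec, matching the comments in
 * the switch above. */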
3388 if (new_itr != q_vector->itr_val) {
3389 /* this attempts to bias the interrupt rate towards Bulk
3390 * by adding intermediate steps when interrupt rate is
3392 new_itr = new_itr > q_vector->itr_val ?
3393 max((new_itr * q_vector->itr_val) /
3394 (new_itr + (q_vector->itr_val >> 2)),
3397 /* Don't write the value here; it resets the adapter's
3398 * internal timer, and causes us to delay far longer than
3399 * we should between interrupts. Instead, we write the ITR
3400 * value at the beginning of the next interrupt so the timing
3401 * ends up being correct.
3403 q_vector->itr_val = new_itr;
3404 q_vector->set_itr = 1;
3410 #define IGB_TX_FLAGS_CSUM 0x00000001
3411 #define IGB_TX_FLAGS_VLAN 0x00000002
3412 #define IGB_TX_FLAGS_TSO 0x00000004
3413 #define IGB_TX_FLAGS_IPV4 0x00000008
3414 #define IGB_TX_FLAGS_TSTAMP 0x00000010
3415 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3416 #define IGB_TX_FLAGS_VLAN_SHIFT 16
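/* Illustrative example: a VLAN tag travels in the upper 16 bits of
 * tx_flags, e.g.
 *   tx_flags |= IGB_TX_FLAGS_VLAN |
 *               (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 * and is recovered later by masking with IGB_TX_FLAGS_VLAN_MASK. */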
3418 static inline int igb_tso_adv(struct igb_ring *tx_ring,
3419 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3421 struct e1000_adv_tx_context_desc *context_desc;
3424 struct igb_buffer *buffer_info;
3425 u32 info = 0, tu_cmd = 0;
3429 if (skb_header_cloned(skb)) {
3430 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3435 l4len = tcp_hdrlen(skb);
3438 if (skb->protocol == htons(ETH_P_IP)) {
3439 struct iphdr *iph = ip_hdr(skb);
3442 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3446 } else if (skb_is_gso_v6(skb)) {
3447 ipv6_hdr(skb)->payload_len = 0;
3448 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3449 &ipv6_hdr(skb)->daddr,
3453 i = tx_ring->next_to_use;
3455 buffer_info = &tx_ring->buffer_info[i];
3456 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3457 /* VLAN MACLEN IPLEN */
3458 if (tx_flags & IGB_TX_FLAGS_VLAN)
3459 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3460 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3461 *hdr_len += skb_network_offset(skb);
3462 info |= skb_network_header_len(skb);
3463 *hdr_len += skb_network_header_len(skb);
3464 context_desc->vlan_macip_lens = cpu_to_le32(info);
3466 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3467 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3469 if (skb->protocol == htons(ETH_P_IP))
3470 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3471 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3473 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3476 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3477 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3479 /* For 82575, context index must be unique per ring. */
3480 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3481 mss_l4len_idx |= tx_ring->reg_idx << 4;
3483 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3484 context_desc->seqnum_seed = 0;
3486 buffer_info->time_stamp = jiffies;
3487 buffer_info->next_to_watch = i;
3488 buffer_info->dma = 0;
3490 if (i == tx_ring->count)
3493 tx_ring->next_to_use = i;
3498 static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3499 struct sk_buff *skb, u32 tx_flags)
3501 struct e1000_adv_tx_context_desc *context_desc;
3502 struct pci_dev *pdev = tx_ring->pdev;
3503 struct igb_buffer *buffer_info;
3504 u32 info = 0, tu_cmd = 0;
3507 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3508 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3509 i = tx_ring->next_to_use;
3510 buffer_info = &tx_ring->buffer_info[i];
3511 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3513 if (tx_flags & IGB_TX_FLAGS_VLAN)
3514 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3516 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3517 if (skb->ip_summed == CHECKSUM_PARTIAL)
3518 info |= skb_network_header_len(skb);
3520 context_desc->vlan_macip_lens = cpu_to_le32(info);
3522 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3524 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3527 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3528 const struct vlan_ethhdr *vhdr =
3529 (const struct vlan_ethhdr*)skb->data;
3531 protocol = vhdr->h_vlan_encapsulated_proto;
3533 protocol = skb->protocol;
3537 case cpu_to_be16(ETH_P_IP):
3538 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3539 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3540 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3541 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3542 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3544 case cpu_to_be16(ETH_P_IPV6):
3545 /* XXX what about other V6 headers?? */
3546 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3547 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3548 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3549 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3552 if (unlikely(net_ratelimit()))
3553 dev_warn(&pdev->dev,
3554 "partial checksum but proto=%x!\n",
3560 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3561 context_desc->seqnum_seed = 0;
3562 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3563 context_desc->mss_l4len_idx =
3564 cpu_to_le32(tx_ring->reg_idx << 4);
3566 buffer_info->time_stamp = jiffies;
3567 buffer_info->next_to_watch = i;
3568 buffer_info->dma = 0;
3571 if (i == tx_ring->count)
3573 tx_ring->next_to_use = i;
3580 #define IGB_MAX_TXD_PWR 16
3581 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
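/* Illustrative note: IGB_MAX_DATA_PER_TXD = 1 << 16 = 65536 bytes, the
 * most a single advanced data descriptor may carry; igb_tx_map_adv
 * below BUG()s rather than exceed it. */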
3583 static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3586 struct igb_buffer *buffer_info;
3587 struct pci_dev *pdev = tx_ring->pdev;
3588 unsigned int len = skb_headlen(skb);
3589 unsigned int count = 0, i;
3592 i = tx_ring->next_to_use;
3594 buffer_info = &tx_ring->buffer_info[i];
3595 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3596 buffer_info->length = len;
3597 /* set time_stamp *before* dma to help avoid a possible race */
3598 buffer_info->time_stamp = jiffies;
3599 buffer_info->next_to_watch = i;
3600 buffer_info->dma = pci_map_single(pdev, skb->data, len,
3602 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3605 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3606 struct skb_frag_struct *frag;
3610 if (i == tx_ring->count)
3613 frag = &skb_shinfo(skb)->frags[f];
3616 buffer_info = &tx_ring->buffer_info[i];
3617 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3618 buffer_info->length = len;
3619 buffer_info->time_stamp = jiffies;
3620 buffer_info->next_to_watch = i;
3621 buffer_info->mapped_as_page = true;
3622 buffer_info->dma = pci_map_page(pdev,
3627 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3632 tx_ring->buffer_info[i].skb = skb;
3633 tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
3634 tx_ring->buffer_info[first].next_to_watch = i;
3639 dev_err(&pdev->dev, "TX DMA map failed\n");
3641 /* clear timestamp and dma mappings for failed buffer_info mapping */
3642 buffer_info->dma = 0;
3643 buffer_info->time_stamp = 0;
3644 buffer_info->length = 0;
3645 buffer_info->next_to_watch = 0;
3646 buffer_info->mapped_as_page = false;
3648 /* clear timestamp and dma mappings for remaining portion of packet */
3653 buffer_info = &tx_ring->buffer_info[i];
3654 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3660 static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3661 u32 tx_flags, int count, u32 paylen,
3664 union e1000_adv_tx_desc *tx_desc;
3665 struct igb_buffer *buffer_info;
3666 u32 olinfo_status = 0, cmd_type_len;
3667 unsigned int i = tx_ring->next_to_use;
3669 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3670 E1000_ADVTXD_DCMD_DEXT);
3672 if (tx_flags & IGB_TX_FLAGS_VLAN)
3673 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3675 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3676 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3678 if (tx_flags & IGB_TX_FLAGS_TSO) {
3679 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3681 /* insert tcp checksum */
3682 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3684 /* insert ip checksum */
3685 if (tx_flags & IGB_TX_FLAGS_IPV4)
3686 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3688 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3689 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3692 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3693 (tx_flags & (IGB_TX_FLAGS_CSUM |
3695 IGB_TX_FLAGS_VLAN)))
3696 olinfo_status |= tx_ring->reg_idx << 4;
3698 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3700 do {
3701 buffer_info = &tx_ring->buffer_info[i];
3702 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3703 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3704 tx_desc->read.cmd_type_len =
3705 cpu_to_le32(cmd_type_len | buffer_info->length);
3706 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3709 if (i == tx_ring->count)
3711 } while (count > 0);
3713 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
3714 /* Force memory writes to complete before letting h/w
3715 * know there are new descriptors to fetch. (Only
3716 * applicable for weak-ordered memory model archs,
3717 * such as IA-64). */
3720 tx_ring->next_to_use = i;
3721 writel(i, tx_ring->tail);
3722 /* we need this if more than one processor can write to our tail
3723 * at a time; it synchronizes IO on IA64/Altix systems */
3727 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3729 struct net_device *netdev = tx_ring->netdev;
3731 netif_stop_subqueue(netdev, tx_ring->queue_index);
3733 /* Herbert's original patch had:
3734 * smp_mb__after_netif_stop_queue();
3735 * but since that doesn't exist yet, just open code it. */
3738 /* We need to check again in a case another CPU has just
3739 * made room available. */
3740 if (igb_desc_unused(tx_ring) < size)
3744 netif_wake_subqueue(netdev, tx_ring->queue_index);
3745 tx_ring->tx_stats.restart_queue++;
3749 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3751 if (igb_desc_unused(tx_ring) >= size)
3753 return __igb_maybe_stop_tx(tx_ring, size);
3756 netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3757 struct igb_ring *tx_ring)
3759 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
3764 union skb_shared_tx *shtx = skb_tx(skb);
3766 /* need: 1 descriptor per page,
3767 * + 2 desc gap to keep tail from touching head,
3768 * + 1 desc for skb->data,
3769 * + 1 desc for context descriptor,
3770 * otherwise try next time */
3771 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3772 /* this is a hard error */
3773 return NETDEV_TX_BUSY;
3776 if (unlikely(shtx->hardware)) {
3777 shtx->in_progress = 1;
3778 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3781 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
3782 tx_flags |= IGB_TX_FLAGS_VLAN;
3783 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3786 if (skb->protocol == htons(ETH_P_IP))
3787 tx_flags |= IGB_TX_FLAGS_IPV4;
3789 first = tx_ring->next_to_use;
3790 if (skb_is_gso(skb)) {
3791 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3794 dev_kfree_skb_any(skb);
3795 return NETDEV_TX_OK;
3800 tx_flags |= IGB_TX_FLAGS_TSO;
3801 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
3802 (skb->ip_summed == CHECKSUM_PARTIAL))
3803 tx_flags |= IGB_TX_FLAGS_CSUM;
3806 * count reflects descriptors mapped; if 0 or less then a mapping
3807 * error has occurred and we need to rewind the descriptor queue
3809 count = igb_tx_map_adv(tx_ring, skb, first);
3811 dev_kfree_skb_any(skb);
3812 tx_ring->buffer_info[first].time_stamp = 0;
3813 tx_ring->next_to_use = first;
3814 return NETDEV_TX_OK;
3817 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3819 /* Make sure there is space in the ring for the next send. */
3820 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
3822 return NETDEV_TX_OK;
3825 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3826 struct net_device *netdev)
3828 struct igb_adapter *adapter = netdev_priv(netdev);
3829 struct igb_ring *tx_ring;
3832 if (test_bit(__IGB_DOWN, &adapter->state)) {
3833 dev_kfree_skb_any(skb);
3834 return NETDEV_TX_OK;
3837 if (skb->len <= 0) {
3838 dev_kfree_skb_any(skb);
3839 return NETDEV_TX_OK;
3842 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3843 tx_ring = adapter->multi_tx_table[r_idx];
3845 /* This goes back to the question of how to logically map a tx queue
3846 * to a flow. Right now, performance is impacted slightly negatively
3847 * if using multiple tx queues. If the stack breaks away from a
3848 * single qdisc implementation, we can look at this again. */
3849 return igb_xmit_frame_ring_adv(skb, tx_ring);
3853 * igb_tx_timeout - Respond to a Tx Hang
3854 * @netdev: network interface device structure
3856 static void igb_tx_timeout(struct net_device *netdev)
3858 struct igb_adapter *adapter = netdev_priv(netdev);
3859 struct e1000_hw *hw = &adapter->hw;
3861 /* Do the reset outside of interrupt context */
3862 adapter->tx_timeout_count++;
3864 if (hw->mac.type == e1000_82580)
3865 hw->dev_spec._82575.global_device_reset = true;
3867 schedule_work(&adapter->reset_task);
3868 wr32(E1000_EICS,
3869 (adapter->eims_enable_mask & ~adapter->eims_other));
3872 static void igb_reset_task(struct work_struct *work)
3874 struct igb_adapter *adapter;
3875 adapter = container_of(work, struct igb_adapter, reset_task);
3877 igb_reinit_locked(adapter);
3881 * igb_get_stats - Get System Network Statistics
3882 * @netdev: network interface device structure
3884 * Returns the address of the device statistics structure.
3885 * The statistics are actually updated from the timer callback.
3887 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3889 /* only return the current stats */
3890 return &netdev->stats;
3894 * igb_change_mtu - Change the Maximum Transfer Unit
3895 * @netdev: network interface device structure
3896 * @new_mtu: new value for maximum frame size
3898 * Returns 0 on success, negative on failure
3900 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3902 struct igb_adapter *adapter = netdev_priv(netdev);
3903 struct pci_dev *pdev = adapter->pdev;
3904 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3905 u32 rx_buffer_len, i;
3907 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3908 dev_err(&pdev->dev, "Invalid MTU setting\n");
3912 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3913 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
3917 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3920 /* igb_down has a dependency on max_frame_size */
3921 adapter->max_frame_size = max_frame;
3923 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3924 * means we reserve 2 more, this pushes us to allocate from the next
3925 * larger slab size.
3926 * i.e. RXBUFFER_2048 --> size-4096 slab
3929 if (max_frame <= IGB_RXBUFFER_1024)
3930 rx_buffer_len = IGB_RXBUFFER_1024;
3931 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3932 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3934 rx_buffer_len = IGB_RXBUFFER_128;
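/* Worked example (illustrative): a standard 1500-byte MTU yields a
 * 1518-byte max_frame, so rx_buffer_len becomes
 * MAXIMUM_ETHERNET_VLAN_SIZE (1522); jumbo frames instead use the
 * 128-byte header buffer together with page-based packet split. */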
3936 if (netif_running(netdev))
3939 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
3940 netdev->mtu, new_mtu);
3941 netdev->mtu = new_mtu;
3943 for (i = 0; i < adapter->num_rx_queues; i++)
3944 adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
3946 if (netif_running(netdev))
3951 clear_bit(__IGB_RESETTING, &adapter->state);
3957 * igb_update_stats - Update the board statistics counters
3958 * @adapter: board private structure
3961 void igb_update_stats(struct igb_adapter *adapter)
3963 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3964 struct e1000_hw *hw = &adapter->hw;
3965 struct pci_dev *pdev = adapter->pdev;
3971 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3974 * Prevent stats update while adapter is being reset, or if the pci
3975 * connection is down.
3977 if (adapter->link_speed == 0)
3979 if (pci_channel_offline(pdev))
3984 for (i = 0; i < adapter->num_rx_queues; i++) {
3985 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3986 struct igb_ring *ring = adapter->rx_ring[i];
3987 ring->rx_stats.drops += rqdpc_tmp;
3988 net_stats->rx_fifo_errors += rqdpc_tmp;
3989 bytes += ring->rx_stats.bytes;
3990 packets += ring->rx_stats.packets;
3993 net_stats->rx_bytes = bytes;
3994 net_stats->rx_packets = packets;
3998 for (i = 0; i < adapter->num_tx_queues; i++) {
3999 struct igb_ring *ring = adapter->tx_ring[i];
4000 bytes += ring->tx_stats.bytes;
4001 packets += ring->tx_stats.packets;
4003 net_stats->tx_bytes = bytes;
4004 net_stats->tx_packets = packets;
4006 /* read stats registers */
4007 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4008 adapter->stats.gprc += rd32(E1000_GPRC);
4009 adapter->stats.gorc += rd32(E1000_GORCL);
4010 rd32(E1000_GORCH); /* clear GORCL */
4011 adapter->stats.bprc += rd32(E1000_BPRC);
4012 adapter->stats.mprc += rd32(E1000_MPRC);
4013 adapter->stats.roc += rd32(E1000_ROC);
4015 adapter->stats.prc64 += rd32(E1000_PRC64);
4016 adapter->stats.prc127 += rd32(E1000_PRC127);
4017 adapter->stats.prc255 += rd32(E1000_PRC255);
4018 adapter->stats.prc511 += rd32(E1000_PRC511);
4019 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4020 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4021 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4022 adapter->stats.sec += rd32(E1000_SEC);
4024 adapter->stats.mpc += rd32(E1000_MPC);
4025 adapter->stats.scc += rd32(E1000_SCC);
4026 adapter->stats.ecol += rd32(E1000_ECOL);
4027 adapter->stats.mcc += rd32(E1000_MCC);
4028 adapter->stats.latecol += rd32(E1000_LATECOL);
4029 adapter->stats.dc += rd32(E1000_DC);
4030 adapter->stats.rlec += rd32(E1000_RLEC);
4031 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4032 adapter->stats.xontxc += rd32(E1000_XONTXC);
4033 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4034 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4035 adapter->stats.fcruc += rd32(E1000_FCRUC);
4036 adapter->stats.gptc += rd32(E1000_GPTC);
4037 adapter->stats.gotc += rd32(E1000_GOTCL);
4038 rd32(E1000_GOTCH); /* clear GOTCL */
4039 rnbc = rd32(E1000_RNBC);
4040 adapter->stats.rnbc += rnbc;
4041 net_stats->rx_fifo_errors += rnbc;
4042 adapter->stats.ruc += rd32(E1000_RUC);
4043 adapter->stats.rfc += rd32(E1000_RFC);
4044 adapter->stats.rjc += rd32(E1000_RJC);
4045 adapter->stats.tor += rd32(E1000_TORH);
4046 adapter->stats.tot += rd32(E1000_TOTH);
4047 adapter->stats.tpr += rd32(E1000_TPR);
4049 adapter->stats.ptc64 += rd32(E1000_PTC64);
4050 adapter->stats.ptc127 += rd32(E1000_PTC127);
4051 adapter->stats.ptc255 += rd32(E1000_PTC255);
4052 adapter->stats.ptc511 += rd32(E1000_PTC511);
4053 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4054 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4056 adapter->stats.mptc += rd32(E1000_MPTC);
4057 adapter->stats.bptc += rd32(E1000_BPTC);
4059 adapter->stats.tpt += rd32(E1000_TPT);
4060 adapter->stats.colc += rd32(E1000_COLC);
4062 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
4063 /* read internal phy specific stats */
4064 reg = rd32(E1000_CTRL_EXT);
4065 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4066 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4067 adapter->stats.tncrs += rd32(E1000_TNCRS);
4070 adapter->stats.tsctc += rd32(E1000_TSCTC);
4071 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4073 adapter->stats.iac += rd32(E1000_IAC);
4074 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4075 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4076 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4077 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4078 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4079 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4080 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4081 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4083 /* Fill out the OS statistics structure */
4084 net_stats->multicast = adapter->stats.mprc;
4085 net_stats->collisions = adapter->stats.colc;
4089 /* RLEC on some newer hardware can be incorrect so build
4090 * our own version based on RUC and ROC */
4091 net_stats->rx_errors = adapter->stats.rxerrc +
4092 adapter->stats.crcerrs + adapter->stats.algnerrc +
4093 adapter->stats.ruc + adapter->stats.roc +
4094 adapter->stats.cexterr;
4095 net_stats->rx_length_errors = adapter->stats.ruc +
4096 adapter->stats.roc;
4097 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4098 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4099 net_stats->rx_missed_errors = adapter->stats.mpc;
4102 net_stats->tx_errors = adapter->stats.ecol +
4103 adapter->stats.latecol;
4104 net_stats->tx_aborted_errors = adapter->stats.ecol;
4105 net_stats->tx_window_errors = adapter->stats.latecol;
4106 net_stats->tx_carrier_errors = adapter->stats.tncrs;
4108 /* Tx Dropped needs to be maintained elsewhere */
4111 if (hw->phy.media_type == e1000_media_type_copper) {
4112 if ((adapter->link_speed == SPEED_1000) &&
4113 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
4114 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4115 adapter->phy_stats.idle_errors += phy_tmp;
4119 /* Management Stats */
4120 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4121 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4122 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
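/*
 * Write a q_vector's pending ITR value to its EITR register.  The 82575
 * expects the interval replicated in the high and low halves of the
 * register, while later MACs take the raw interval with a control bit
 * or'd in.
 */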
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}
static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}
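/*
 * igb_setup_dca - invalidate each q_vector's cached CPU so that
 * igb_update_dca() reprograms the DCA tags on its next run.
 */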
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}
static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
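/*
 * PF-to-VF mailbox helpers.  A ping carries the CTS (clear-to-send) bit
 * only once the VF has completed its reset handshake with the PF.
 */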
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}
static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
	                    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta;
		 * read the vid before clearing the entry */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}
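/*
 * igb_set_vmvir - program the VM VLAN insert register.  A non-zero vid
 * causes hardware to insert that default VLAN tag into frames sent by
 * the given VF; writing zero disables the default tag.
 */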
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
                               int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags */
	adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}
static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		if (adapter->vf_data[vf].pf_vlan)
			retval = -1;
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}
/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector,
	                                             napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 **/
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
                                   struct skb_shared_hwtstamps *shhwtstamps,
                                   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 **/
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!shtx->hardware) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(skb, &shhwtstamps);
}
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = buffer_info->gso_segs;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(q_vector, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	return (count < tx_ring->count);
}
/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag && adapter->vlgrp)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
				 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
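/*
 * Set skb->ip_summed from the descriptor status/error bits, working
 * around an errata where the L4E bit is set incorrectly on 60-byte
 * SCTP packets.
 */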
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}
static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
                                   struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
		return;
	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	regval = rd32(E1000_RXSTMPL);
	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
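/*
 * igb_clean_rx_irq_adv - consume up to @budget received packets,
 * reassembling header-split and multi-descriptor frames before passing
 * them up the stack.
 */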
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
			    le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	return cleaned;
}
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: ring to repopulate
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = netdev_alloc_page(netdev);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
						  buffer_info->page_dma)) {
				buffer_info->page_dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}

		skb = buffer_info->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
		}
		if (!buffer_info->dma) {
			buffer_info->dma = pci_map_single(rx_ring->pdev,
							  skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
						  buffer_info->dma)) {
				buffer_info->dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
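/*
 * igb_mii_ioctl - service MII register reads for SIOCGMIIPHY and
 * SIOCGMIIREG; only meaningful on copper media.
 */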
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
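/*
 * PCIe capability accessors used by the shared e1000 code: locate the
 * PCI Express capability block and access a word at @reg within it.
 */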
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
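/*
 * igb_vlan_rx_register - enable or disable VLAN tag stripping depending
 * on whether the stack registered a vlan_group with us.
 */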
static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
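/*
 * igb_set_spd_dplx - apply a forced speed/duplex setting.  1000 Mbps is
 * only reachable through autonegotiation, so that case re-enables
 * autoneg restricted to 1000/Full.
 */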
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
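/*
 * __igb_shutdown - common suspend/shutdown path: stop the interface,
 * program the wake-up filters and report whether wake should be armed.
 */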
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */
static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
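/*
 * igb_rar_set_qsel - write a receive address register together with its
 * pool select bits so that matching packets are steered to the given
 * VF/queue pool.
 */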
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
				      " change effective.");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	return -EOPNOTSUPP;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
                                 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}
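/*
 * igb_vmm_control - enable VMDq loopback and replication when VFs are
 * in use.  Note the intentional switch fall-through: 82576 programs
 * DTXCTL and then shares the 82580 RPLOLR setup.
 */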
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */