/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
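
/*
 * Usage sketch (hypothetical invocation): loading the module with
 * "modprobe igb max_vfs=2" asks igb_probe_vfs() further down in this
 * file to enable two SR-IOV virtual functions on each physical function.
 */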

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	int n = 0;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_buffer *buffer_info;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						rx_ring->rx_buffer_len, true);
					if (rx_ring->rx_buffer_len
					    < IGB_RXBUFFER_1024)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS,
						  16, 1,
						  phys_to_virt(
						    buffer_info->page_dma +
						    buffer_info->page_offset),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
	}

exit:
	return;
}

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 **/
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
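
/*
 * Assembly sketch of the 64-bit stamp on 82580 (derived from the code
 * above, assuming IGB_82580_TSYNC_SHIFT is the 24-bit shift described in
 * igb_init_hw_timer() below): bits [23:0] come from SYSTIMR[31:8],
 * bits [55:24] from SYSTIML, and the remaining high bits from SYSTIMH.
 */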

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
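
/*
 * Q_IDX_82576 interleaves ring indices across the two 82576 queue banks:
 * even i maps to i/2 and odd i to i/2 + 8 (0->0, 1->8, 2->1, 3->9, ...),
 * matching the VF layout described below where VF n owns queues n and n+8.
 */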

/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/* 82580 uses the same table-based approach as 82576 but has fewer
		   entries as a result we carry over for queues greater than 4. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
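
/*
 * 82576 IVAR packing sketch (derived from igb_assign_vector() above):
 * IVAR0 entry (queue & 0x7) holds four byte-wide vector fields --
 * byte 0: RX queue q (q < 8), byte 1: TX queue q (q < 8),
 * byte 2: RX queue q + 8,     byte 3: TX queue q + 8 --
 * so e.g. RX queue 10 programs byte 2 of IVAR0[2].
 */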

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                 E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
}
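
/*
 * Vector budget example (derived from the arithmetic above): with four
 * RSS queues and IGB_FLAG_QUEUE_PAIRS clear, numvecs is 4 rx + 4 tx
 * queue vectors plus 1 link-status vector = 9 MSI-X entries; with
 * paired queues it drops to 4 + 1 = 5.
 */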

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
		        err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
		                E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
		                E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
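
		/*
		 * Worked example (hypothetical 9018-byte jumbo frame):
		 * min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10
		 * = 18 KB, and min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB.
		 */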

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;
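
	/*
	 * Watermark example (hypothetical 34 KB Rx PBA, 1522-byte frame):
	 * 90% of the FIFO is 31334 bytes and FIFO minus two frames is
	 * 31772, so hwm = 31334; the 0xFFF0 mask rounds high_water down
	 * to 31328 and low_water sits 16 bytes below it.
	 */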

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags = 0;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (hw->mac.type >= e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type == e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
		                 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
		                                            "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		(part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
2081 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2082 * @adapter: board private structure to initialize
2084 * This function initializes the vf specific data storage and then attempts to
2085 * allocate the VFs. The reason for this ordering is that it is much
2086 * more expensive, time-wise, to disable SR-IOV than it is to allocate
2087 * and free the memory for the VFs.
2089 static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2091 #ifdef CONFIG_PCI_IOV
2092 struct pci_dev *pdev = adapter->pdev;
2094 if (adapter->vfs_allocated_count) {
2095 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2096 sizeof(struct vf_data_storage),
2098 /* if allocation failed then we do not support SR-IOV */
2099 if (!adapter->vf_data) {
2100 adapter->vfs_allocated_count = 0;
2101 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2106 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
2107 kfree(adapter->vf_data);
2108 adapter->vf_data = NULL;
2109 #endif /* CONFIG_PCI_IOV */
2110 adapter->vfs_allocated_count = 0;
2111 #ifdef CONFIG_PCI_IOV
2113 unsigned char mac_addr[ETH_ALEN];
2115 dev_info(&pdev->dev, "%d vfs allocated\n",
2116 adapter->vfs_allocated_count);
2117 for (i = 0; i < adapter->vfs_allocated_count; i++) {
2118 random_ether_addr(mac_addr);
2119 igb_set_vf_mac(adapter, i, mac_addr);
2122 #endif /* CONFIG_PCI_IOV */
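/*
 * Illustrative sketch, not driver code: the allocate-before-enable
 * ordering used by igb_probe_vfs() above, in miniature. The example_*
 * name is hypothetical. If kcalloc() fails we bail out before SR-IOV
 * is touched; if pci_enable_sriov() fails we only pay for a kfree(),
 * which is far cheaper than tearing SR-IOV back down.
 */
static int __devinit example_vf_alloc_order(struct pci_dev *pdev,
					    unsigned int num_vfs)
{
	struct vf_data_storage *vf_data;

	vf_data = kcalloc(num_vfs, sizeof(*vf_data), GFP_KERNEL);
	if (!vf_data)
		return -ENOMEM;		/* cheap failure, SR-IOV untouched */

	if (pci_enable_sriov(pdev, num_vfs)) {
		kfree(vf_data);		/* cheap unwind */
		return -EIO;
	}

	/* success: the caller keeps vf_data and the enabled VFs */
	return 0;
}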
2127 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2128 * @adapter: board private structure to initialize
2130 * igb_init_hw_timer initializes the function pointer and values for the hw
2131 * timer found in hardware.
2133 static void igb_init_hw_timer(struct igb_adapter *adapter)
2135 struct e1000_hw *hw = &adapter->hw;
2137 switch (hw->mac.type) {
2140 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2141 adapter->cycles.read = igb_read_clock;
2142 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2143 adapter->cycles.mult = 1;
2145 * The 82580 timesync advances the system timer in 8ns increments,
2146 * and the value cannot be shifted. Instead we need to shift
2147 * the registers to generate a 64bit timer value. As a result
2148 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2149 * 24 in order to generate a larger value for synchronization.
2151 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2152 /* disable system timer temporarily by setting bit 31 */
2153 wr32(E1000_TSAUXC, 0x80000000);
2156 /* Set registers so that rollover occurs soon to test this. */
2157 wr32(E1000_SYSTIMR, 0x00000000);
2158 wr32(E1000_SYSTIML, 0x80000000);
2159 wr32(E1000_SYSTIMH, 0x000000FF);
2162 /* enable system timer by clearing bit 31 */
2163 wr32(E1000_TSAUXC, 0x0);
2166 timecounter_init(&adapter->clock,
2168 ktime_to_ns(ktime_get_real()));
2170 * Synchronize our NIC clock against system wall clock. NIC
2171 * time stamp reading takes ~3us per sample, and each sample
2172 * proved stable even under load, so only 10 samples are needed
2173 * for each offset comparison.
2175 memset(&adapter->compare, 0, sizeof(adapter->compare));
2176 adapter->compare.source = &adapter->clock;
2177 adapter->compare.target = ktime_get_real;
2178 adapter->compare.num_samples = 10;
2179 timecompare_update(&adapter->compare, 0);
2183 * Initialize hardware timer: we keep it running just in case
2184 * that some program needs it later on.
2186 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2187 adapter->cycles.read = igb_read_clock;
2188 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2189 adapter->cycles.mult = 1;
2191 * Scale the NIC clock cycle by a large factor so that
2192 * relatively small clock corrections can be added or
2193 * subtracted at each clock tick. The drawbacks of a large
2194 * factor are a) that the clock register overflows more quickly
2195 * (not such a big deal) and b) that the increment per tick has
2196 * to fit into 24 bits. As a result we need to use a shift of
2197 * 19 so we can fit a value of 16 into the TIMINCA register.
2199 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2201 (1 << E1000_TIMINCA_16NS_SHIFT) |
2202 (16 << IGB_82576_TSYNC_SHIFT));
2204 /* Set registers so that rollover occurs soon to test this. */
2205 wr32(E1000_SYSTIML, 0x00000000);
2206 wr32(E1000_SYSTIMH, 0xFF800000);
2209 timecounter_init(&adapter->clock,
2211 ktime_to_ns(ktime_get_real()));
2213 * Synchronize our NIC clock against system wall clock. NIC
2214 * time stamp reading takes ~3us per sample, and each sample
2215 * proved stable even under load, so only 10 samples are needed
2216 * for each offset comparison.
2218 memset(&adapter->compare, 0, sizeof(adapter->compare));
2219 adapter->compare.source = &adapter->clock;
2220 adapter->compare.target = ktime_get_real;
2221 adapter->compare.num_samples = 10;
2222 timecompare_update(&adapter->compare, 0);
2225 /* 82575 does not support timesync */
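/*
 * Illustrative sketch, not driver code: the clocksource conversion the
 * kernel applies to the scaled SYSTIM counter configured above,
 * ns = (cycles * mult) >> shift. With mult = 1 and
 * shift = IGB_82576_TSYNC_SHIFT (19), a counter that advances by
 * (16 << 19) per 16 ns hardware tick reads back in nanoseconds.
 * example_cycles_to_ns() is a hypothetical helper.
 */
static u64 example_cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}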
2233 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2234 * @adapter: board private structure to initialize
2236 * igb_sw_init initializes the Adapter private data structure.
2237 * Fields are initialized based on PCI device information and
2238 * OS network device settings (MTU size).
2240 static int __devinit igb_sw_init(struct igb_adapter *adapter)
2242 struct e1000_hw *hw = &adapter->hw;
2243 struct net_device *netdev = adapter->netdev;
2244 struct pci_dev *pdev = adapter->pdev;
2246 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2248 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2249 adapter->rx_ring_count = IGB_DEFAULT_RXD;
2250 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2251 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2253 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2254 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2256 #ifdef CONFIG_PCI_IOV
2257 if (hw->mac.type == e1000_82576)
2258 adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
2260 #endif /* CONFIG_PCI_IOV */
2261 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
2264 * if rss_queues > 4 or VFs are going to be allocated with rss_queues
2265 * then we should combine the queues into a queue pair in order to
2266 * conserve interrupts due to limited supply
2268 if ((adapter->rss_queues > 4) ||
2269 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2270 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2272 /* This call may decrease the number of queues */
2273 if (igb_init_interrupt_scheme(adapter)) {
2274 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2278 igb_init_hw_timer(adapter);
2279 igb_probe_vfs(adapter);
2281 /* Explicitly disable IRQ since the NIC can be in any state. */
2282 igb_irq_disable(adapter);
2284 set_bit(__IGB_DOWN, &adapter->state);
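/*
 * Illustrative sketch, not driver code, of the queue-pairing rule
 * applied in igb_sw_init() above; the example_* name is hypothetical.
 * With more than 4 RSS queues, or RSS alongside more than 6 VFs, the
 * MSI-X vector budget runs out unless each Tx/Rx queue pair shares a
 * single vector.
 */
static bool example_needs_queue_pairs(u32 rss_queues, u32 vfs_allocated)
{
	return (rss_queues > 4) ||
	       ((rss_queues > 1) && (vfs_allocated > 6));
}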
2289 * igb_open - Called when a network interface is made active
2290 * @netdev: network interface device structure
2292 * Returns 0 on success, negative value on failure
2294 * The open entry point is called when a network interface is made
2295 * active by the system (IFF_UP). At this point all resources needed
2296 * for transmit and receive operations are allocated, the interrupt
2297 * handler is registered with the OS, the watchdog timer is started,
2298 * and the stack is notified that the interface is ready.
2300 static int igb_open(struct net_device *netdev)
2302 struct igb_adapter *adapter = netdev_priv(netdev);
2303 struct e1000_hw *hw = &adapter->hw;
2307 /* disallow open during test */
2308 if (test_bit(__IGB_TESTING, &adapter->state))
2311 netif_carrier_off(netdev);
2313 /* allocate transmit descriptors */
2314 err = igb_setup_all_tx_resources(adapter);
2318 /* allocate receive descriptors */
2319 err = igb_setup_all_rx_resources(adapter);
2323 igb_power_up_link(adapter);
2325 /* before we allocate an interrupt, we must be ready to handle it.
2326 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2327 * as soon as we call pci_request_irq, so we have to set up our
2328 * clean_rx handler before we do so. */
2329 igb_configure(adapter);
2331 err = igb_request_irq(adapter);
2335 /* From here on the code is the same as igb_up() */
2336 clear_bit(__IGB_DOWN, &adapter->state);
2338 for (i = 0; i < adapter->num_q_vectors; i++) {
2339 struct igb_q_vector *q_vector = adapter->q_vector[i];
2340 napi_enable(&q_vector->napi);
2343 /* Clear any pending interrupts. */
2346 igb_irq_enable(adapter);
2348 /* notify VFs that reset has been completed */
2349 if (adapter->vfs_allocated_count) {
2350 u32 reg_data = rd32(E1000_CTRL_EXT);
2351 reg_data |= E1000_CTRL_EXT_PFRSTD;
2352 wr32(E1000_CTRL_EXT, reg_data);
2355 netif_tx_start_all_queues(netdev);
2357 /* start the watchdog. */
2358 hw->mac.get_link_status = 1;
2359 schedule_work(&adapter->watchdog_task);
2364 igb_release_hw_control(adapter);
2365 igb_power_down_link(adapter);
2366 igb_free_all_rx_resources(adapter);
2368 igb_free_all_tx_resources(adapter);
2376 * igb_close - Disables a network interface
2377 * @netdev: network interface device structure
2379 * Returns 0, this is not allowed to fail
2381 * The close entry point is called when an interface is de-activated
2382 * by the OS. The hardware is still under the driver's control, but
2383 * needs to be disabled. A global MAC reset is issued to stop the
2384 * hardware, and all transmit and receive resources are freed.
2386 static int igb_close(struct net_device *netdev)
2388 struct igb_adapter *adapter = netdev_priv(netdev);
2390 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2393 igb_free_irq(adapter);
2395 igb_free_all_tx_resources(adapter);
2396 igb_free_all_rx_resources(adapter);
2402 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2403 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2405 * Return 0 on success, negative on failure
2407 int igb_setup_tx_resources(struct igb_ring *tx_ring)
2409 struct device *dev = tx_ring->dev;
2412 size = sizeof(struct igb_buffer) * tx_ring->count;
2413 tx_ring->buffer_info = vmalloc(size);
2414 if (!tx_ring->buffer_info)
2416 memset(tx_ring->buffer_info, 0, size);
2418 /* round up to nearest 4K */
2419 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2420 tx_ring->size = ALIGN(tx_ring->size, 4096);
2422 tx_ring->desc = dma_alloc_coherent(dev,
2430 tx_ring->next_to_use = 0;
2431 tx_ring->next_to_clean = 0;
2435 vfree(tx_ring->buffer_info);
2437 "Unable to allocate memory for the transmit descriptor ring\n");
2442 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2443 * (Descriptors) for all queues
2444 * @adapter: board private structure
2446 * Return 0 on success, negative on failure
2448 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2450 struct pci_dev *pdev = adapter->pdev;
2453 for (i = 0; i < adapter->num_tx_queues; i++) {
2454 err = igb_setup_tx_resources(adapter->tx_ring[i]);
2457 "Allocation for Tx Queue %u failed\n", i);
2458 for (i--; i >= 0; i--)
2459 igb_free_tx_resources(adapter->tx_ring[i]);
2464 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
2465 int r_idx = i % adapter->num_tx_queues;
2466 adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
2472 * igb_setup_tctl - configure the transmit control registers
2473 * @adapter: Board private structure
2475 void igb_setup_tctl(struct igb_adapter *adapter)
2477 struct e1000_hw *hw = &adapter->hw;
2480 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2481 wr32(E1000_TXDCTL(0), 0);
2483 /* Program the Transmit Control Register */
2484 tctl = rd32(E1000_TCTL);
2485 tctl &= ~E1000_TCTL_CT;
2486 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2487 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2489 igb_config_collision_dist(hw);
2491 /* Enable transmits */
2492 tctl |= E1000_TCTL_EN;
2494 wr32(E1000_TCTL, tctl);
2498 * igb_configure_tx_ring - Configure transmit ring after Reset
2499 * @adapter: board private structure
2500 * @ring: tx ring to configure
2502 * Configure a transmit ring after a reset.
2504 void igb_configure_tx_ring(struct igb_adapter *adapter,
2505 struct igb_ring *ring)
2507 struct e1000_hw *hw = &adapter->hw;
2509 u64 tdba = ring->dma;
2510 int reg_idx = ring->reg_idx;
2512 /* disable the queue */
2513 txdctl = rd32(E1000_TXDCTL(reg_idx));
2514 wr32(E1000_TXDCTL(reg_idx),
2515 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2519 wr32(E1000_TDLEN(reg_idx),
2520 ring->count * sizeof(union e1000_adv_tx_desc));
2521 wr32(E1000_TDBAL(reg_idx),
2522 tdba & 0x00000000ffffffffULL);
2523 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2525 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2526 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2527 writel(0, ring->head);
2528 writel(0, ring->tail);
2530 txdctl |= IGB_TX_PTHRESH;
2531 txdctl |= IGB_TX_HTHRESH << 8;
2532 txdctl |= IGB_TX_WTHRESH << 16;
2534 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2535 wr32(E1000_TXDCTL(reg_idx), txdctl);
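/*
 * Illustrative sketch, not driver code, of the TXDCTL threshold layout
 * programmed above: prefetch threshold in bits 5:0, host threshold in
 * bits 13:8, write-back threshold in bits 21:16. example_txdctl() is
 * a hypothetical helper.
 */
static u32 example_txdctl(u8 pthresh, u8 hthresh, u8 wthresh)
{
	return pthresh | (hthresh << 8) | (wthresh << 16) |
	       E1000_TXDCTL_QUEUE_ENABLE;
}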
2539 * igb_configure_tx - Configure transmit Unit after Reset
2540 * @adapter: board private structure
2542 * Configure the Tx unit of the MAC after a reset.
2544 static void igb_configure_tx(struct igb_adapter *adapter)
2548 for (i = 0; i < adapter->num_tx_queues; i++)
2549 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
2553 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2554 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2556 * Returns 0 on success, negative on failure
2558 int igb_setup_rx_resources(struct igb_ring *rx_ring)
2560 struct device *dev = rx_ring->dev;
2563 size = sizeof(struct igb_buffer) * rx_ring->count;
2564 rx_ring->buffer_info = vmalloc(size);
2565 if (!rx_ring->buffer_info)
2567 memset(rx_ring->buffer_info, 0, size);
2569 desc_len = sizeof(union e1000_adv_rx_desc);
2571 /* Round up to nearest 4K */
2572 rx_ring->size = rx_ring->count * desc_len;
2573 rx_ring->size = ALIGN(rx_ring->size, 4096);
2575 rx_ring->desc = dma_alloc_coherent(dev,
2583 rx_ring->next_to_clean = 0;
2584 rx_ring->next_to_use = 0;
2589 vfree(rx_ring->buffer_info);
2590 rx_ring->buffer_info = NULL;
2591 dev_err(dev, "Unable to allocate memory for the receive descriptor"
2597 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2598 * (Descriptors) for all queues
2599 * @adapter: board private structure
2601 * Return 0 on success, negative on failure
2603 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2605 struct pci_dev *pdev = adapter->pdev;
2608 for (i = 0; i < adapter->num_rx_queues; i++) {
2609 err = igb_setup_rx_resources(adapter->rx_ring[i]);
2612 "Allocation for Rx Queue %u failed\n", i);
2613 for (i--; i >= 0; i--)
2614 igb_free_rx_resources(adapter->rx_ring[i]);
2623 * igb_setup_mrqc - configure the multiple receive queue control registers
2624 * @adapter: Board private structure
2626 static void igb_setup_mrqc(struct igb_adapter *adapter)
2628 struct e1000_hw *hw = &adapter->hw;
2630 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2635 static const u8 rsshash[40] = {
2636 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2637 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2638 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2639 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2641 /* Fill out hash function seeds */
2642 for (j = 0; j < 10; j++) {
2643 u32 rsskey = rsshash[(j * 4)];
2644 rsskey |= rsshash[(j * 4) + 1] << 8;
2645 rsskey |= rsshash[(j * 4) + 2] << 16;
2646 rsskey |= rsshash[(j * 4) + 3] << 24;
2647 array_wr32(E1000_RSSRK(0), j, rsskey);
2650 num_rx_queues = adapter->rss_queues;
2652 if (adapter->vfs_allocated_count) {
2653 /* 82575 and 82576 support 2 RSS queues for VMDq */
2654 switch (hw->mac.type) {
2671 if (hw->mac.type == e1000_82575)
2675 for (j = 0; j < (32 * 4); j++) {
2676 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2678 reta.bytes[j & 3] |= num_rx_queues << shift2;
2680 wr32(E1000_RETA(j >> 2), reta.dword);
2684 * Disable raw packet checksumming so that RSS hash is placed in
2685 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2686 * offloads as they are enabled by default
2688 rxcsum = rd32(E1000_RXCSUM);
2689 rxcsum |= E1000_RXCSUM_PCSD;
2691 if (adapter->hw.mac.type >= e1000_82576)
2692 /* Enable Receive Checksum Offload for SCTP */
2693 rxcsum |= E1000_RXCSUM_CRCOFL;
2695 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2696 wr32(E1000_RXCSUM, rxcsum);
2698 /* If VMDq is enabled then we set the appropriate mode for that, else
2699 * we default to RSS so that an RSS hash is calculated per packet even
2700 * if we are only using one queue */
2701 if (adapter->vfs_allocated_count) {
2702 if (hw->mac.type > e1000_82575) {
2703 /* Set the default pool for the PF's first queue */
2704 u32 vtctl = rd32(E1000_VT_CTL);
2705 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2706 E1000_VT_CTL_DISABLE_DEF_POOL);
2707 vtctl |= adapter->vfs_allocated_count <<
2708 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2709 wr32(E1000_VT_CTL, vtctl);
2711 if (adapter->rss_queues > 1)
2712 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2714 mrqc = E1000_MRQC_ENABLE_VMDQ;
2716 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2718 igb_vmm_control(adapter);
2720 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2721 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2722 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2723 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2724 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2725 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2726 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2727 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2729 wr32(E1000_MRQC, mrqc);
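/*
 * Illustrative sketch, not driver code: the redirection-table fill
 * from igb_setup_mrqc() above, without the VMDq shift2 special case.
 * 128 one-byte entries are packed four at a time and flushed to the
 * 32 RETA registers. example_fill_reta() is a hypothetical helper.
 */
static void example_fill_reta(struct e1000_hw *hw, u32 num_rx_queues,
			      u32 shift)
{
	union {
		u32 dword;
		u8 bytes[4];
	} reta;
	u32 j;

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if ((j & 3) == 3)	/* flush every fourth entry */
			wr32(E1000_RETA(j >> 2), reta.dword);
	}
}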
2733 * igb_setup_rctl - configure the receive control registers
2734 * @adapter: Board private structure
2736 void igb_setup_rctl(struct igb_adapter *adapter)
2738 struct e1000_hw *hw = &adapter->hw;
2741 rctl = rd32(E1000_RCTL);
2743 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2744 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
2746 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
2747 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2750 * enable stripping of CRC. It's unlikely this will break BMC
2751 * redirection as it did with e1000. Newer features require
2752 * that the HW strips the CRC.
2754 rctl |= E1000_RCTL_SECRC;
2756 /* disable store bad packets and clear size bits. */
2757 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2759 /* enable LPE to prevent packets larger than max_frame_size */
2760 rctl |= E1000_RCTL_LPE;
2762 /* disable queue 0 to prevent tail write w/o re-config */
2763 wr32(E1000_RXDCTL(0), 0);
2765 /* Attention: for SR-IOV PF driver operations you must enable
2766 * queue drop for all VF and PF queues to prevent head of line blocking
2767 * if an untrusted VF does not provide descriptors to hardware.
2769 if (adapter->vfs_allocated_count) {
2770 /* set all queue drop enable bits */
2771 wr32(E1000_QDE, ALL_QUEUES);
2774 wr32(E1000_RCTL, rctl);
2777 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2780 struct e1000_hw *hw = &adapter->hw;
2783 /* if it isn't the PF, check whether VFs are enabled and
2784 * increase the size to support VLAN tags */
2785 if (vfn < adapter->vfs_allocated_count &&
2786 adapter->vf_data[vfn].vlans_enabled)
2787 size += VLAN_TAG_SIZE;
2789 vmolr = rd32(E1000_VMOLR(vfn));
2790 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2791 vmolr |= size | E1000_VMOLR_LPE;
2792 wr32(E1000_VMOLR(vfn), vmolr);
2798 * igb_rlpml_set - set maximum receive packet size
2799 * @adapter: board private structure
2801 * Configure maximum receivable packet size.
2803 static void igb_rlpml_set(struct igb_adapter *adapter)
2805 u32 max_frame_size = adapter->max_frame_size;
2806 struct e1000_hw *hw = &adapter->hw;
2807 u16 pf_id = adapter->vfs_allocated_count;
2810 max_frame_size += VLAN_TAG_SIZE;
2812 /* if vfs are enabled we set RLPML to the largest possible request
2813 * size and set the VMOLR RLPML to the size we need */
2815 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2816 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2819 wr32(E1000_RLPML, max_frame_size);
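/*
 * Illustrative sketch, not driver code: the maximum-frame arithmetic
 * behind igb_rlpml_set() above for the non-SR-IOV case -- L2 payload
 * plus Ethernet header, FCS, and one VLAN tag when VLANs are in use.
 * example_rlpml() is a hypothetical helper.
 */
static u32 example_rlpml(u32 mtu, bool vlans_enabled)
{
	u32 max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

	if (vlans_enabled)
		max_frame += VLAN_TAG_SIZE;
	return max_frame;
}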
2822 static inline void igb_set_vmolr(struct igb_adapter *adapter,
2825 struct e1000_hw *hw = &adapter->hw;
2829 * This register exists only on 82576 and newer, so on older parts
2830 * we simply exit and do nothing
2832 if (hw->mac.type < e1000_82576)
2835 vmolr = rd32(E1000_VMOLR(vfn));
2836 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2838 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2840 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
2842 /* clear all bits that might not be set */
2843 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2845 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
2846 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2848 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2849 * multicast traffic */
2851 if (vfn <= adapter->vfs_allocated_count)
2852 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2854 wr32(E1000_VMOLR(vfn), vmolr);
2858 * igb_configure_rx_ring - Configure a receive ring after Reset
2859 * @adapter: board private structure
2860 * @ring: receive ring to be configured
2862 * Configure the Rx unit of the MAC after a reset.
2864 void igb_configure_rx_ring(struct igb_adapter *adapter,
2865 struct igb_ring *ring)
2867 struct e1000_hw *hw = &adapter->hw;
2868 u64 rdba = ring->dma;
2869 int reg_idx = ring->reg_idx;
2872 /* disable the queue */
2873 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2874 wr32(E1000_RXDCTL(reg_idx),
2875 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2877 /* Set DMA base address registers */
2878 wr32(E1000_RDBAL(reg_idx),
2879 rdba & 0x00000000ffffffffULL);
2880 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2881 wr32(E1000_RDLEN(reg_idx),
2882 ring->count * sizeof(union e1000_adv_rx_desc));
2884 /* initialize head and tail */
2885 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2886 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2887 writel(0, ring->head);
2888 writel(0, ring->tail);
2890 /* set descriptor configuration */
2891 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2892 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2893 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2894 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2895 srrctl |= IGB_RXBUFFER_16384 >>
2896 E1000_SRRCTL_BSIZEPKT_SHIFT;
2898 srrctl |= (PAGE_SIZE / 2) >>
2899 E1000_SRRCTL_BSIZEPKT_SHIFT;
2901 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2903 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2904 E1000_SRRCTL_BSIZEPKT_SHIFT;
2905 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2907 if (hw->mac.type == e1000_82580)
2908 srrctl |= E1000_SRRCTL_TIMESTAMP;
2909 /* Only set Drop Enable if we are supporting multiple queues */
2910 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2911 srrctl |= E1000_SRRCTL_DROP_EN;
2913 wr32(E1000_SRRCTL(reg_idx), srrctl);
2915 /* set filtering for VMDQ pools */
2916 igb_set_vmolr(adapter, reg_idx & 0x7, true);
2918 /* enable receive descriptor fetching */
2919 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2920 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2921 rxdctl &= 0xFFF00000;
2922 rxdctl |= IGB_RX_PTHRESH;
2923 rxdctl |= IGB_RX_HTHRESH << 8;
2924 rxdctl |= IGB_RX_WTHRESH << 16;
2925 wr32(E1000_RXDCTL(reg_idx), rxdctl);
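/*
 * Illustrative sketch, not driver code: the one-buffer SRRCTL encoding
 * used above. The packet buffer size field is expressed in 1 KB units,
 * so a 2048-byte buffer encodes as 2 after the shift.
 * example_srrctl_onebuf() is a hypothetical helper.
 */
static u32 example_srrctl_onebuf(u32 rx_buffer_len)
{
	return (ALIGN(rx_buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT) |
	       E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}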
2929 * igb_configure_rx - Configure receive Unit after Reset
2930 * @adapter: board private structure
2932 * Configure the Rx unit of the MAC after a reset.
2934 static void igb_configure_rx(struct igb_adapter *adapter)
2938 /* set UTA to appropriate mode */
2939 igb_set_uta(adapter);
2941 /* set the correct pool for the PF default MAC address in entry 0 */
2942 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2943 adapter->vfs_allocated_count);
2945 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2946 * the Base and Length of the Rx Descriptor Ring */
2947 for (i = 0; i < adapter->num_rx_queues; i++)
2948 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
2952 * igb_free_tx_resources - Free Tx Resources per Queue
2953 * @tx_ring: Tx descriptor ring for a specific queue
2955 * Free all transmit software resources
2957 void igb_free_tx_resources(struct igb_ring *tx_ring)
2959 igb_clean_tx_ring(tx_ring);
2961 vfree(tx_ring->buffer_info);
2962 tx_ring->buffer_info = NULL;
2964 /* if not set, then don't free */
2968 dma_free_coherent(tx_ring->dev, tx_ring->size,
2969 tx_ring->desc, tx_ring->dma);
2971 tx_ring->desc = NULL;
2975 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2976 * @adapter: board private structure
2978 * Free all transmit software resources
2980 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2984 for (i = 0; i < adapter->num_tx_queues; i++)
2985 igb_free_tx_resources(adapter->tx_ring[i]);
2988 void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2989 struct igb_buffer *buffer_info)
2991 if (buffer_info->dma) {
2992 if (buffer_info->mapped_as_page)
2993 dma_unmap_page(tx_ring->dev,
2995 buffer_info->length,
2998 dma_unmap_single(tx_ring->dev,
3000 buffer_info->length,
3002 buffer_info->dma = 0;
3004 if (buffer_info->skb) {
3005 dev_kfree_skb_any(buffer_info->skb);
3006 buffer_info->skb = NULL;
3008 buffer_info->time_stamp = 0;
3009 buffer_info->length = 0;
3010 buffer_info->next_to_watch = 0;
3011 buffer_info->mapped_as_page = false;
3015 * igb_clean_tx_ring - Free Tx Buffers
3016 * @tx_ring: ring to be cleaned
3018 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3020 struct igb_buffer *buffer_info;
3024 if (!tx_ring->buffer_info)
3026 /* Free all the Tx ring sk_buffs */
3028 for (i = 0; i < tx_ring->count; i++) {
3029 buffer_info = &tx_ring->buffer_info[i];
3030 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3033 size = sizeof(struct igb_buffer) * tx_ring->count;
3034 memset(tx_ring->buffer_info, 0, size);
3036 /* Zero out the descriptor ring */
3037 memset(tx_ring->desc, 0, tx_ring->size);
3039 tx_ring->next_to_use = 0;
3040 tx_ring->next_to_clean = 0;
3044 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3045 * @adapter: board private structure
3047 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3051 for (i = 0; i < adapter->num_tx_queues; i++)
3052 igb_clean_tx_ring(adapter->tx_ring[i]);
3056 * igb_free_rx_resources - Free Rx Resources
3057 * @rx_ring: ring to clean the resources from
3059 * Free all receive software resources
3061 void igb_free_rx_resources(struct igb_ring *rx_ring)
3063 igb_clean_rx_ring(rx_ring);
3065 vfree(rx_ring->buffer_info);
3066 rx_ring->buffer_info = NULL;
3068 /* if not set, then don't free */
3072 dma_free_coherent(rx_ring->dev, rx_ring->size,
3073 rx_ring->desc, rx_ring->dma);
3075 rx_ring->desc = NULL;
3079 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3080 * @adapter: board private structure
3082 * Free all receive software resources
3084 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3088 for (i = 0; i < adapter->num_rx_queues; i++)
3089 igb_free_rx_resources(adapter->rx_ring[i]);
3093 * igb_clean_rx_ring - Free Rx Buffers per Queue
3094 * @rx_ring: ring to free buffers from
3096 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3098 struct igb_buffer *buffer_info;
3102 if (!rx_ring->buffer_info)
3105 /* Free all the Rx ring sk_buffs */
3106 for (i = 0; i < rx_ring->count; i++) {
3107 buffer_info = &rx_ring->buffer_info[i];
3108 if (buffer_info->dma) {
3109 dma_unmap_single(rx_ring->dev,
3111 rx_ring->rx_buffer_len,
3113 buffer_info->dma = 0;
3116 if (buffer_info->skb) {
3117 dev_kfree_skb(buffer_info->skb);
3118 buffer_info->skb = NULL;
3120 if (buffer_info->page_dma) {
3121 dma_unmap_page(rx_ring->dev,
3122 buffer_info->page_dma,
3125 buffer_info->page_dma = 0;
3127 if (buffer_info->page) {
3128 put_page(buffer_info->page);
3129 buffer_info->page = NULL;
3130 buffer_info->page_offset = 0;
3134 size = sizeof(struct igb_buffer) * rx_ring->count;
3135 memset(rx_ring->buffer_info, 0, size);
3137 /* Zero out the descriptor ring */
3138 memset(rx_ring->desc, 0, rx_ring->size);
3140 rx_ring->next_to_clean = 0;
3141 rx_ring->next_to_use = 0;
3145 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3146 * @adapter: board private structure
3148 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3152 for (i = 0; i < adapter->num_rx_queues; i++)
3153 igb_clean_rx_ring(adapter->rx_ring[i]);
3157 * igb_set_mac - Change the Ethernet Address of the NIC
3158 * @netdev: network interface device structure
3159 * @p: pointer to an address structure
3161 * Returns 0 on success, negative on failure
3163 static int igb_set_mac(struct net_device *netdev, void *p)
3165 struct igb_adapter *adapter = netdev_priv(netdev);
3166 struct e1000_hw *hw = &adapter->hw;
3167 struct sockaddr *addr = p;
3169 if (!is_valid_ether_addr(addr->sa_data))
3170 return -EADDRNOTAVAIL;
3172 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3173 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3175 /* set the correct pool for the new PF MAC address in entry 0 */
3176 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3177 adapter->vfs_allocated_count);
3183 * igb_write_mc_addr_list - write multicast addresses to MTA
3184 * @netdev: network interface device structure
3186 * Writes multicast address list to the MTA hash table.
3187 * Returns: -ENOMEM on failure
3188 * 0 on no addresses written
3189 * X on writing X addresses to MTA
3191 static int igb_write_mc_addr_list(struct net_device *netdev)
3193 struct igb_adapter *adapter = netdev_priv(netdev);
3194 struct e1000_hw *hw = &adapter->hw;
3195 struct netdev_hw_addr *ha;
3199 if (netdev_mc_empty(netdev)) {
3200 /* nothing to program, so clear mc list */
3201 igb_update_mc_addr_list(hw, NULL, 0);
3202 igb_restore_vf_multicasts(adapter);
3206 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
3210 /* The shared function expects a packed array of only addresses. */
3212 netdev_for_each_mc_addr(ha, netdev)
3213 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3215 igb_update_mc_addr_list(hw, mta_list, i);
3218 return netdev_mc_count(netdev);
3222 * igb_write_uc_addr_list - write unicast addresses to RAR table
3223 * @netdev: network interface device structure
3225 * Writes unicast address list to the RAR table.
3226 * Returns: -ENOMEM on failure/insufficient address space
3227 * 0 on no addresses written
3228 * X on writing X addresses to the RAR table
3230 static int igb_write_uc_addr_list(struct net_device *netdev)
3232 struct igb_adapter *adapter = netdev_priv(netdev);
3233 struct e1000_hw *hw = &adapter->hw;
3234 unsigned int vfn = adapter->vfs_allocated_count;
3235 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3238 /* return ENOMEM indicating insufficient memory for addresses */
3239 if (netdev_uc_count(netdev) > rar_entries)
3242 if (!netdev_uc_empty(netdev) && rar_entries) {
3243 struct netdev_hw_addr *ha;
3245 netdev_for_each_uc_addr(ha, netdev) {
3248 igb_rar_set_qsel(adapter, ha->addr,
3254 /* write the addresses in reverse order to avoid write combining */
3255 for (; rar_entries > 0 ; rar_entries--) {
3256 wr32(E1000_RAH(rar_entries), 0);
3257 wr32(E1000_RAL(rar_entries), 0);
3265 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3266 * @netdev: network interface device structure
3268 * The set_rx_mode entry point is called whenever the unicast or multicast
3269 * address lists or the network interface flags are updated. This routine is
3270 * responsible for configuring the hardware for proper unicast, multicast,
3271 * promiscuous mode, and all-multi behavior.
3273 static void igb_set_rx_mode(struct net_device *netdev)
3275 struct igb_adapter *adapter = netdev_priv(netdev);
3276 struct e1000_hw *hw = &adapter->hw;
3277 unsigned int vfn = adapter->vfs_allocated_count;
3278 u32 rctl, vmolr = 0;
3281 /* Check for Promiscuous and All Multicast modes */
3282 rctl = rd32(E1000_RCTL);
3284 /* clear the affected bits */
3285 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3287 if (netdev->flags & IFF_PROMISC) {
3288 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3289 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
3291 if (netdev->flags & IFF_ALLMULTI) {
3292 rctl |= E1000_RCTL_MPE;
3293 vmolr |= E1000_VMOLR_MPME;
3296 * Write addresses to the MTA; if the attempt fails
3297 * then we should just turn on promiscuous mode so
3298 * that we can at least receive multicast traffic
3300 count = igb_write_mc_addr_list(netdev);
3302 rctl |= E1000_RCTL_MPE;
3303 vmolr |= E1000_VMOLR_MPME;
3305 vmolr |= E1000_VMOLR_ROMPE;
3309 * Write addresses to available RAR registers; if there is not
3310 * sufficient space to store all the addresses then enable
3311 * unicast promiscuous mode
3313 count = igb_write_uc_addr_list(netdev);
3315 rctl |= E1000_RCTL_UPE;
3316 vmolr |= E1000_VMOLR_ROPE;
3318 rctl |= E1000_RCTL_VFE;
3320 wr32(E1000_RCTL, rctl);
3323 * In order to support SR-IOV and eventually VMDq it is necessary to set
3324 * the VMOLR to enable the appropriate modes. Without this workaround
3325 * we will have issues with VLAN tag stripping not being done for frames
3326 * that are only arriving because we are the default pool
3328 if (hw->mac.type < e1000_82576)
3331 vmolr |= rd32(E1000_VMOLR(vfn)) &
3332 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3333 wr32(E1000_VMOLR(vfn), vmolr);
3334 igb_restore_vf_multicasts(adapter);
3337 /* Need to wait a few seconds after link up to get diagnostic information from
3338 * the phy */
3339 static void igb_update_phy_info(unsigned long data)
3341 struct igb_adapter *adapter = (struct igb_adapter *) data;
3342 igb_get_phy_info(&adapter->hw);
3346 * igb_has_link - check shared code for link and determine up/down
3347 * @adapter: pointer to driver private info
3349 bool igb_has_link(struct igb_adapter *adapter)
3351 struct e1000_hw *hw = &adapter->hw;
3352 bool link_active = false;
3355 /* get_link_status is set on LSC (link status) interrupt or
3356 * rx sequence error interrupt. get_link_status will stay
3357 * false until e1000_check_for_link establishes link
3358 * for copper adapters ONLY
3360 switch (hw->phy.media_type) {
3361 case e1000_media_type_copper:
3362 if (hw->mac.get_link_status) {
3363 ret_val = hw->mac.ops.check_for_link(hw);
3364 link_active = !hw->mac.get_link_status;
3369 case e1000_media_type_internal_serdes:
3370 ret_val = hw->mac.ops.check_for_link(hw);
3371 link_active = hw->mac.serdes_has_link;
3374 case e1000_media_type_unknown:
3382 * igb_watchdog - Timer Call-back
3383 * @data: pointer to adapter cast into an unsigned long
3385 static void igb_watchdog(unsigned long data)
3387 struct igb_adapter *adapter = (struct igb_adapter *)data;
3388 /* Do the rest outside of interrupt context */
3389 schedule_work(&adapter->watchdog_task);
3392 static void igb_watchdog_task(struct work_struct *work)
3394 struct igb_adapter *adapter = container_of(work,
3397 struct e1000_hw *hw = &adapter->hw;
3398 struct net_device *netdev = adapter->netdev;
3402 link = igb_has_link(adapter);
3404 if (!netif_carrier_ok(netdev)) {
3406 hw->mac.ops.get_speed_and_duplex(hw,
3407 &adapter->link_speed,
3408 &adapter->link_duplex);
3410 ctrl = rd32(E1000_CTRL);
3411 /* Links status message must follow this format */
3412 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
3413 "Flow Control: %s\n",
3415 adapter->link_speed,
3416 adapter->link_duplex == FULL_DUPLEX ?
3417 "Full Duplex" : "Half Duplex",
3418 ((ctrl & E1000_CTRL_TFCE) &&
3419 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3420 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3421 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
3423 /* adjust timeout factor according to speed/duplex */
3424 adapter->tx_timeout_factor = 1;
3425 switch (adapter->link_speed) {
3427 adapter->tx_timeout_factor = 14;
3430 /* maybe add some timeout factor ? */
3434 netif_carrier_on(netdev);
3436 igb_ping_all_vfs(adapter);
3438 /* link state has changed, schedule phy info update */
3439 if (!test_bit(__IGB_DOWN, &adapter->state))
3440 mod_timer(&adapter->phy_info_timer,
3441 round_jiffies(jiffies + 2 * HZ));
3444 if (netif_carrier_ok(netdev)) {
3445 adapter->link_speed = 0;
3446 adapter->link_duplex = 0;
3447 /* Links status message must follow this format */
3448 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3450 netif_carrier_off(netdev);
3452 igb_ping_all_vfs(adapter);
3454 /* link state has changed, schedule phy info update */
3455 if (!test_bit(__IGB_DOWN, &adapter->state))
3456 mod_timer(&adapter->phy_info_timer,
3457 round_jiffies(jiffies + 2 * HZ));
3461 igb_update_stats(adapter);
3463 for (i = 0; i < adapter->num_tx_queues; i++) {
3464 struct igb_ring *tx_ring = adapter->tx_ring[i];
3465 if (!netif_carrier_ok(netdev)) {
3466 /* We've lost link, so the controller stops DMA,
3467 * but we've got queued Tx work that's never going
3468 * to get done, so reset controller to flush Tx.
3469 * (Do the reset outside of interrupt context). */
3470 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3471 adapter->tx_timeout_count++;
3472 schedule_work(&adapter->reset_task);
3473 /* return immediately since reset is imminent */
3478 /* Force detection of hung controller every watchdog period */
3479 tx_ring->detect_tx_hung = true;
3482 /* Cause software interrupt to ensure rx ring is cleaned */
3483 if (adapter->msix_entries) {
3485 for (i = 0; i < adapter->num_q_vectors; i++) {
3486 struct igb_q_vector *q_vector = adapter->q_vector[i];
3487 eics |= q_vector->eims_value;
3489 wr32(E1000_EICS, eics);
3491 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3494 /* Reset the timer */
3495 if (!test_bit(__IGB_DOWN, &adapter->state))
3496 mod_timer(&adapter->watchdog_timer,
3497 round_jiffies(jiffies + 2 * HZ));
3500 enum latency_range {
3504 latency_invalid = 255
3508 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3510 * Stores a new ITR value based strictly on packet size. This
3511 * algorithm is less sophisticated than that used in igb_update_itr,
3512 * due to the difficulty of synchronizing statistics across multiple
3513 * receive rings. The divisors and thresholds used by this function
3514 * were determined based on theoretical maximum wire speed and testing
3515 * data, in order to minimize response time while increasing bulk
3516 * throughput.
3517 * This functionality is controlled by the InterruptThrottleRate module
3518 * parameter (see igb_param.c)
3519 * NOTE: This function is called only when operating in a multiqueue
3520 * receive environment.
3521 * @q_vector: pointer to q_vector
3523 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3525 int new_val = q_vector->itr_val;
3526 int avg_wire_size = 0;
3527 struct igb_adapter *adapter = q_vector->adapter;
3529 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3530 * ints/sec - ITR timer value of 120 ticks.
3532 if (adapter->link_speed != SPEED_1000) {
3537 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3538 struct igb_ring *ring = q_vector->rx_ring;
3539 avg_wire_size = ring->total_bytes / ring->total_packets;
3542 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3543 struct igb_ring *ring = q_vector->tx_ring;
3544 avg_wire_size = max_t(u32, avg_wire_size,
3545 (ring->total_bytes /
3546 ring->total_packets));
3549 /* if avg_wire_size isn't set no work was done */
3553 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3554 avg_wire_size += 24;
3556 /* Don't starve jumbo frames */
3557 avg_wire_size = min(avg_wire_size, 3000);
3559 /* Give a little boost to mid-size frames */
3560 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3561 new_val = avg_wire_size / 3;
3563 new_val = avg_wire_size / 2;
3565 /* when in itr mode 3 do not exceed 20K ints/sec */
3566 if (adapter->rx_itr_setting == 3 && new_val < 196)
3570 if (new_val != q_vector->itr_val) {
3571 q_vector->itr_val = new_val;
3572 q_vector->set_itr = 1;
3575 if (q_vector->rx_ring) {
3576 q_vector->rx_ring->total_bytes = 0;
3577 q_vector->rx_ring->total_packets = 0;
3579 if (q_vector->tx_ring) {
3580 q_vector->tx_ring->total_bytes = 0;
3581 q_vector->tx_ring->total_packets = 0;
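/*
 * Illustrative sketch, not driver code: the unit behind the ITR values
 * chosen above, assuming the 256 ns interval granularity of the EITR
 * registers (consistent with the comments in this file: 196 works out
 * to ~20,000 ints/sec and 980 to ~4,000 ints/sec).
 * example_itr_to_ints_per_sec() is a hypothetical helper.
 */
static u32 example_itr_to_ints_per_sec(u32 itr_val)
{
	return 1000000000U / (itr_val * 256U);
}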
3586 * igb_update_itr - update the dynamic ITR value based on statistics
3587 * Stores a new ITR value based on packets and byte
3588 * counts during the last interrupt. The advantage of per interrupt
3589 * computation is faster updates and more accurate ITR for the current
3590 * traffic pattern. Constants in this function were computed
3591 * based on theoretical maximum wire speed and thresholds were set based
3592 * on testing data as well as attempting to minimize response time
3593 * while increasing bulk throughput.
3594 * This functionality is controlled by the InterruptThrottleRate module
3595 * parameter (see igb_param.c)
3596 * NOTE: These calculations are only valid when operating in a single-
3597 * queue environment.
3598 * @adapter: pointer to adapter
3599 * @itr_setting: current q_vector->itr_val
3600 * @packets: the number of packets during this measurement interval
3601 * @bytes: the number of bytes during this measurement interval
3603 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3604 int packets, int bytes)
3606 unsigned int retval = itr_setting;
3609 goto update_itr_done;
3611 switch (itr_setting) {
3612 case lowest_latency:
3613 /* handle TSO and jumbo frames */
3614 if (bytes/packets > 8000)
3615 retval = bulk_latency;
3616 else if ((packets < 5) && (bytes > 512))
3617 retval = low_latency;
3619 case low_latency: /* 50 usec aka 20000 ints/s */
3620 if (bytes > 10000) {
3621 /* this if handles the TSO accounting */
3622 if (bytes/packets > 8000) {
3623 retval = bulk_latency;
3624 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3625 retval = bulk_latency;
3626 } else if ((packets > 35)) {
3627 retval = lowest_latency;
3629 } else if (bytes/packets > 2000) {
3630 retval = bulk_latency;
3631 } else if (packets <= 2 && bytes < 512) {
3632 retval = lowest_latency;
3635 case bulk_latency: /* 250 usec aka 4000 ints/s */
3636 if (bytes > 25000) {
3638 retval = low_latency;
3639 } else if (bytes < 1500) {
3640 retval = low_latency;
3649 static void igb_set_itr(struct igb_adapter *adapter)
3651 struct igb_q_vector *q_vector = adapter->q_vector[0];
3653 u32 new_itr = q_vector->itr_val;
3655 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3656 if (adapter->link_speed != SPEED_1000) {
3662 adapter->rx_itr = igb_update_itr(adapter,
3664 q_vector->rx_ring->total_packets,
3665 q_vector->rx_ring->total_bytes);
3667 adapter->tx_itr = igb_update_itr(adapter,
3669 q_vector->tx_ring->total_packets,
3670 q_vector->tx_ring->total_bytes);
3671 current_itr = max(adapter->rx_itr, adapter->tx_itr);
3673 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3674 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
3675 current_itr = low_latency;
3677 switch (current_itr) {
3678 /* counts and packets in update_itr are dependent on these numbers */
3679 case lowest_latency:
3680 new_itr = 56; /* aka 70,000 ints/sec */
3683 new_itr = 196; /* aka 20,000 ints/sec */
3686 new_itr = 980; /* aka 4,000 ints/sec */
3693 q_vector->rx_ring->total_bytes = 0;
3694 q_vector->rx_ring->total_packets = 0;
3695 q_vector->tx_ring->total_bytes = 0;
3696 q_vector->tx_ring->total_packets = 0;
3698 if (new_itr != q_vector->itr_val) {
3699 /* this attempts to bias the interrupt rate towards Bulk
3700 * by adding intermediate steps when interrupt rate is
3702 new_itr = new_itr > q_vector->itr_val ?
3703 max((new_itr * q_vector->itr_val) /
3704 (new_itr + (q_vector->itr_val >> 2)),
3707 /* Don't write the value here; it resets the adapter's
3708 * internal timer, and causes us to delay far longer than
3709 * we should between interrupts. Instead, we write the ITR
3710 * value at the beginning of the next interrupt so the timing
3711 * ends up being correct.
3713 q_vector->itr_val = new_itr;
3714 q_vector->set_itr = 1;
3718 #define IGB_TX_FLAGS_CSUM 0x00000001
3719 #define IGB_TX_FLAGS_VLAN 0x00000002
3720 #define IGB_TX_FLAGS_TSO 0x00000004
3721 #define IGB_TX_FLAGS_IPV4 0x00000008
3722 #define IGB_TX_FLAGS_TSTAMP 0x00000010
3723 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3724 #define IGB_TX_FLAGS_VLAN_SHIFT 16
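/*
 * Illustrative sketch, not driver code: how a 16-bit VLAN tag rides in
 * the upper half of tx_flags using the mask and shift defined above.
 * The transmit path does this packing inline;
 * example_pack_vlan_tx_flags() is a hypothetical helper.
 */
static u32 example_pack_vlan_tx_flags(u32 tx_flags, u16 vlan_tag)
{
	tx_flags &= ~IGB_TX_FLAGS_VLAN_MASK;
	tx_flags |= IGB_TX_FLAGS_VLAN;
	return tx_flags | ((u32)vlan_tag << IGB_TX_FLAGS_VLAN_SHIFT);
}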
3726 static inline int igb_tso_adv(struct igb_ring *tx_ring,
3727 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3729 struct e1000_adv_tx_context_desc *context_desc;
3732 struct igb_buffer *buffer_info;
3733 u32 info = 0, tu_cmd = 0;
3737 if (skb_header_cloned(skb)) {
3738 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3743 l4len = tcp_hdrlen(skb);
3746 if (skb->protocol == htons(ETH_P_IP)) {
3747 struct iphdr *iph = ip_hdr(skb);
3750 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3754 } else if (skb_is_gso_v6(skb)) {
3755 ipv6_hdr(skb)->payload_len = 0;
3756 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3757 &ipv6_hdr(skb)->daddr,
3761 i = tx_ring->next_to_use;
3763 buffer_info = &tx_ring->buffer_info[i];
3764 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3765 /* VLAN MACLEN IPLEN */
3766 if (tx_flags & IGB_TX_FLAGS_VLAN)
3767 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3768 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3769 *hdr_len += skb_network_offset(skb);
3770 info |= skb_network_header_len(skb);
3771 *hdr_len += skb_network_header_len(skb);
3772 context_desc->vlan_macip_lens = cpu_to_le32(info);
3774 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3775 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3777 if (skb->protocol == htons(ETH_P_IP))
3778 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3779 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3781 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3784 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3785 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3787 /* For 82575, context index must be unique per ring. */
3788 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3789 mss_l4len_idx |= tx_ring->reg_idx << 4;
3791 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3792 context_desc->seqnum_seed = 0;
3794 buffer_info->time_stamp = jiffies;
3795 buffer_info->next_to_watch = i;
3796 buffer_info->dma = 0;
3798 if (i == tx_ring->count)
3801 tx_ring->next_to_use = i;
3806 static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3807 struct sk_buff *skb, u32 tx_flags)
3809 struct e1000_adv_tx_context_desc *context_desc;
3810 struct device *dev = tx_ring->dev;
3811 struct igb_buffer *buffer_info;
3812 u32 info = 0, tu_cmd = 0;
3815 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3816 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3817 i = tx_ring->next_to_use;
3818 buffer_info = &tx_ring->buffer_info[i];
3819 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3821 if (tx_flags & IGB_TX_FLAGS_VLAN)
3822 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3824 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3825 if (skb->ip_summed == CHECKSUM_PARTIAL)
3826 info |= skb_network_header_len(skb);
3828 context_desc->vlan_macip_lens = cpu_to_le32(info);
3830 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3832 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3835 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3836 const struct vlan_ethhdr *vhdr =
3837 (const struct vlan_ethhdr*)skb->data;
3839 protocol = vhdr->h_vlan_encapsulated_proto;
3841 protocol = skb->protocol;
3845 case cpu_to_be16(ETH_P_IP):
3846 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3847 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3848 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3849 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3850 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3852 case cpu_to_be16(ETH_P_IPV6):
3853 /* XXX what about other V6 headers?? */
3854 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3855 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3856 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3857 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3860 if (unlikely(net_ratelimit()))
3862 "partial checksum but proto=%x!\n",
3868 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3869 context_desc->seqnum_seed = 0;
3870 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3871 context_desc->mss_l4len_idx =
3872 cpu_to_le32(tx_ring->reg_idx << 4);
3874 buffer_info->time_stamp = jiffies;
3875 buffer_info->next_to_watch = i;
3876 buffer_info->dma = 0;
3879 if (i == tx_ring->count)
3881 tx_ring->next_to_use = i;
3888 #define IGB_MAX_TXD_PWR 16
3889 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
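/*
 * Illustrative sketch, not driver code: with IGB_MAX_TXD_PWR = 16 a
 * single data descriptor carries at most 64 KB, so a linear buffer of
 * len bytes would need DIV_ROUND_UP(len, IGB_MAX_DATA_PER_TXD)
 * descriptors. The mapping code below BUG()s on oversized chunks
 * instead of splitting them. example_txd_count() is hypothetical.
 */
static unsigned int example_txd_count(unsigned int len)
{
	return DIV_ROUND_UP(len, IGB_MAX_DATA_PER_TXD);
}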
3891 static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3894 struct igb_buffer *buffer_info;
3895 struct device *dev = tx_ring->dev;
3896 unsigned int hlen = skb_headlen(skb);
3897 unsigned int count = 0, i;
3899 u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
3901 i = tx_ring->next_to_use;
3903 buffer_info = &tx_ring->buffer_info[i];
3904 BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
3905 buffer_info->length = hlen;
3906 /* set time_stamp *before* dma to help avoid a possible race */
3907 buffer_info->time_stamp = jiffies;
3908 buffer_info->next_to_watch = i;
3909 buffer_info->dma = dma_map_single(dev, skb->data, hlen,
3911 if (dma_mapping_error(dev, buffer_info->dma))
3914 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3915 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
3916 unsigned int len = frag->size;
3920 if (i == tx_ring->count)
3923 buffer_info = &tx_ring->buffer_info[i];
3924 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3925 buffer_info->length = len;
3926 buffer_info->time_stamp = jiffies;
3927 buffer_info->next_to_watch = i;
3928 buffer_info->mapped_as_page = true;
3929 buffer_info->dma = dma_map_page(dev,
3934 if (dma_mapping_error(dev, buffer_info->dma))
3939 tx_ring->buffer_info[i].skb = skb;
3940 tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
3941 /* multiply data chunks by size of headers */
3942 tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
3943 tx_ring->buffer_info[i].gso_segs = gso_segs;
3944 tx_ring->buffer_info[first].next_to_watch = i;
3949 dev_err(dev, "TX DMA map failed\n");
3951 /* clear timestamp and dma mappings for failed buffer_info mapping */
3952 buffer_info->dma = 0;
3953 buffer_info->time_stamp = 0;
3954 buffer_info->length = 0;
3955 buffer_info->next_to_watch = 0;
3956 buffer_info->mapped_as_page = false;
3958 /* clear timestamp and dma mappings for remaining portion of packet */
3963 buffer_info = &tx_ring->buffer_info[i];
3964 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3970 static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3971 u32 tx_flags, int count, u32 paylen,
3974 union e1000_adv_tx_desc *tx_desc;
3975 struct igb_buffer *buffer_info;
3976 u32 olinfo_status = 0, cmd_type_len;
3977 unsigned int i = tx_ring->next_to_use;
3979 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3980 E1000_ADVTXD_DCMD_DEXT);
3982 if (tx_flags & IGB_TX_FLAGS_VLAN)
3983 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3985 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3986 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3988 if (tx_flags & IGB_TX_FLAGS_TSO) {
3989 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3991 /* insert tcp checksum */
3992 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3994 /* insert ip checksum */
3995 if (tx_flags & IGB_TX_FLAGS_IPV4)
3996 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3998 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3999 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4002 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
4003 (tx_flags & (IGB_TX_FLAGS_CSUM |
4005 IGB_TX_FLAGS_VLAN)))
4006 olinfo_status |= tx_ring->reg_idx << 4;
4008 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
4011 buffer_info = &tx_ring->buffer_info[i];
4012 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
4013 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
4014 tx_desc->read.cmd_type_len =
4015 cpu_to_le32(cmd_type_len | buffer_info->length);
4016 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
4019 if (i == tx_ring->count)
4021 } while (count > 0);
4023 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
4024 /* Force memory writes to complete before letting h/w
4025 * know there are new descriptors to fetch. (Only
4026 * applicable for weak-ordered memory model archs,
4027 * such as IA-64). */
4030 tx_ring->next_to_use = i;
4031 writel(i, tx_ring->tail);
4032 /* we need this if more than one processor can write to our tail
4033 * at a time; it synchronizes IO on IA64/Altix systems */
4037 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
4039 struct net_device *netdev = tx_ring->netdev;
4041 netif_stop_subqueue(netdev, tx_ring->queue_index);
4043 /* Herbert's original patch had:
4044 * smp_mb__after_netif_stop_queue();
4045 * but since that doesn't exist yet, just open code it. */
4048 /* We need to check again in case another CPU has just
4049 * made room available. */
4050 if (igb_desc_unused(tx_ring) < size)
4054 netif_wake_subqueue(netdev, tx_ring->queue_index);
4055 tx_ring->tx_stats.restart_queue++;
4059 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
4061 if (igb_desc_unused(tx_ring) >= size)
4063 return __igb_maybe_stop_tx(tx_ring, size);
4066 netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
4067 struct igb_ring *tx_ring)
4069 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4074 union skb_shared_tx *shtx = skb_tx(skb);
4076 /* need: 1 descriptor per page,
4077 * + 2 desc gap to keep tail from touching head,
4078 * + 1 desc for skb->data,
4079 * + 1 desc for context descriptor,
4080 * otherwise try next time */
4081 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
4082 /* this is a hard error */
4083 return NETDEV_TX_BUSY;
4086 if (unlikely(shtx->hardware)) {
4087 shtx->in_progress = 1;
4088 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4091 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
4092 tx_flags |= IGB_TX_FLAGS_VLAN;
4093 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4096 if (skb->protocol == htons(ETH_P_IP))
4097 tx_flags |= IGB_TX_FLAGS_IPV4;
4099 first = tx_ring->next_to_use;
4100 if (skb_is_gso(skb)) {
4101 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
4104 dev_kfree_skb_any(skb);
4105 return NETDEV_TX_OK;
4110 tx_flags |= IGB_TX_FLAGS_TSO;
4111 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
4112 (skb->ip_summed == CHECKSUM_PARTIAL))
4113 tx_flags |= IGB_TX_FLAGS_CSUM;
4116 * count reflects descriptors mapped; if 0 or less then a mapping error
4117 * has occurred and we need to rewind the descriptor queue
4119 count = igb_tx_map_adv(tx_ring, skb, first);
4121 dev_kfree_skb_any(skb);
4122 tx_ring->buffer_info[first].time_stamp = 0;
4123 tx_ring->next_to_use = first;
4124 return NETDEV_TX_OK;
4127 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
4129 /* Make sure there is space in the ring for the next send. */
4130 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
4132 return NETDEV_TX_OK;
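/*
 * Illustrative sketch, not driver code, of the descriptor budget
 * checked on entry to igb_xmit_frame_ring_adv() above: one descriptor
 * per page fragment, one for skb->data, one context descriptor, and a
 * two-descriptor gap so the tail never touches the head.
 * example_tx_desc_budget() is a hypothetical helper mirroring the
 * nr_frags + 4 test.
 */
static inline int example_tx_desc_budget(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags + 4;
}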
4135 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
4136 struct net_device *netdev)
4138 struct igb_adapter *adapter = netdev_priv(netdev);
4139 struct igb_ring *tx_ring;
4142 if (test_bit(__IGB_DOWN, &adapter->state)) {
4143 dev_kfree_skb_any(skb);
4144 return NETDEV_TX_OK;
4147 if (skb->len <= 0) {
4148 dev_kfree_skb_any(skb);
4149 return NETDEV_TX_OK;
4152 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
4153 tx_ring = adapter->multi_tx_table[r_idx];
4155 /* This goes back to the question of how to logically map a tx queue
4156 * to a flow. Right now, performance is impacted slightly negatively
4157 * if using multiple tx queues. If the stack breaks away from a
4158 * single qdisc implementation, we can look at this again. */
4159 return igb_xmit_frame_ring_adv(skb, tx_ring);
4163 * igb_tx_timeout - Respond to a Tx Hang
4164 * @netdev: network interface device structure
4166 static void igb_tx_timeout(struct net_device *netdev)
4168 struct igb_adapter *adapter = netdev_priv(netdev);
4169 struct e1000_hw *hw = &adapter->hw;
4171 /* Do the reset outside of interrupt context */
4172 adapter->tx_timeout_count++;
4174 if (hw->mac.type == e1000_82580)
4175 hw->dev_spec._82575.global_device_reset = true;
4177 schedule_work(&adapter->reset_task);
4179 (adapter->eims_enable_mask & ~adapter->eims_other));
4182 static void igb_reset_task(struct work_struct *work)
4184 struct igb_adapter *adapter;
4185 adapter = container_of(work, struct igb_adapter, reset_task);
4188 netdev_err(adapter->netdev, "Reset adapter\n");
4189 igb_reinit_locked(adapter);
4193 * igb_get_stats - Get System Network Statistics
4194 * @netdev: network interface device structure
4196 * Returns the address of the device statistics structure.
4197 * The statistics are actually updated from the timer callback.
4199 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
4201 /* only return the current stats */
4202 return &netdev->stats;
4206 * igb_change_mtu - Change the Maximum Transfer Unit
4207 * @netdev: network interface device structure
4208 * @new_mtu: new value for maximum frame size
4210 * Returns 0 on success, negative on failure
4212 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4214 struct igb_adapter *adapter = netdev_priv(netdev);
4215 struct pci_dev *pdev = adapter->pdev;
4216 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4217 u32 rx_buffer_len, i;
4219 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
dev_err(&pdev->dev, "Invalid MTU setting\n");
return -EINVAL;
}
4224 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
return -EINVAL;
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
4232 /* igb_down has a dependency on max_frame_size */
4233 adapter->max_frame_size = max_frame;
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 * means we reserve 2 more, this pushes us to allocate from the next
 * larger slab size.
 * i.e. RXBUFFER_2048 --> size-4096 slab
 */
4241 if (adapter->hw.mac.type == e1000_82580)
4242 max_frame += IGB_TS_HDR_LEN;
4244 if (max_frame <= IGB_RXBUFFER_1024)
4245 rx_buffer_len = IGB_RXBUFFER_1024;
4246 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
4247 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
4249 rx_buffer_len = IGB_RXBUFFER_128;
4251 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
4252 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
4253 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
4255 if ((adapter->hw.mac.type == e1000_82580) &&
4256 (rx_buffer_len == IGB_RXBUFFER_128))
4257 rx_buffer_len += IGB_RXBUFFER_64;
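/* the 82580 prepends a hardware timestamp header to received frames,
 * so the smallest buffer class is padded to make room for it */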
if (netif_running(netdev))
igb_down(adapter);
4262 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
4263 netdev->mtu, new_mtu);
4264 netdev->mtu = new_mtu;
4266 for (i = 0; i < adapter->num_rx_queues; i++)
4267 adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
if (netif_running(netdev))
igb_up(adapter);
else
igb_reset(adapter);
4274 clear_bit(__IGB_RESETTING, &adapter->state);
4280 * igb_update_stats - Update the board statistics counters
4281 * @adapter: board private structure
4284 void igb_update_stats(struct igb_adapter *adapter)
4286 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
4287 struct e1000_hw *hw = &adapter->hw;
4288 struct pci_dev *pdev = adapter->pdev;
u32 reg, mpc;
u16 phy_tmp;
int i;
u64 bytes, packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4297 * Prevent stats update while adapter is being reset, or if the pci
4298 * connection is down.
if (adapter->link_speed == 0)
return;
if (pci_channel_offline(pdev))
return;
bytes = 0;
packets = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
4308 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
4309 struct igb_ring *ring = adapter->rx_ring[i];
4310 ring->rx_stats.drops += rqdpc_tmp;
4311 net_stats->rx_fifo_errors += rqdpc_tmp;
4312 bytes += ring->rx_stats.bytes;
4313 packets += ring->rx_stats.packets;
4316 net_stats->rx_bytes = bytes;
4317 net_stats->rx_packets = packets;
bytes = 0;
packets = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
4322 struct igb_ring *ring = adapter->tx_ring[i];
4323 bytes += ring->tx_stats.bytes;
4324 packets += ring->tx_stats.packets;
4326 net_stats->tx_bytes = bytes;
4327 net_stats->tx_packets = packets;
4329 /* read stats registers */
4330 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4331 adapter->stats.gprc += rd32(E1000_GPRC);
4332 adapter->stats.gorc += rd32(E1000_GORCL);
4333 rd32(E1000_GORCH); /* clear GORCL */
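/* gorc/gotc are 64-bit counters split across a register pair; the high
 * half is read and discarded purely so the hardware clears the pair, as
 * the comments note */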
4334 adapter->stats.bprc += rd32(E1000_BPRC);
4335 adapter->stats.mprc += rd32(E1000_MPRC);
4336 adapter->stats.roc += rd32(E1000_ROC);
4338 adapter->stats.prc64 += rd32(E1000_PRC64);
4339 adapter->stats.prc127 += rd32(E1000_PRC127);
4340 adapter->stats.prc255 += rd32(E1000_PRC255);
4341 adapter->stats.prc511 += rd32(E1000_PRC511);
4342 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4343 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4344 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4345 adapter->stats.sec += rd32(E1000_SEC);
4347 mpc = rd32(E1000_MPC);
4348 adapter->stats.mpc += mpc;
4349 net_stats->rx_fifo_errors += mpc;
4350 adapter->stats.scc += rd32(E1000_SCC);
4351 adapter->stats.ecol += rd32(E1000_ECOL);
4352 adapter->stats.mcc += rd32(E1000_MCC);
4353 adapter->stats.latecol += rd32(E1000_LATECOL);
4354 adapter->stats.dc += rd32(E1000_DC);
4355 adapter->stats.rlec += rd32(E1000_RLEC);
4356 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4357 adapter->stats.xontxc += rd32(E1000_XONTXC);
4358 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4359 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4360 adapter->stats.fcruc += rd32(E1000_FCRUC);
4361 adapter->stats.gptc += rd32(E1000_GPTC);
4362 adapter->stats.gotc += rd32(E1000_GOTCL);
4363 rd32(E1000_GOTCH); /* clear GOTCL */
4364 adapter->stats.rnbc += rd32(E1000_RNBC);
4365 adapter->stats.ruc += rd32(E1000_RUC);
4366 adapter->stats.rfc += rd32(E1000_RFC);
4367 adapter->stats.rjc += rd32(E1000_RJC);
4368 adapter->stats.tor += rd32(E1000_TORH);
4369 adapter->stats.tot += rd32(E1000_TOTH);
4370 adapter->stats.tpr += rd32(E1000_TPR);
4372 adapter->stats.ptc64 += rd32(E1000_PTC64);
4373 adapter->stats.ptc127 += rd32(E1000_PTC127);
4374 adapter->stats.ptc255 += rd32(E1000_PTC255);
4375 adapter->stats.ptc511 += rd32(E1000_PTC511);
4376 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4377 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4379 adapter->stats.mptc += rd32(E1000_MPTC);
4380 adapter->stats.bptc += rd32(E1000_BPTC);
4382 adapter->stats.tpt += rd32(E1000_TPT);
4383 adapter->stats.colc += rd32(E1000_COLC);
4385 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
4386 /* read internal phy specific stats */
4387 reg = rd32(E1000_CTRL_EXT);
4388 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4389 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4390 adapter->stats.tncrs += rd32(E1000_TNCRS);
4393 adapter->stats.tsctc += rd32(E1000_TSCTC);
4394 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4396 adapter->stats.iac += rd32(E1000_IAC);
4397 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4398 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4399 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4400 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4401 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4402 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4403 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4404 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4406 /* Fill out the OS statistics structure */
4407 net_stats->multicast = adapter->stats.mprc;
4408 net_stats->collisions = adapter->stats.colc;
4412 /* RLEC on some newer hardware can be incorrect so build
4413 * our own version based on RUC and ROC */
4414 net_stats->rx_errors = adapter->stats.rxerrc +
4415 adapter->stats.crcerrs + adapter->stats.algnerrc +
4416 adapter->stats.ruc + adapter->stats.roc +
4417 adapter->stats.cexterr;
net_stats->rx_length_errors = adapter->stats.ruc +
                              adapter->stats.roc;
4420 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4421 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4422 net_stats->rx_missed_errors = adapter->stats.mpc;
4425 net_stats->tx_errors = adapter->stats.ecol +
4426 adapter->stats.latecol;
4427 net_stats->tx_aborted_errors = adapter->stats.ecol;
4428 net_stats->tx_window_errors = adapter->stats.latecol;
4429 net_stats->tx_carrier_errors = adapter->stats.tncrs;
4431 /* Tx Dropped needs to be maintained elsewhere */
4434 if (hw->phy.media_type == e1000_media_type_copper) {
4435 if ((adapter->link_speed == SPEED_1000) &&
4436 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
4437 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4438 adapter->phy_stats.idle_errors += phy_tmp;
4442 /* Management Stats */
4443 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4444 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4445 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4448 static irqreturn_t igb_msix_other(int irq, void *data)
4450 struct igb_adapter *adapter = data;
4451 struct e1000_hw *hw = &adapter->hw;
4452 u32 icr = rd32(E1000_ICR);
4453 /* reading ICR causes bit 31 of EICR to be cleared */
4455 if (icr & E1000_ICR_DRSTA)
4456 schedule_work(&adapter->reset_task);
4458 if (icr & E1000_ICR_DOUTSYNC) {
4459 /* HW is reporting DMA is out of sync */
4460 adapter->stats.doosync++;
4463 /* Check for a mailbox event */
4464 if (icr & E1000_ICR_VMMB)
4465 igb_msg_task(adapter);
4467 if (icr & E1000_ICR_LSC) {
4468 hw->mac.get_link_status = 1;
4469 /* guard against interrupt when we're going down */
4470 if (!test_bit(__IGB_DOWN, &adapter->state))
4471 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4474 if (adapter->vfs_allocated_count)
4475 wr32(E1000_IMS, E1000_IMS_LSC |
                E1000_IMS_VMMB |
                E1000_IMS_DOUTSYNC);
else
wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
wr32(E1000_EIMS, adapter->eims_other);

return IRQ_HANDLED;
4485 static void igb_write_itr(struct igb_q_vector *q_vector)
4487 struct igb_adapter *adapter = q_vector->adapter;
4488 u32 itr_val = q_vector->itr_val & 0x7FFC;
if (!q_vector->set_itr)
return;
4496 if (adapter->hw.mac.type == e1000_82575)
4497 itr_val |= itr_val << 16;
else
itr_val |= 0x8000000;
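/* the 82575 expects the interval replicated in both halves of EITR;
 * later parts take it once, plus a high control bit so that rewriting
 * the register does not disturb the interrupt counters */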
4501 writel(itr_val, q_vector->itr_register);
4502 q_vector->set_itr = 0;
4505 static irqreturn_t igb_msix_ring(int irq, void *data)
4507 struct igb_q_vector *q_vector = data;
4509 /* Write the ITR value calculated from the previous interrupt. */
4510 igb_write_itr(q_vector);
napi_schedule(&q_vector->napi);

return IRQ_HANDLED;
4517 #ifdef CONFIG_IGB_DCA
4518 static void igb_update_dca(struct igb_q_vector *q_vector)
4520 struct igb_adapter *adapter = q_vector->adapter;
4521 struct e1000_hw *hw = &adapter->hw;
4522 int cpu = get_cpu();
if (q_vector->cpu == cpu)
goto out_no_update;
4527 if (q_vector->tx_ring) {
4528 int q = q_vector->tx_ring->reg_idx;
4529 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4530 if (hw->mac.type == e1000_82575) {
4531 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4532 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
} else {
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4535 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_TXCTRL_CPUID_SHIFT;
}
4538 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4539 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4541 if (q_vector->rx_ring) {
4542 int q = q_vector->rx_ring->reg_idx;
4543 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4544 if (hw->mac.type == e1000_82575) {
4545 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4546 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
} else {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4549 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_RXCTRL_CPUID_SHIFT;
}
4552 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4553 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4554 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4555 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
q_vector->cpu = cpu;
out_no_update:
put_cpu();
4562 static void igb_setup_dca(struct igb_adapter *adapter)
4564 struct e1000_hw *hw = &adapter->hw;
if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
return;
4570 /* Always use CB2 mode, difference is masked in the CB driver. */
4571 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4573 for (i = 0; i < adapter->num_q_vectors; i++) {
4574 adapter->q_vector[i]->cpu = -1;
4575 igb_update_dca(adapter->q_vector[i]);
4579 static int __igb_notify_dca(struct device *dev, void *data)
4581 struct net_device *netdev = dev_get_drvdata(dev);
4582 struct igb_adapter *adapter = netdev_priv(netdev);
4583 struct pci_dev *pdev = adapter->pdev;
4584 struct e1000_hw *hw = &adapter->hw;
4585 unsigned long event = *(unsigned long *)data;
switch (event) {
case DCA_PROVIDER_ADD:
4589 /* if already enabled, don't do it again */
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
break;
4592 if (dca_add_requester(dev) == 0) {
4593 adapter->flags |= IGB_FLAG_DCA_ENABLED;
4594 dev_info(&pdev->dev, "DCA enabled\n");
igb_setup_dca(adapter);
break;
}
4598 /* Fall Through since DCA is disabled. */
4599 case DCA_PROVIDER_REMOVE:
4600 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
4601 /* without this a class_device is left
4602 * hanging around in the sysfs model */
4603 dca_remove_requester(dev);
4604 dev_info(&pdev->dev, "DCA disabled\n");
4605 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
}
break;
}

return 0;
4614 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
int ret_val;

ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
                                 __igb_notify_dca);
4622 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4624 #endif /* CONFIG_IGB_DCA */
4626 static void igb_ping_all_vfs(struct igb_adapter *adapter)
4628 struct e1000_hw *hw = &adapter->hw;
4632 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4633 ping = E1000_PF_CONTROL_MSG;
4634 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
4635 ping |= E1000_VT_MSGTYPE_CTS;
4636 igb_write_mbx(hw, &ping, 1, i);
4640 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4642 struct e1000_hw *hw = &adapter->hw;
4643 u32 vmolr = rd32(E1000_VMOLR(vf));
4644 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
4647 IGB_VF_FLAG_MULTI_PROMISC);
4648 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4650 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4651 vmolr |= E1000_VMOLR_MPME;
4652 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4655 * if we have hashes and we are clearing a multicast promisc
4656 * flag we need to write the hashes to the MTA as this step
4657 * was previously skipped
4659 if (vf_data->num_vf_mc_hashes > 30) {
4660 vmolr |= E1000_VMOLR_MPME;
4661 } else if (vf_data->num_vf_mc_hashes) {
4663 vmolr |= E1000_VMOLR_ROMPE;
4664 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4665 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4669 wr32(E1000_VMOLR(vf), vmolr);
4671 /* there are flags left unprocessed, likely not supported */
if (*msgbuf & E1000_VT_MSGINFO_MASK)
return -EINVAL;

return 0;
4679 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4680 u32 *msgbuf, u32 vf)
4682 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4683 u16 *hash_list = (u16 *)&msgbuf[1];
4684 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
/* salt away the number of multicast addresses assigned
 * to this VF for later use to restore when the PF multicast
 * list changes
 */
vf_data->num_vf_mc_hashes = n;
/* only up to 30 hash values supported */
if (n > 30)
n = 30;
4697 /* store the hashes for later use */
4698 for (i = 0; i < n; i++)
4699 vf_data->vf_mc_hashes[i] = hash_list[i];
4701 /* Flush and reset the mta with the new values */
igb_set_rx_mode(adapter->netdev);

return 0;
4707 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4709 struct e1000_hw *hw = &adapter->hw;
4710 struct vf_data_storage *vf_data;
4713 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4714 u32 vmolr = rd32(E1000_VMOLR(i));
4715 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4717 vf_data = &adapter->vf_data[i];
4719 if ((vf_data->num_vf_mc_hashes > 30) ||
4720 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4721 vmolr |= E1000_VMOLR_MPME;
4722 } else if (vf_data->num_vf_mc_hashes) {
4723 vmolr |= E1000_VMOLR_ROMPE;
4724 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4725 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4727 wr32(E1000_VMOLR(i), vmolr);
4731 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
4733 struct e1000_hw *hw = &adapter->hw;
4734 u32 pool_mask, reg, vid;
4737 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
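/* each VF owns exactly one bit of the VLVF pool-select field; clearing
 * that bit detaches the VF from a shared VLAN filter entry without
 * disturbing the other pools */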
4739 /* Find the vlan filter for this id */
4740 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4741 reg = rd32(E1000_VLVF(i));
/* remove the vf from the pool */
reg &= ~pool_mask;
4746 /* if pool is empty then remove entry from vfta */
4747 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
4748 (reg & E1000_VLVF_VLANID_ENABLE)) {
4750 vid = reg & E1000_VLVF_VLANID_MASK;
4751 igb_vfta_set(hw, vid, false);
4754 wr32(E1000_VLVF(i), reg);
4757 adapter->vf_data[vf].vlans_enabled = 0;
4760 static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4762 struct e1000_hw *hw = &adapter->hw;
4765 /* The vlvf table only exists on 82576 hardware and newer */
if (hw->mac.type < e1000_82576)
return -1;
4769 /* we only need to do this if VMDq is enabled */
if (!adapter->vfs_allocated_count)
return -1;
4773 /* Find the vlan filter for this id */
4774 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4775 reg = rd32(E1000_VLVF(i));
4776 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
    vid == (reg & E1000_VLVF_VLANID_MASK))
break;
}

if (add) {
if (i == E1000_VLVF_ARRAY_SIZE) {
4783 /* Did not find a matching VLAN ID entry that was
4784 * enabled. Search for a free filter entry, i.e.
4785 * one without the enable bit set
4787 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4788 reg = rd32(E1000_VLVF(i));
if (!(reg & E1000_VLVF_VLANID_ENABLE))
break;
}
4793 if (i < E1000_VLVF_ARRAY_SIZE) {
4794 /* Found an enabled/available entry */
4795 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4797 /* if !enabled we need to set this up in vfta */
4798 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
4799 /* add VID to filter table */
4800 igb_vfta_set(hw, vid, true);
4801 reg |= E1000_VLVF_VLANID_ENABLE;
4803 reg &= ~E1000_VLVF_VLANID_MASK;
4805 wr32(E1000_VLVF(i), reg);
4807 /* do not modify RLPML for PF devices */
if (vf >= adapter->vfs_allocated_count)
return 0;
4811 if (!adapter->vf_data[vf].vlans_enabled) {
4813 reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size += 4;
reg &= ~E1000_VMOLR_RLPML_MASK;
reg |= size;
4818 wr32(E1000_VMOLR(vf), reg);
adapter->vf_data[vf].vlans_enabled++;

return 0;
}
} else {
4825 if (i < E1000_VLVF_ARRAY_SIZE) {
4826 /* remove vf from the pool */
4827 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
4828 /* if pool is empty then remove entry from vfta */
4829 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
reg = 0;
igb_vfta_set(hw, vid, false);
}
4833 wr32(E1000_VLVF(i), reg);
4835 /* do not modify RLPML for PF devices */
if (vf >= adapter->vfs_allocated_count)
return 0;
4839 adapter->vf_data[vf].vlans_enabled--;
4840 if (!adapter->vf_data[vf].vlans_enabled) {
4842 reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size -= 4;
reg &= ~E1000_VMOLR_RLPML_MASK;
reg |= size;
wr32(E1000_VMOLR(vf), reg);
}
}
}
return 0;
4854 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
4856 struct e1000_hw *hw = &adapter->hw;
if (vid)
wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
else
wr32(E1000_VMVIR(vf), 0);
4864 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
4865 int vf, u16 vlan, u8 qos)
{
int err = 0;
struct igb_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
return -EINVAL;
if (vlan || qos) {
err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
if (err)
goto out;
4876 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
4877 igb_set_vmolr(adapter, vf, !vlan);
4878 adapter->vf_data[vf].pf_vlan = vlan;
4879 adapter->vf_data[vf].pf_qos = qos;
4880 dev_info(&adapter->pdev->dev,
4881 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
4882 if (test_bit(__IGB_DOWN, &adapter->state)) {
4883 dev_warn(&adapter->pdev->dev,
4884 "The VF VLAN has been set,"
4885 " but the PF device is not up.\n");
4886 dev_warn(&adapter->pdev->dev,
4887 "Bring the PF device up before"
4888 " attempting to use the VF device.\n");
}
} else {
igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
             false, vf);
4893 igb_set_vmvir(adapter, vlan, vf);
4894 igb_set_vmolr(adapter, vf, true);
4895 adapter->vf_data[vf].pf_vlan = 0;
adapter->vf_data[vf].pf_qos = 0;
}
out:
return err;
4902 static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4904 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4905 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
4907 return igb_vlvf_set(adapter, vid, add, vf);
4910 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4913 adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
4914 adapter->vf_data[vf].last_nack = jiffies;
4916 /* reset offloads to defaults */
4917 igb_set_vmolr(adapter, vf, true);
4919 /* reset vlans for device */
4920 igb_clear_vf_vfta(adapter, vf);
4921 if (adapter->vf_data[vf].pf_vlan)
4922 igb_ndo_set_vf_vlan(adapter->netdev, vf,
4923 adapter->vf_data[vf].pf_vlan,
4924 adapter->vf_data[vf].pf_qos);
else
igb_clear_vf_vfta(adapter, vf);
4928 /* reset multicast table array for vf */
4929 adapter->vf_data[vf].num_vf_mc_hashes = 0;
4931 /* Flush and reset the mta with the new values */
4932 igb_set_rx_mode(adapter->netdev);
4935 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4937 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4939 /* generate a new mac address as we were hotplug removed/added */
4940 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
4941 random_ether_addr(vf_mac);
4943 /* process remaining reset events */
4944 igb_vf_reset(adapter, vf);
4947 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4949 struct e1000_hw *hw = &adapter->hw;
4950 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4951 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
u32 reg, msgbuf[3];
u8 *addr = (u8 *)(&msgbuf[1]);
4955 /* process all the same items cleared in a function level reset */
4956 igb_vf_reset(adapter, vf);
4958 /* set vf mac address */
4959 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4961 /* enable transmit and receive for vf */
4962 reg = rd32(E1000_VFTE);
4963 wr32(E1000_VFTE, reg | (1 << vf));
4964 reg = rd32(E1000_VFRE);
4965 wr32(E1000_VFRE, reg | (1 << vf));
4967 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
4969 /* reply to reset with ack and vf mac address */
4970 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
4971 memcpy(addr, vf_mac, 6);
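/* the reply is three mailbox words: the ACKed reset opcode in word 0
 * and the six MAC address bytes packed into words 1 and 2 */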
4972 igb_write_mbx(hw, msgbuf, 3, vf);
4975 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4978 * The VF MAC Address is stored in a packed array of bytes
4979 * starting at the second 32 bit word of the msg array
unsigned char *addr = (char *)&msg[1];
int err = -1;
4984 if (is_valid_ether_addr(addr))
err = igb_set_vf_mac(adapter, vf, addr);

return err;
4990 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4992 struct e1000_hw *hw = &adapter->hw;
4993 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4994 u32 msg = E1000_VT_MSGTYPE_NACK;
4996 /* if device isn't clear to send it shouldn't be reading either */
4997 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4998 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4999 igb_write_mbx(hw, &msg, 1, vf);
5000 vf_data->last_nack = jiffies;
5004 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5006 struct pci_dev *pdev = adapter->pdev;
5007 u32 msgbuf[E1000_VFMAILBOX_SIZE];
5008 struct e1000_hw *hw = &adapter->hw;
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
s32 retval;
5012 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
if (retval) {
/* if receive failed revoke VF CTS stats and restart init */
5016 dev_err(&pdev->dev, "Error receiving message from VF\n");
5017 vf_data->flags &= ~IGB_VF_FLAG_CTS;
if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
return;
goto out;
}
5023 /* this is a message we already processed, do nothing */
if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
return;
5028 * until the vf completes a reset it should not be
5029 * allowed to start any configuration.
5032 if (msgbuf[0] == E1000_VF_RESET) {
igb_vf_reset_msg(adapter, vf);
return;
}
5037 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
return;
retval = -1;
goto out;
}
5044 switch ((msgbuf[0] & 0xFFFF)) {
5045 case E1000_VF_SET_MAC_ADDR:
retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
break;
5048 case E1000_VF_SET_PROMISC:
retval = igb_set_vf_promisc(adapter, msgbuf, vf);
break;
5051 case E1000_VF_SET_MULTICAST:
retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
break;
5054 case E1000_VF_SET_LPE:
retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
break;
5057 case E1000_VF_SET_VLAN:
5058 if (adapter->vf_data[vf].pf_vlan)
retval = -1;
else
retval = igb_set_vf_vlan(adapter, msgbuf, vf);
break;
dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
retval = -1;
break;
}
msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

out:
5071 /* notify the VF of the results of what it sent us */
if (retval)
msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
else
msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5077 igb_write_mbx(hw, msgbuf, 1, vf);
5080 static void igb_msg_task(struct igb_adapter *adapter)
5082 struct e1000_hw *hw = &adapter->hw;
5085 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5086 /* process any reset requests */
5087 if (!igb_check_for_rst(hw, vf))
5088 igb_vf_reset_event(adapter, vf);
5090 /* process any messages pending */
5091 if (!igb_check_for_msg(hw, vf))
5092 igb_rcv_msg_from_vf(adapter, vf);
5094 /* process any acks */
5095 if (!igb_check_for_ack(hw, vf))
5096 igb_rcv_ack_from_vf(adapter, vf);
5101 * igb_set_uta - Set unicast filter table address
5102 * @adapter: board private structure
5104 * The unicast table address is a register array of 32-bit registers.
5105 * The table is meant to be used in a way similar to how the MTA is used
5106 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
5110 static void igb_set_uta(struct igb_adapter *adapter)
5112 struct e1000_hw *hw = &adapter->hw;
5115 /* The UTA table only exists on 82576 hardware and newer */
if (hw->mac.type < e1000_82576)
return;
5119 /* we only need to do this if VMDq is enabled */
if (!adapter->vfs_allocated_count)
return;
5123 for (i = 0; i < hw->mac.uta_reg_count; i++)
5124 array_wr32(E1000_UTA, i, ~0);
5128 * igb_intr_msi - Interrupt Handler
5129 * @irq: interrupt number
5130 * @data: pointer to a network interface device structure
5132 static irqreturn_t igb_intr_msi(int irq, void *data)
5134 struct igb_adapter *adapter = data;
5135 struct igb_q_vector *q_vector = adapter->q_vector[0];
5136 struct e1000_hw *hw = &adapter->hw;
5137 /* read ICR disables interrupts using IAM */
5138 u32 icr = rd32(E1000_ICR);
5140 igb_write_itr(q_vector);
5142 if (icr & E1000_ICR_DRSTA)
5143 schedule_work(&adapter->reset_task);
5145 if (icr & E1000_ICR_DOUTSYNC) {
5146 /* HW is reporting DMA is out of sync */
5147 adapter->stats.doosync++;
5150 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5151 hw->mac.get_link_status = 1;
5152 if (!test_bit(__IGB_DOWN, &adapter->state))
5153 mod_timer(&adapter->watchdog_timer, jiffies + 1);
napi_schedule(&q_vector->napi);

return IRQ_HANDLED;
5162 * igb_intr - Legacy Interrupt Handler
5163 * @irq: interrupt number
5164 * @data: pointer to a network interface device structure
5166 static irqreturn_t igb_intr(int irq, void *data)
5168 struct igb_adapter *adapter = data;
5169 struct igb_q_vector *q_vector = adapter->q_vector[0];
5170 struct e1000_hw *hw = &adapter->hw;
5171 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5172 * need for the IMC write */
5173 u32 icr = rd32(E1000_ICR);
if (!icr)
return IRQ_NONE; /* Not our interrupt */
5177 igb_write_itr(q_vector);
5179 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5180 * not set, then the adapter didn't send an interrupt */
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
5184 if (icr & E1000_ICR_DRSTA)
5185 schedule_work(&adapter->reset_task);
5187 if (icr & E1000_ICR_DOUTSYNC) {
5188 /* HW is reporting DMA is out of sync */
5189 adapter->stats.doosync++;
5192 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5193 hw->mac.get_link_status = 1;
5194 /* guard against interrupt when we're going down */
5195 if (!test_bit(__IGB_DOWN, &adapter->state))
5196 mod_timer(&adapter->watchdog_timer, jiffies + 1);
napi_schedule(&q_vector->napi);

return IRQ_HANDLED;
5204 static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
5206 struct igb_adapter *adapter = q_vector->adapter;
5207 struct e1000_hw *hw = &adapter->hw;
5209 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
5210 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
5211 if (!adapter->msix_entries)
5212 igb_set_itr(adapter);
else
igb_update_ring_itr(q_vector);
}
5217 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5218 if (adapter->msix_entries)
5219 wr32(E1000_EIMS, q_vector->eims_value);
else
igb_irq_enable(adapter);
}
5226 * igb_poll - NAPI Rx polling callback
5227 * @napi: napi polling structure
5228 * @budget: count of how many packets we should handle
5230 static int igb_poll(struct napi_struct *napi, int budget)
5232 struct igb_q_vector *q_vector = container_of(napi,
struct igb_q_vector,
napi);
5235 int tx_clean_complete = 1, work_done = 0;
5237 #ifdef CONFIG_IGB_DCA
5238 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_dca(q_vector);
#endif
5241 if (q_vector->tx_ring)
5242 tx_clean_complete = igb_clean_tx_irq(q_vector);
5244 if (q_vector->rx_ring)
5245 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
if (!tx_clean_complete)
work_done = budget;
5250 /* If not enough Rx work done, exit the polling mode */
5251 if (work_done < budget) {
5252 napi_complete(napi);
igb_ring_irq_enable(q_vector);
}

return work_done;
5260 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
5261 * @adapter: board private structure
5262 * @shhwtstamps: timestamp structure to update
5263 * @regval: unsigned 64bit system time value.
5265 * We need to convert the system time value stored in the RX/TXSTMP registers
5266 * into a hwtstamp which can be used by the upper level timestamping functions
5268 static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5269 struct skb_shared_hwtstamps *shhwtstamps,
                                   u64 regval)
{
u64 ns;

/*
 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
5276 * 24 to match clock shift we setup earlier.
5278 if (adapter->hw.mac.type == e1000_82580)
5279 regval <<= IGB_82580_TSYNC_SHIFT;
5281 ns = timecounter_cyc2time(&adapter->clock, regval);
5282 timecompare_update(&adapter->compare, ns);
5283 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5284 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5285 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5289 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5290 * @q_vector: pointer to q_vector containing needed info
5291 * @buffer: pointer to igb_buffer structure
5293 * If we were asked to do hardware stamping and such a time stamp is
5294 * available, then it must have been for this skb here because we only
5295 * allow only one such packet into the queue.
5297 static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
5299 struct igb_adapter *adapter = q_vector->adapter;
5300 struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
5304 /* if skb does not support hw timestamp or TX stamp not valid exit */
5305 if (likely(!buffer_info->shtx.hardware) ||
    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
return;
5309 regval = rd32(E1000_TXSTMPL);
5310 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5312 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
5313 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
5317 * igb_clean_tx_irq - Reclaim resources after transmit completes
5318 * @q_vector: pointer to q_vector containing needed info
5319 * returns true if ring is completely cleaned
5321 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5323 struct igb_adapter *adapter = q_vector->adapter;
5324 struct igb_ring *tx_ring = q_vector->tx_ring;
5325 struct net_device *netdev = tx_ring->netdev;
5326 struct e1000_hw *hw = &adapter->hw;
5327 struct igb_buffer *buffer_info;
5328 union e1000_adv_tx_desc *tx_desc, *eop_desc;
5329 unsigned int total_bytes = 0, total_packets = 0;
5330 unsigned int i, eop, count = 0;
5331 bool cleaned = false;
5333 i = tx_ring->next_to_clean;
5334 eop = tx_ring->buffer_info[i].next_to_watch;
5335 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
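/* next_to_watch names the last descriptor of a frame; hardware sets its
 * DD bit only after the whole frame is written back, so every buffer up
 * to and including eop is safe to reclaim */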
5337 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
5338 (count < tx_ring->count)) {
5339 for (cleaned = false; !cleaned; count++) {
5340 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
5341 buffer_info = &tx_ring->buffer_info[i];
5342 cleaned = (i == eop);
5344 if (buffer_info->skb) {
5345 total_bytes += buffer_info->bytecount;
5346 /* gso_segs is currently only valid for tcp */
5347 total_packets += buffer_info->gso_segs;
igb_tx_hwtstamp(q_vector, buffer_info);
}
5351 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
5352 tx_desc->wb.status = 0;
i++;
if (i == tx_ring->count)
i = 0;
}
5358 eop = tx_ring->buffer_info[i].next_to_watch;
5359 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5362 tx_ring->next_to_clean = i;
5364 if (unlikely(count &&
5365 netif_carrier_ok(netdev) &&
5366 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5367 /* Make sure that anybody stopping the queue after this
 * sees the new next_to_clean.
 */
smp_mb();
5371 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
5372 !(test_bit(__IGB_DOWN, &adapter->state))) {
5373 netif_wake_subqueue(netdev, tx_ring->queue_index);
5374 tx_ring->tx_stats.restart_queue++;
5378 if (tx_ring->detect_tx_hung) {
5379 /* Detect a transmit hang in hardware, this serializes the
5380 * check with the clearing of time_stamp and movement of i */
5381 tx_ring->detect_tx_hung = false;
5382 if (tx_ring->buffer_info[i].time_stamp &&
5383 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
5384 (adapter->tx_timeout_factor * HZ)) &&
5385 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
5387 /* detected Tx unit hang */
5388 dev_err(tx_ring->dev,
5389 "Detected Tx Unit Hang\n"
5393 " next_to_use <%x>\n"
5394 " next_to_clean <%x>\n"
5395 "buffer_info[next_to_clean]\n"
5396 " time_stamp <%lx>\n"
5397 " next_to_watch <%x>\n"
5399 " desc.status <%x>\n",
5400 tx_ring->queue_index,
5401 readl(tx_ring->head),
5402 readl(tx_ring->tail),
5403 tx_ring->next_to_use,
5404 tx_ring->next_to_clean,
5405 tx_ring->buffer_info[eop].time_stamp,
5408 eop_desc->wb.status);
5409 netif_stop_subqueue(netdev, tx_ring->queue_index);
5412 tx_ring->total_bytes += total_bytes;
5413 tx_ring->total_packets += total_packets;
5414 tx_ring->tx_stats.bytes += total_bytes;
5415 tx_ring->tx_stats.packets += total_packets;
5416 return (count < tx_ring->count);
5420 * igb_receive_skb - helper function to handle rx indications
5421 * @q_vector: structure containing interrupt and ring information
5422 * @skb: packet to send up
5423 * @vlan_tag: vlan tag for packet
5425 static void igb_receive_skb(struct igb_q_vector *q_vector,
5426 struct sk_buff *skb,
5429 struct igb_adapter *adapter = q_vector->adapter;
5431 if (vlan_tag && adapter->vlgrp)
vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
                 vlan_tag, skb);
else
napi_gro_receive(&q_vector->napi, skb);
5438 static inline void igb_rx_checksum_adv(struct igb_ring *ring,
5439 u32 status_err, struct sk_buff *skb)
5441 skb->ip_summed = CHECKSUM_NONE;
5443 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
5444 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
    (status_err & E1000_RXD_STAT_IXSM))
return;
5448 /* TCP/UDP checksum error bit is set */
if (status_err &
    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
5452 * work around errata with sctp packets where the TCPE aka
5453 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5454 * packets, (aka let the stack check the crc32c)
5456 if ((skb->len == 60) &&
5457 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
5458 ring->rx_stats.csum_err++;
/* let the stack verify checksum errors */
return;
}
5463 /* It must be a TCP or UDP packet with a valid checksum */
5464 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5465 skb->ip_summed = CHECKSUM_UNNECESSARY;
5467 dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
5470 static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5471 struct sk_buff *skb)
5473 struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
5478 * If this bit is set, then the RX registers contain the time stamp. No
5479 * other packet will be time stamped until we read these registers, so
5480 * read the registers to make them available again. Because only one
5481 * packet can be time stamped at a time, we know that the register
5482 * values must belong to this one here and therefore we don't need to
5483 * compare any of the additional attributes stored for it.
5485 * If nothing went wrong, then it should have a skb_shared_tx that we
5486 * can turn into a skb_shared_hwtstamps.
5488 if (staterr & E1000_RXDADV_STAT_TSIP) {
5489 u32 *stamp = (u32 *)skb->data;
5490 regval = le32_to_cpu(*(stamp + 2));
5491 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5492 skb_pull(skb, IGB_TS_HDR_LEN);
} else {
if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
return;
5497 regval = rd32(E1000_RXSTMPL);
regval |= (u64)rd32(E1000_RXSTMPH) << 32;
}
5501 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5503 static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
5504 union e1000_adv_rx_desc *rx_desc)
5506 /* HW will not DMA in data larger than the given buffer, even if it
5507 * parses the (NFS, of course) header to be larger. In that case, it
5508 * fills the header buffer and spills the rest into the page.
5510 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5511 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
5512 if (hlen > rx_ring->rx_buffer_len)
hlen = rx_ring->rx_buffer_len;
return hlen;
5517 static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5518 int *work_done, int budget)
5520 struct igb_ring *rx_ring = q_vector->rx_ring;
5521 struct net_device *netdev = rx_ring->netdev;
5522 struct device *dev = rx_ring->dev;
5523 union e1000_adv_rx_desc *rx_desc , *next_rxd;
5524 struct igb_buffer *buffer_info , *next_buffer;
5525 struct sk_buff *skb;
5526 bool cleaned = false;
5527 int cleaned_count = 0;
5528 int current_node = numa_node_id();
5529 unsigned int total_bytes = 0, total_packets = 0;
unsigned int i;
u32 staterr;
u16 length;
u16 vlan_tag;

i = rx_ring->next_to_clean;
5536 buffer_info = &rx_ring->buffer_info[i];
5537 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5538 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5540 while (staterr & E1000_RXD_STAT_DD) {
if (*work_done >= budget)
break;
(*work_done)++;
5545 skb = buffer_info->skb;
5546 prefetch(skb->data - NET_IP_ALIGN);
5547 buffer_info->skb = NULL;
i++;
if (i == rx_ring->count)
i = 0;
5553 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
5555 next_buffer = &rx_ring->buffer_info[i];
5557 length = le16_to_cpu(rx_desc->wb.upper.length);
5561 if (buffer_info->dma) {
5562 dma_unmap_single(dev, buffer_info->dma,
rx_ring->rx_buffer_len,
DMA_FROM_DEVICE);
5565 buffer_info->dma = 0;
5566 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
skb_put(skb, length);
goto send_up;
}
5570 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
}

if (length) {
dma_unmap_page(dev, buffer_info->page_dma,
5575 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5576 buffer_info->page_dma = 0;
5578 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
5583 if ((page_count(buffer_info->page) != 1) ||
5584 (page_to_nid(buffer_info->page) != current_node))
5585 buffer_info->page = NULL;
else
get_page(buffer_info->page);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
}
5594 if (!(staterr & E1000_RXD_STAT_EOP)) {
5595 buffer_info->skb = next_buffer->skb;
5596 buffer_info->dma = next_buffer->dma;
5597 next_buffer->skb = skb;
next_buffer->dma = 0;
goto next_desc;
}
send_up:
if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
dev_kfree_skb_irq(skb);
goto next_desc;
}
5607 if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
5608 igb_rx_hwtstamp(q_vector, staterr, skb);
total_bytes += skb->len;
total_packets++;
5612 igb_rx_checksum_adv(rx_ring, staterr, skb);
5614 skb->protocol = eth_type_trans(skb, netdev);
5615 skb_record_rx_queue(skb, rx_ring->queue_index);
5617 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5618 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
5620 igb_receive_skb(q_vector, skb, vlan_tag);
next_desc:
rx_desc->wb.upper.status_error = 0;
5625 /* return some buffers to hardware, one at a time is too slow */
5626 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
cleaned_count = 0;
}
5631 /* use prefetched values */
rx_desc = next_rxd;
buffer_info = next_buffer;
5634 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5637 rx_ring->next_to_clean = i;
5638 cleaned_count = igb_desc_unused(rx_ring);
if (cleaned_count)
igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
5643 rx_ring->total_packets += total_packets;
5644 rx_ring->total_bytes += total_bytes;
5645 rx_ring->rx_stats.packets += total_packets;
rx_ring->rx_stats.bytes += total_bytes;

return cleaned;
5651 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
5652 * @adapter: address of board private structure
5654 void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5656 struct net_device *netdev = rx_ring->netdev;
5657 union e1000_adv_rx_desc *rx_desc;
5658 struct igb_buffer *buffer_info;
5659 struct sk_buff *skb;
unsigned int i;
int bufsz;

i = rx_ring->next_to_use;
5664 buffer_info = &rx_ring->buffer_info[i];
5666 bufsz = rx_ring->rx_buffer_len;
5668 while (cleaned_count--) {
5669 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5671 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
5672 if (!buffer_info->page) {
5673 buffer_info->page = netdev_alloc_page(netdev);
5674 if (!buffer_info->page) {
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
}
buffer_info->page_offset = 0;
} else {
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
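/* alternating between the two halves of the page lets one half be
 * refilled while the stack may still hold a reference to the other */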
5682 buffer_info->page_dma =
5683 dma_map_page(rx_ring->dev, buffer_info->page,
5684 buffer_info->page_offset,
5687 if (dma_mapping_error(rx_ring->dev,
5688 buffer_info->page_dma)) {
5689 buffer_info->page_dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
}
skb = buffer_info->skb;
if (!skb) {
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
if (!skb) {
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
}

buffer_info->skb = skb;
}
5705 if (!buffer_info->dma) {
buffer_info->dma = dma_map_single(rx_ring->dev,
                                  skb->data,
                                  bufsz,
                                  DMA_FROM_DEVICE);
5710 if (dma_mapping_error(rx_ring->dev,
5711 buffer_info->dma)) {
5712 buffer_info->dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
}
}
5717 /* Refresh the desc even if buffer_addrs didn't change because
5718 * each write-back erases this info. */
5719 if (bufsz < IGB_RXBUFFER_1024) {
5720 rx_desc->read.pkt_addr =
5721 cpu_to_le64(buffer_info->page_dma);
5722 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
5724 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
5725 rx_desc->read.hdr_addr = 0;
5729 if (i == rx_ring->count)
5731 buffer_info = &rx_ring->buffer_info[i];
no_buffers:
if (rx_ring->next_to_use != i) {
5736 rx_ring->next_to_use = i;
if (i == 0)
i = (rx_ring->count - 1);
else
i--;
5742 /* Force memory writes to complete before letting h/w
5743 * know there are new descriptors to fetch. (Only
5744 * applicable for weak-ordered memory model archs,
5745 * such as IA-64). */
5747 writel(i, rx_ring->tail);
5757 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5759 struct igb_adapter *adapter = netdev_priv(netdev);
5760 struct mii_ioctl_data *data = if_mii(ifr);
if (adapter->hw.phy.media_type != e1000_media_type_copper)
return -EOPNOTSUPP;

switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = adapter->hw.phy.addr;
break;
case SIOCGMIIREG:
if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
                     &data->val_out))
return -EIO;
break;
default:
return -EOPNOTSUPP;
}
return 0;
5782 * igb_hwtstamp_ioctl - control hardware time stamping
5787 * Outgoing time stamping can be enabled and disabled. Play nice and
disable it when requested, although it shouldn't cause any overhead
5789 * when no packet needs it. At most one packet in the queue may be
5790 * marked for time stamping, otherwise it would be impossible to tell
5791 * for sure to which packet the hardware time stamp belongs.
5793 * Incoming time stamping has to be configured via the hardware
5794 * filters. Not all combinations are supported, in particular event
5795 * type has to be specified. Matching the kind of event packet is
5796 * not supported, with the exception of "all V2 events regardless of
5800 static int igb_hwtstamp_ioctl(struct net_device *netdev,
5801 struct ifreq *ifr, int cmd)
5803 struct igb_adapter *adapter = netdev_priv(netdev);
5804 struct e1000_hw *hw = &adapter->hw;
5805 struct hwtstamp_config config;
5806 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
5807 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 tsync_rx_cfg = 0;
bool is_l4 = false;
bool is_l2 = false;
u32 regval;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* reserved for future extensions */
if (config.flags)
return -EINVAL;
5820 switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
5829 switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
5833 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
5834 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
5835 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
5836 case HWTSTAMP_FILTER_ALL:
/*
 * register TSYNCRXCFG must be set, therefore it is not
5839 * possible to time stamp both Sync and Delay_Req messages
5840 * => fall back to time stamping all packets
5842 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
5845 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
5846 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
is_l4 = true;
break;
5850 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
5851 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
is_l4 = true;
break;
5855 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5856 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5857 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5858 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
is_l2 = true;
is_l4 = true;
config.rx_filter = HWTSTAMP_FILTER_SOME;
break;
5863 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5864 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5865 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5866 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
is_l2 = true;
is_l4 = true;
config.rx_filter = HWTSTAMP_FILTER_SOME;
break;
5871 case HWTSTAMP_FILTER_PTP_V2_EVENT:
5872 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5873 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5874 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
default:
return -ERANGE;
}
5882 if (hw->mac.type == e1000_82575) {
if (tsync_rx_ctl | tsync_tx_ctl)
return -EINVAL;
return 0;
}
/*
 * Per-packet timestamping only works if all packets are
5890 * timestamped, so enable timestamping in all packets as
5891 * long as one rx filter was configured.
5893 if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
5894 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
5895 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
5898 /* enable/disable TX */
5899 regval = rd32(E1000_TSYNCTXCTL);
5900 regval &= ~E1000_TSYNCTXCTL_ENABLED;
5901 regval |= tsync_tx_ctl;
5902 wr32(E1000_TSYNCTXCTL, regval);
5904 /* enable/disable RX */
5905 regval = rd32(E1000_TSYNCRXCTL);
5906 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
5907 regval |= tsync_rx_ctl;
5908 wr32(E1000_TSYNCRXCTL, regval);
5910 /* define which PTP packets are time stamped */
5911 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5913 /* define ethertype filter for timestamped packets */
if (is_l2)
wr32(E1000_ETQF(3),
     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
5917 E1000_ETQF_1588 | /* enable timestamping */
5918 ETH_P_1588)); /* 1588 eth protocol type */
else
wr32(E1000_ETQF(3), 0);
5922 #define PTP_PORT 319
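/* UDP port 319 carries PTP event messages (Sync and Delay_Req, the
 * ones that need timestamps); general messages use port 320 and do not
 * need hardware timestamping */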
5923 /* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
u32 ftqf = (IPPROTO_UDP /* UDP */
5926 | E1000_FTQF_VF_BP /* VF not compared */
5927 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
5928 | E1000_FTQF_MASK); /* mask all inputs */
5929 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
5931 wr32(E1000_IMIR(3), htons(PTP_PORT));
5932 wr32(E1000_IMIREXT(3),
5933 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
5934 if (hw->mac.type == e1000_82576) {
5935 /* enable source port check */
5936 wr32(E1000_SPQF(3), htons(PTP_PORT));
5937 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
}
wr32(E1000_FTQF(3), ftqf);
} else {
wr32(E1000_FTQF(3), E1000_FTQF_MASK);
}
5945 adapter->hwtstamp_config = config;
5947 /* clear TX/RX time stamp registers, just to be sure */
5948 regval = rd32(E1000_TXSTMPH);
5949 regval = rd32(E1000_RXSTMPH);
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
5961 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
case SIOCSHWTSTAMP:
return igb_hwtstamp_ioctl(netdev, ifr, cmd);
default:
return -EOPNOTSUPP;
}
5975 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5977 struct igb_adapter *adapter = hw->back;
5980 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (!cap_offset)
return -E1000_ERR_CONFIG;
pci_read_config_word(adapter->pdev, cap_offset + reg, value);

return 0;
5989 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5991 struct igb_adapter *adapter = hw->back;
5994 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (!cap_offset)
return -E1000_ERR_CONFIG;
pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

return 0;
6003 static void igb_vlan_rx_register(struct net_device *netdev,
6004 struct vlan_group *grp)
6006 struct igb_adapter *adapter = netdev_priv(netdev);
6007 struct e1000_hw *hw = &adapter->hw;
6010 igb_irq_disable(adapter);
6011 adapter->vlgrp = grp;
if (grp) {
/* enable VLAN tag insert/strip */
6015 ctrl = rd32(E1000_CTRL);
6016 ctrl |= E1000_CTRL_VME;
6017 wr32(E1000_CTRL, ctrl);
6019 /* Disable CFI check */
6020 rctl = rd32(E1000_RCTL);
6021 rctl &= ~E1000_RCTL_CFIEN;
6022 wr32(E1000_RCTL, rctl);
} else {
/* disable VLAN tag insert/strip */
6025 ctrl = rd32(E1000_CTRL);
6026 ctrl &= ~E1000_CTRL_VME;
6027 wr32(E1000_CTRL, ctrl);
}

igb_rlpml_set(adapter);
6032 if (!test_bit(__IGB_DOWN, &adapter->state))
6033 igb_irq_enable(adapter);
6036 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
6038 struct igb_adapter *adapter = netdev_priv(netdev);
6039 struct e1000_hw *hw = &adapter->hw;
6040 int pf_id = adapter->vfs_allocated_count;
6042 /* attempt to add filter to vlvf array */
6043 igb_vlvf_set(adapter, vid, true, pf_id);
6045 /* add the filter since PF can receive vlans w/o entry in vlvf */
6046 igb_vfta_set(hw, vid, true);
6049 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
6051 struct igb_adapter *adapter = netdev_priv(netdev);
6052 struct e1000_hw *hw = &adapter->hw;
6053 int pf_id = adapter->vfs_allocated_count;
6056 igb_irq_disable(adapter);
6057 vlan_group_set_device(adapter->vlgrp, vid, NULL);
6059 if (!test_bit(__IGB_DOWN, &adapter->state))
6060 igb_irq_enable(adapter);
6062 /* remove vlan from VLVF table array */
6063 err = igb_vlvf_set(adapter, vid, false, pf_id);
6065 /* if vid was not present in VLVF just remove it from table */
if (err)
igb_vfta_set(hw, vid, false);
6070 static void igb_restore_vlan(struct igb_adapter *adapter)
6072 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
6074 if (adapter->vlgrp) {
6076 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
6079 igb_vlan_rx_add_vid(adapter->netdev, vid);
6084 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
6086 struct pci_dev *pdev = adapter->pdev;
6087 struct e1000_mac_info *mac = &adapter->hw.mac;
mac->autoneg = 0;

switch (spddplx) {
case SPEED_10 + DUPLEX_HALF:
mac->forced_speed_duplex = ADVERTISE_10_HALF;
break;
case SPEED_10 + DUPLEX_FULL:
mac->forced_speed_duplex = ADVERTISE_10_FULL;
break;
case SPEED_100 + DUPLEX_HALF:
mac->forced_speed_duplex = ADVERTISE_100_HALF;
break;
case SPEED_100 + DUPLEX_FULL:
mac->forced_speed_duplex = ADVERTISE_100_FULL;
break;
case SPEED_1000 + DUPLEX_FULL:
mac->autoneg = 1;
adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
return 0;
6116 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
6118 struct net_device *netdev = pci_get_drvdata(pdev);
6119 struct igb_adapter *adapter = netdev_priv(netdev);
6120 struct e1000_hw *hw = &adapter->hw;
6121 u32 ctrl, rctl, status;
6122 u32 wufc = adapter->wol;
6127 netif_device_detach(netdev);
if (netif_running(netdev))
igb_close(netdev);
6132 igb_clear_interrupt_scheme(adapter);
retval = pci_save_state(pdev);
if (retval)
return retval;
6140 status = rd32(E1000_STATUS);
6141 if (status & E1000_STATUS_LU)
6142 wufc &= ~E1000_WUFC_LNKC;
if (wufc) {
igb_setup_rctl(adapter);
6146 igb_set_rx_mode(netdev);
6148 /* turn on all-multi mode if wake on multicast is enabled */
6149 if (wufc & E1000_WUFC_MC) {
6150 rctl = rd32(E1000_RCTL);
6151 rctl |= E1000_RCTL_MPE;
6152 wr32(E1000_RCTL, rctl);
6155 ctrl = rd32(E1000_CTRL);
6156 /* advertise wake from D3Cold */
6157 #define E1000_CTRL_ADVD3WUC 0x00100000
6158 /* phy power management enable */
6159 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6160 ctrl |= E1000_CTRL_ADVD3WUC;
6161 wr32(E1000_CTRL, ctrl);
6163 /* Allow time for pending master requests to run */
6164 igb_disable_pcie_master(hw);
6166 wr32(E1000_WUC, E1000_WUC_PME_EN);
6167 wr32(E1000_WUFC, wufc);
} else {
wr32(E1000_WUC, 0);
wr32(E1000_WUFC, 0);
}
6173 *enable_wake = wufc || adapter->en_mng_pt;
if (!*enable_wake)
igb_power_down_link(adapter);
else
igb_power_up_link(adapter);
6179 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6180 * would have already happened in close and is redundant. */
6181 igb_release_hw_control(adapter);
pci_disable_device(pdev);

return 0;
6189 static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
6194 retval = __igb_shutdown(pdev, &wake);
if (retval)
return retval;

if (wake) {
pci_prepare_to_sleep(pdev);
} else {
pci_wake_from_d3(pdev, false);
pci_set_power_state(pdev, PCI_D3hot);
}

return 0;
6208 static int igb_resume(struct pci_dev *pdev)
6210 struct net_device *netdev = pci_get_drvdata(pdev);
6211 struct igb_adapter *adapter = netdev_priv(netdev);
6212 struct e1000_hw *hw = &adapter->hw;
6215 pci_set_power_state(pdev, PCI_D0);
6216 pci_restore_state(pdev);
6217 pci_save_state(pdev);
6219 err = pci_enable_device_mem(pdev);
6222 "igb: Cannot enable PCI device from suspend\n");
6225 pci_set_master(pdev);
6227 pci_enable_wake(pdev, PCI_D3hot, 0);
6228 pci_enable_wake(pdev, PCI_D3cold, 0);
6230 if (igb_init_interrupt_scheme(adapter)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}

igb_reset(adapter);
/* let the f/w know that the h/w is now under the control of the
 * driver. */
6239 igb_get_hw_control(adapter);
6241 wr32(E1000_WUS, ~0);
6243 if (netif_running(netdev)) {
err = igb_open(netdev);
if (err)
return err;
}
6249 netif_device_attach(netdev);
6255 static void igb_shutdown(struct pci_dev *pdev)
bool wake;

__igb_shutdown(pdev, &wake);
6261 if (system_state == SYSTEM_POWER_OFF) {
6262 pci_wake_from_d3(pdev, wake);
6263 pci_set_power_state(pdev, PCI_D3hot);
6267 #ifdef CONFIG_NET_POLL_CONTROLLER
6269 * Polling 'interrupt' - used by things like netconsole to send skbs
6270 * without having to re-enable interrupts. It's not called while
6271 * the interrupt routine is executing.
6273 static void igb_netpoll(struct net_device *netdev)
6275 struct igb_adapter *adapter = netdev_priv(netdev);
6276 struct e1000_hw *hw = &adapter->hw;
6279 if (!adapter->msix_entries) {
6280 struct igb_q_vector *q_vector = adapter->q_vector[0];
6281 igb_irq_disable(adapter);
napi_schedule(&q_vector->napi);
return;
}
6286 for (i = 0; i < adapter->num_q_vectors; i++) {
6287 struct igb_q_vector *q_vector = adapter->q_vector[i];
6288 wr32(E1000_EIMC, q_vector->eims_value);
6289 napi_schedule(&q_vector->napi);
6292 #endif /* CONFIG_NET_POLL_CONTROLLER */
6295 * igb_io_error_detected - called when PCI error is detected
6296 * @pdev: Pointer to PCI device
6297 * @state: The current pci connection state
6299 * This function is called after a PCI bus error affecting
6300 * this device has been detected.
6302 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6303 pci_channel_state_t state)
6305 struct net_device *netdev = pci_get_drvdata(pdev);
6306 struct igb_adapter *adapter = netdev_priv(netdev);
6308 netif_device_detach(netdev);
6310 if (state == pci_channel_io_perm_failure)
6311 return PCI_ERS_RESULT_DISCONNECT;
if (netif_running(netdev))
igb_down(adapter);
6315 pci_disable_device(pdev);
/* Request a slot reset. */
6318 return PCI_ERS_RESULT_NEED_RESET;
6322 * igb_io_slot_reset - called after the pci bus has been reset.
6323 * @pdev: Pointer to PCI device
6325 * Restart the card from scratch, as if from a cold-boot. Implementation
6326 * resembles the first-half of the igb_resume routine.
6328 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6330 struct net_device *netdev = pci_get_drvdata(pdev);
6331 struct igb_adapter *adapter = netdev_priv(netdev);
6332 struct e1000_hw *hw = &adapter->hw;
6333 pci_ers_result_t result;
6336 if (pci_enable_device_mem(pdev)) {
6338 "Cannot re-enable PCI device after reset.\n");
6339 result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
6342 pci_restore_state(pdev);
6343 pci_save_state(pdev);
6345 pci_enable_wake(pdev, PCI_D3hot, 0);
6346 pci_enable_wake(pdev, PCI_D3cold, 0);
6349 wr32(E1000_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;
}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	/* select the pool this address is valid for */
	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
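
/*
 * Worked example of the packing above: for MAC 00:11:22:33:44:55,
 * RAL(index) is written with 0x33221100 and the low 16 bits of
 * RAH(index) with 0x5544; E1000_RAH_AV and the pool-select bits live
 * in the upper half of RAH.
 */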

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first; as a result a collision should not be
	 * possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}
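
/*
 * For example, with a rar_entry_count of 24, VF 0's address lands in
 * RAR[23], VF 1's in RAR[22], and so on, growing downward while RAR[0]
 * keeps the PF's own MAC address.
 */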

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
		 " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}
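
/*
 * Usage note: this ndo is reached over rtnetlink, e.g.
 *   ip link set dev <pf-iface> vf 0 mac 02:00:00:00:00:01
 * As the messages above say, the VF driver must be reloaded before the
 * new address takes effect inside the guest.
 */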

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	/* per-VF tx rate limiting is not implemented here */
	return -EOPNOTSUPP;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}
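
/*
 * The fields filled in above are what rtnetlink reports back for each
 * VF line of "ip link show dev <pf-iface>": MAC, VLAN and QoS priority.
 */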

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}
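
	/*
	 * Loopback and replication only make sense when SR-IOV VFs exist;
	 * they let VM-to-VM traffic switch inside the adapter instead of
	 * leaving the wire, so keep them off otherwise.
	 */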
	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}